{\rtf1\ansi\ansicpg1252\cocoartf2820
\cocoatextscaling0\cocoaplatform0{\fonttbl\f0\fnil\fcharset0 Menlo-Regular;}
{\colortbl;\red255\green255\blue255;\red0\green0\blue0;}
{\*\expandedcolortbl;;\csgray\c0;}
\paperw11900\paperh16840\margl1440\margr1440\vieww35800\viewh20300\viewkind0
\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\partightenfactor0

\f0\fs22 \cf2 \CocoaLigature0 MacBook PRO M3 36GB \
\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\
\
huge_new=# set temp_file_compression = "no";\
SET\
\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\partightenfactor0
huge_new=# SELECT datname, temp_bytes FROM pg_stat_database;\
    datname    |  temp_bytes  \
---------------+--------------\
               |            0\
 postgres      |    154000000\
 fjanus        |            0\
 template1     |            0\
 template0     |            0\
 hashjoin_test |            0\
 huge_tables   | 157592582113\
 random        |    890168658\
 eshop         |     56335229\
 huge_new      |            0\
(10 rows)\
\
huge_new=# EXPLAIN ANALYZE SELECT a.id, a.data, b.data FROM table_a a JOIN table_b b ON a.id = b.id;\
                                                                  QUERY PLAN                                                                   \
-----------------------------------------------------------------------------------------------------------------------------------------------\
 Gather  (cost=1318666.18..4309880.44 rows=10000008 width=816) (actual time=113489.050..168871.466 rows=10000000 loops=1)\
   Workers Planned: 7\
   Workers Launched: 7\
   ->  Hash Join  (cost=1317666.18..3308879.64 rows=1428573 width=816) (actual time=113528.579..161470.256 rows=1250000 loops=8)\
         Hash Cond: (b.id = a.id)\
         ->  Parallel Seq Scan on table_b b  (cost=0.00..1139683.45 rows=2857145 width=412) (actual time=0.638..9547.653 rows=2500000 loops=8)\
         ->  Hash  (cost=655556.08..655556.08 rows=10000008 width=412) (actual time=113206.367..113206.367 rows=10000000 loops=8)\
               Buckets: 32768  Batches: 1024  Memory Usage: 4486kB\
               ->  Seq Scan on table_a a  (cost=0.00..655556.08 rows=10000008 width=412) (actual time=0.524..9326.244 rows=10000000 loops=8)\
 Planning Time: 1.720 ms\
 Execution Time: 169180.107 ms\
(11 rows)\
\
huge_new=# SELECT datname, temp_bytes / 1024 / 1024 AS temp_bytes_mb FROM pg_stat_database;\
    datname    | temp_bytes_mb \
---------------+---------------\
               |             0\
 postgres      |           146\
 fjanus        |             0\
 template1     |             0\
 template0     |             0\
 hashjoin_test |             0\
 huge_tables   |        150291\
 random        |           848\
 eshop         |            53\
 huge_new      |         41234\
(10 rows)\
\
huge_new=# set temp_file_compression = "lz4";\
SET\
huge_new=# SELECT pg_stat_reset();\
 pg_stat_reset \
---------------\
 \
(1 row)\
\
huge_new=# EXPLAIN ANALYZE SELECT a.id, a.data, b.data FROM table_a a JOIN table_b b ON a.id = b.id;\
                                                                  QUERY PLAN                                                                   \
-----------------------------------------------------------------------------------------------------------------------------------------------\
 Gather  (cost=1318666.18..4309880.44 rows=10000008 width=816) (actual time=93632.254..128635.086 rows=10000000 loops=1)\
   Workers Planned: 7\
   Workers Launched: 7\
   ->  Hash Join  (cost=1317666.18..3308879.64 rows=1428573 width=816) (actual time=93657.750..123759.947 rows=1250000 loops=8)\
         Hash Cond: (b.id = a.id)\
         ->  Parallel Seq Scan on table_b b  (cost=0.00..1139683.45 rows=2857145 width=412) (actual time=0.478..5278.765 rows=2500000 loops=8)\
         ->  Hash  (cost=655556.08..655556.08 rows=10000008 width=412) (actual time=93328.973..93328.973 rows=10000000 loops=8)\
               Buckets: 32768  Batches: 1024  Memory Usage: 4486kB\
               ->  Seq Scan on table_a a  (cost=0.00..655556.08 rows=10000008 width=412) (actual time=0.957..6605.046 rows=10000000 loops=8)\
 Planning Time: 0.807 ms\
 Execution Time: 128980.688 ms\
(11 rows)\
\
huge_new=# SELECT datname, temp_bytes / 1024 / 1024 AS temp_bytes_mb FROM pg_stat_database;\
    datname    | temp_bytes_mb \
---------------+---------------\
               |             0\
 postgres      |           146\
 fjanus        |             0\
 template1     |             0\
 template0     |             0\
 hashjoin_test |             0\
 huge_tables   |        150291\
 random        |           848\
 eshop         |            53\
 huge_new      |          1937\
(10 rows)\
\
huge_new=# SELECT pg_size_pretty(pg_database_size('huge_new'));\
 pg_size_pretty \
----------------\
 13 GB\
(1 row)\
\
\
\
\
Virtual machine ARM64 10GB/ 6CPU\
\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\'97\
\
\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\partightenfactor0
huge=# SELECT datname,\
       temp_bytes / 1024 / 1024 AS temp_bytes_mb\
FROM pg_stat_database;\
     datname      | temp_bytes_mb \
------------------+---------------\
                  |             0\
 postgres         |             0\
 template1        |             0\
 template0        |             0\
 fjanus           |             0\
 test             |             0\
 d22              |             0\
 hashjoin_test    |            16\
 compression_test |           196\
 cmptest          |             1\
 huge             |             0\
(11 rows)\
\
huge=# set temp_file_compression = "lz4";\
SET\
huge=# EXPLAIN ANALYZE SELECT a.id, a.data, b.data FROM table_a a JOIN table_b b ON a.id = b.id;\
                                                                  QUERY PLAN                                                                   \
-----------------------------------------------------------------------------------------------------------------------------------------------\
 Gather  (cost=1318666.18..4967281.40 rows=10000008 width=816) (actual time=16515.365..35155.453 rows=10000000 loops=1)\
   Workers Planned: 2\
   Workers Launched: 2\
   ->  Hash Join  (cost=1317666.18..3966280.60 rows=4166670 width=816) (actual time=16497.442..31216.586 rows=3333333 loops=3)\
         Hash Cond: (b.id = a.id)\
         ->  Parallel Seq Scan on table_b b  (cost=0.00..1194445.40 rows=8333340 width=412) (actual time=0.873..3916.384 rows=6666667 loops=3)\
         ->  Hash  (cost=655556.08..655556.08 rows=10000008 width=412) (actual time=16479.002..16479.003 rows=10000000 loops=3)\
               Buckets: 32768  Batches: 1024  Memory Usage: 4486kB\
               ->  Seq Scan on table_a a  (cost=0.00..655556.08 rows=10000008 width=412) (actual time=0.356..9466.043 rows=10000000 loops=3)\
 Planning Time: 0.814 ms\
 Execution Time: 35431.791 ms\
(11 rows)\
\
huge=# SELECT datname,\
       temp_bytes / 1024 / 1024 AS temp_bytes_mb\
FROM pg_stat_database;\
     datname      | temp_bytes_mb \
------------------+---------------\
                  |             0\
 postgres         |             0\
 template1        |             0\
 template0        |             0\
 fjanus           |             0\
 test             |             0\
 d22              |             0\
 hashjoin_test    |            16\
 compression_test |           196\
 cmptest          |             1\
 huge             |           921\
(11 rows)\
\
huge=# set temp_file_compression = "no";\
SET\
huge=# SELECT pg_stat_reset();\
 pg_stat_reset \
---------------\
 \
(1 row)\
\
huge=# EXPLAIN ANALYZE SELECT a.id, a.data, b.data FROM table_a a JOIN table_b b ON a.id = b.id;\
                                                                   QUERY PLAN                                                                   \
------------------------------------------------------------------------------------------------------------------------------------------------\
 Gather  (cost=1318666.18..4967281.40 rows=10000008 width=816) (actual time=23555.046..69838.589 rows=10000000 loops=1)\
   Workers Planned: 2\
   Workers Launched: 2\
   ->  Hash Join  (cost=1317666.18..3966280.60 rows=4166670 width=816) (actual time=23542.253..59981.044 rows=3333333 loops=3)\
         Hash Cond: (b.id = a.id)\
         ->  Parallel Seq Scan on table_b b  (cost=0.00..1194445.40 rows=8333340 width=412) (actual time=0.397..13283.039 rows=6666667 loops=3)\
         ->  Hash  (cost=655556.08..655556.08 rows=10000008 width=412) (actual time=23520.499..23520.500 rows=10000000 loops=3)\
               Buckets: 32768  Batches: 1024  Memory Usage: 4486kB\
               ->  Seq Scan on table_a a  (cost=0.00..655556.08 rows=10000008 width=412) (actual time=0.378..15375.891 rows=10000000 loops=3)\
 Planning Time: 1.231 ms\
 Execution Time: 70140.003 ms\
(11 rows)\
\
huge=# SELECT datname,\
       temp_bytes / 1024 / 1024 AS temp_bytes_mb\
FROM pg_stat_database;\
     datname      | temp_bytes_mb \
------------------+---------------\
                  |             0\
 postgres         |             0\
 template1        |             0\
 template0        |             0\
 fjanus           |             0\
 test             |             0\
 d22              |             0\
 hashjoin_test    |            16\
 compression_test |           196\
 cmptest          |             1\
 huge             |         20655\
(11 rows)\
\
huge=# SELECT pg_size_pretty(pg_database_size('huge'));\
 pg_size_pretty \
----------------\
 13 GB\
(1 row)\
\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\partightenfactor0
\
\
\
\
}