From 88ef1773d5dcc35db1ed1158b1f66690668d5593 Mon Sep 17 00:00:00 2001
From: Thomas Munro <thomas.munro@gmail.com>
Date: Thu, 18 Mar 2021 20:30:44 +1300
Subject: [PATCH] Fix oversized memory allocation in Parallel Hash Join.

While expanding the number of batches from 1 to a higher number,
Parallel Hash Join recomputes the size of the bucket array for batch
0.  The bucket count was clamped to MaxAllocSize /
sizeof(dsa_pointer_atomic), which is not itself a power of 2, and then
rounded up to the next power of 2 by my_log2().  The resulting
allocation could therefore exceed MaxAllocSize by one byte and raise
an error.  Fix by rounding the clamp value down to a power of 2 before
using it.
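
To make the arithmetic concrete, here is a minimal standalone sketch
(not part of the patch).  my_log2_demo() is a simplified stand-in for
my_log2(), which rounds up to the next power of 2, and uint64_t stands
in for dsa_pointer_atomic, assuming its typical 8-byte size:

    #include <stdint.h>
    #include <stdio.h>

    #define MaxAllocSize ((size_t) 0x3fffffff)	/* as in memutils.h */

    /* Stand-in for my_log2(): smallest k such that 2^k >= num. */
    static int
    my_log2_demo(size_t num)
    {
        int         i;
        size_t      limit;

        for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
            ;
        return i;
    }

    int
    main(void)
    {
        /* Assume 8-byte atomic pointers; uint64_t stands in here. */
        size_t      max_pointers = MaxAllocSize / sizeof(uint64_t);
        size_t      mppow2;

        /* Unfixed: clamp to max_pointers, then round the count up. */
        printf("unfixed: %zu bytes (MaxAllocSize = %zu)\n",
               ((size_t) 1 << my_log2_demo(max_pointers)) * sizeof(uint64_t),
               MaxAllocSize);

        /* Fixed: round max_pointers down to a power of 2 first. */
        mppow2 = (size_t) 1 << my_log2_demo(max_pointers);
        if (max_pointers != mppow2)
            max_pointers = mppow2 / 2;
        printf("fixed:   %zu bytes\n",
               ((size_t) 1 << my_log2_demo(max_pointers)) * sizeof(uint64_t));
        return 0;
    }

With these assumptions, the unfixed path asks for 1073741824 bytes,
exactly one byte past MaxAllocSize (1073741823), while the fixed path
stays at 536870912 bytes.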

Back-patch to 11.

Bug: #16925
Reported-by: Karen Talarico <karen.talarico@swarm64.com>
Discussion: https://postgr.es/m/16925-ec96d83529d0d629%40postgresql.org
---
 src/backend/executor/nodeHash.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index c5f2d1d22b..1dbb3d0453 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -1135,6 +1135,8 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
 					double		dtuples;
 					double		dbuckets;
 					int			new_nbuckets;
+					size_t		max_pointers;
+					size_t		mppow2;
 
 					/*
 					 * We probably also need a smaller bucket array.  How many
@@ -1146,10 +1148,14 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
 					 * batches and it would be wasteful to keep the large
 					 * array.
 					 */
+					max_pointers = MaxAllocSize / sizeof(dsa_pointer_atomic);
+					/* If max_pointers isn't a power of 2, must round it down to one */
+					mppow2 = 1L << my_log2(max_pointers);
+					if (max_pointers != mppow2)
+						max_pointers = mppow2 / 2;
 					dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
 					dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
-					dbuckets = Min(dbuckets,
-								   MaxAllocSize / sizeof(dsa_pointer_atomic));
+					dbuckets = Min(dbuckets, max_pointers);
 					new_nbuckets = (int) dbuckets;
 					new_nbuckets = Max(new_nbuckets, 1024);
 					new_nbuckets = 1 << my_log2(new_nbuckets);
-- 
2.30.1
