From b4f7147ac797197e0fd0137b07ace4fdb3d76860 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryzbyj@telsasoft.com>
Date: Sun, 9 Feb 2020 15:08:14 -0600
Subject: [PATCH v9 6/8] Refactor for consistency/symmetry

This moves the hash-table instrumentation state out of execGrouping.c /
TupleHashTable and up into the owning executor nodes (Agg, SetOp,
RecursiveUnion, SubPlan), for consistency with how nodeBitmapHeapscan.c
keeps its instrumentation in the node state rather than in the shared
data structure.

Note: this refactoring is of debatable value on its own; an argument can
be made that the instrumentation is clearer when kept centralized in
execGrouping.c, so this patch is separated out for independent review.
---
 .../postgres_fdw/expected/postgres_fdw.out    | 56 +++++++++++++------
 src/backend/commands/explain.c                | 20 +++----
 src/backend/executor/execGrouping.c           | 33 -----------
 src/backend/executor/nodeAgg.c                |  7 ++-
 src/backend/executor/nodeRecursiveunion.c     |  4 +-
 src/backend/executor/nodeSetOp.c              |  6 +-
 src/backend/executor/nodeSubplan.c            | 12 +++-
 src/include/executor/executor.h               |  1 -
 src/include/executor/nodeAgg.h                |  1 +
 src/include/nodes/execnodes.h                 | 24 +++++++-
 10 files changed, 98 insertions(+), 66 deletions(-)

diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index 62c2697920..2ddae83178 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -2086,9 +2086,11 @@ SELECT t1c1, avg(t1c1 + t2c1) FROM (SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2
          ->  HashAggregate
                Output: t1.c1, avg((t1.c1 + t2.c1))
                Group Key: t1.c1
+               Buckets: 256
                ->  HashAggregate
                      Output: t1.c1, t2.c1
                      Group Key: t1.c1, t2.c1
+                     Buckets: 4096
                      ->  Append
                            ->  Foreign Scan
                                  Output: t1.c1, t2.c1
@@ -2098,7 +2100,7 @@ SELECT t1c1, avg(t1c1 + t2c1) FROM (SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2
                                  Output: t1_1.c1, t2_1.c1
                                  Relations: (public.ft1 t1_1) INNER JOIN (public.ft2 t2_1)
                                  Remote SQL: SELECT r1."C 1", r2."C 1" FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1"))))
-(20 rows)
+(22 rows)
 
 SELECT t1c1, avg(t1c1 + t2c1) FROM (SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) UNION SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) AS t (t1c1, t2c1) GROUP BY t1c1 ORDER BY t1c1 OFFSET 100 LIMIT 10;
  t1c1 |         avg          
@@ -2129,11 +2131,12 @@ SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM
          ->  HashAggregate
                Output: t2.c1, t3.c1
                Group Key: t2.c1, t3.c1
+               Buckets: 2
                ->  Foreign Scan
                      Output: t2.c1, t3.c1
                      Relations: (public.ft1 t2) INNER JOIN (public.ft2 t3)
                      Remote SQL: SELECT r1."C 1", r2."C 1" FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")) AND ((r1.c2 = $1::integer))))
-(13 rows)
+(14 rows)
 
 SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
  C 1 
@@ -2610,10 +2613,11 @@ select c2 * (random() <= 1)::int as c2 from ft2 group by c2 * (random() <= 1)::i
    ->  HashAggregate
          Output: ((c2 * ((random() <= '1'::double precision))::integer))
          Group Key: (ft2.c2 * ((random() <= '1'::double precision))::integer)
+         Buckets: 2
          ->  Foreign Scan on public.ft2
                Output: (c2 * ((random() <= '1'::double precision))::integer)
                Remote SQL: SELECT c2 FROM "S 1"."T 1"
-(9 rows)
+(10 rows)
 
 -- GROUP BY clause in various forms, cardinal, alias and constant expression
 explain (verbose, costs off)
@@ -2713,11 +2717,12 @@ select sum(c1) from ft1 group by c2 having avg(c1 * (random() <= 1)::int) > 100
    ->  HashAggregate
          Output: sum(c1), c2
          Group Key: ft1.c2
+         Buckets: 16
          Filter: (avg((ft1.c1 * ((random() <= '1'::double precision))::integer)) > '100'::numeric)
          ->  Foreign Scan on public.ft1
                Output: c1, c2
                Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1"
-(10 rows)
+(11 rows)
 
 -- Remote aggregate in combination with a local Param (for the output
 -- of an initplan) can be trouble, per bug #15781
@@ -2963,10 +2968,11 @@ select sum(c1) filter (where (c1 / c1) * random() <= 1) from ft1 group by c2 ord
    ->  HashAggregate
          Output: sum(c1) FILTER (WHERE ((((c1 / c1))::double precision * random()) <= '1'::double precision)), c2
          Group Key: ft1.c2
+         Buckets: 16
          ->  Foreign Scan on public.ft1
                Output: c1, c2
                Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1"
-(9 rows)
+(10 rows)
 
 explain (verbose, costs off)
 select sum(c2) filter (where c2 in (select c2 from ft1 where c2 < 5)) from ft1;
@@ -3229,6 +3235,7 @@ select count(*), x.b from ft1, (select c2 a, sum(c1) b from ft1 group by c2) x w
    ->  HashAggregate
          Output: count(*), x.b
          Group Key: x.b
+         Buckets: 16
          ->  Hash Join
                Output: x.b
                Inner Unique: true
@@ -3244,7 +3251,7 @@ select count(*), x.b from ft1, (select c2 a, sum(c1) b from ft1 group by c2) x w
                                  Output: ft1_1.c2, (sum(ft1_1.c1))
                                  Relations: Aggregate on (public.ft1 ft1_1)
                                  Remote SQL: SELECT c2, sum("C 1") FROM "S 1"."T 1" GROUP BY 1
-(21 rows)
+(22 rows)
 
 select count(*), x.b from ft1, (select c2 a, sum(c1) b from ft1 group by c2) x where ft1.c2 = x.a group by x.b order by 1, 2;
  count |   b   
@@ -3449,11 +3456,12 @@ select c2, sum(c1) from ft1 where c2 < 3 group by rollup(c2) order by 1 nulls la
    ->  MixedAggregate
          Output: c2, sum(c1)
          Hash Key: ft1.c2
+         Buckets: 16
          Group Key: ()
          ->  Foreign Scan on public.ft1
                Output: c2, c1
                Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE ((c2 < 3))
-(10 rows)
+(11 rows)
 
 select c2, sum(c1) from ft1 where c2 < 3 group by rollup(c2) order by 1 nulls last;
  c2 |  sum   
@@ -3474,11 +3482,12 @@ select c2, sum(c1) from ft1 where c2 < 3 group by cube(c2) order by 1 nulls last
    ->  MixedAggregate
          Output: c2, sum(c1)
          Hash Key: ft1.c2
+         Buckets: 16
          Group Key: ()
          ->  Foreign Scan on public.ft1
                Output: c2, c1
                Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE ((c2 < 3))
-(10 rows)
+(11 rows)
 
 select c2, sum(c1) from ft1 where c2 < 3 group by cube(c2) order by 1 nulls last;
  c2 |  sum   
@@ -3499,11 +3508,13 @@ select c2, c6, sum(c1) from ft1 where c2 < 3 group by grouping sets(c2, c6) orde
    ->  HashAggregate
          Output: c2, c6, sum(c1)
          Hash Key: ft1.c2
+         Buckets: 16
          Hash Key: ft1.c6
+         Buckets: 16
          ->  Foreign Scan on public.ft1
                Output: c2, c6, c1
                Remote SQL: SELECT "C 1", c2, c6 FROM "S 1"."T 1" WHERE ((c2 < 3))
-(10 rows)
+(12 rows)
 
 select c2, c6, sum(c1) from ft1 where c2 < 3 group by grouping sets(c2, c6) order by 1 nulls last, 2 nulls last;
  c2 | c6 |  sum  
@@ -3526,10 +3537,11 @@ select c2, sum(c1), grouping(c2) from ft1 where c2 < 3 group by c2 order by 1 nu
    ->  HashAggregate
          Output: c2, sum(c1), GROUPING(c2)
          Group Key: ft1.c2
+         Buckets: 16
          ->  Foreign Scan on public.ft1
                Output: c2, c1
                Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE ((c2 < 3))
-(9 rows)
+(10 rows)
 
 select c2, sum(c1), grouping(c2) from ft1 where c2 < 3 group by c2 order by 1 nulls last;
  c2 |  sum  | grouping 
@@ -7147,13 +7159,14 @@ select * from bar where f1 in (select f1 from foo) for update;
                ->  HashAggregate
                      Output: foo.ctid, foo.f1, foo.*, foo.tableoid
                      Group Key: foo.f1
+                     Buckets: 256
                      ->  Append
                            ->  Seq Scan on public.foo foo_1
                                  Output: foo_1.ctid, foo_1.f1, foo_1.*, foo_1.tableoid
                            ->  Foreign Scan on public.foo2 foo_2
                                  Output: foo_2.ctid, foo_2.f1, foo_2.*, foo_2.tableoid
                                  Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
-(23 rows)
+(24 rows)
 
 select * from bar where f1 in (select f1 from foo) for update;
  f1 | f2 
@@ -7185,13 +7198,14 @@ select * from bar where f1 in (select f1 from foo) for share;
                ->  HashAggregate
                      Output: foo.ctid, foo.f1, foo.*, foo.tableoid
                      Group Key: foo.f1
+                     Buckets: 256
                      ->  Append
                            ->  Seq Scan on public.foo foo_1
                                  Output: foo_1.ctid, foo_1.f1, foo_1.*, foo_1.tableoid
                            ->  Foreign Scan on public.foo2 foo_2
                                  Output: foo_2.ctid, foo_2.f1, foo_2.*, foo_2.tableoid
                                  Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
-(23 rows)
+(24 rows)
 
 select * from bar where f1 in (select f1 from foo) for share;
  f1 | f2 
@@ -7222,6 +7236,7 @@ update bar set f2 = f2 + 100 where f1 in (select f1 from foo);
                ->  HashAggregate
                      Output: foo.ctid, foo.f1, foo.*, foo.tableoid
                      Group Key: foo.f1
+                     Buckets: 256
                      ->  Append
                            ->  Seq Scan on public.foo foo_1
                                  Output: foo_1.ctid, foo_1.f1, foo_1.*, foo_1.tableoid
@@ -7240,13 +7255,14 @@ update bar set f2 = f2 + 100 where f1 in (select f1 from foo);
                ->  HashAggregate
                      Output: foo.ctid, foo.f1, foo.*, foo.tableoid
                      Group Key: foo.f1
+                     Buckets: 256
                      ->  Append
                            ->  Seq Scan on public.foo foo_1
                                  Output: foo_1.ctid, foo_1.f1, foo_1.*, foo_1.tableoid
                            ->  Foreign Scan on public.foo2 foo_2
                                  Output: foo_2.ctid, foo_2.f1, foo_2.*, foo_2.tableoid
                                  Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
-(39 rows)
+(41 rows)
 
 update bar set f2 = f2 + 100 where f1 in (select f1 from foo);
 select tableoid::regclass, * from bar order by 1,2;
@@ -8751,12 +8767,13 @@ SELECT a, sum(b), min(b), count(*) FROM pagg_tab GROUP BY a HAVING avg(b) < 22 O
    Sort Key: pagg_tab.a
    ->  HashAggregate
          Group Key: pagg_tab.a
+         Buckets: 64
          Filter: (avg(pagg_tab.b) < '22'::numeric)
          ->  Append
                ->  Foreign Scan on fpagg_tab_p1 pagg_tab_1
                ->  Foreign Scan on fpagg_tab_p2 pagg_tab_2
                ->  Foreign Scan on fpagg_tab_p3 pagg_tab_3
-(9 rows)
+(10 rows)
 
 -- Plan with partitionwise aggregates is enabled
 SET enable_partitionwise_aggregate TO true;
@@ -8799,6 +8816,7 @@ SELECT a, count(t1) FROM pagg_tab t1 GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
          ->  HashAggregate
                Output: t1.a, count(((t1.*)::pagg_tab))
                Group Key: t1.a
+               Buckets: 16
                Filter: (avg(t1.b) < '22'::numeric)
                ->  Foreign Scan on public.fpagg_tab_p1 t1
                      Output: t1.a, t1.*, t1.b
@@ -8806,6 +8824,7 @@ SELECT a, count(t1) FROM pagg_tab t1 GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
          ->  HashAggregate
                Output: t1_1.a, count(((t1_1.*)::pagg_tab))
                Group Key: t1_1.a
+               Buckets: 16
                Filter: (avg(t1_1.b) < '22'::numeric)
                ->  Foreign Scan on public.fpagg_tab_p2 t1_1
                      Output: t1_1.a, t1_1.*, t1_1.b
@@ -8813,11 +8832,12 @@ SELECT a, count(t1) FROM pagg_tab t1 GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
          ->  HashAggregate
                Output: t1_2.a, count(((t1_2.*)::pagg_tab))
                Group Key: t1_2.a
+               Buckets: 16
                Filter: (avg(t1_2.b) < '22'::numeric)
                ->  Foreign Scan on public.fpagg_tab_p3 t1_2
                      Output: t1_2.a, t1_2.*, t1_2.b
                      Remote SQL: SELECT a, b, c FROM public.pagg_tab_p3
-(25 rows)
+(28 rows)
 
 SELECT a, count(t1) FROM pagg_tab t1 GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
  a  | count 
@@ -8839,18 +8859,22 @@ SELECT b, avg(a), max(a), count(*) FROM pagg_tab GROUP BY b HAVING sum(a) < 700
    Sort Key: pagg_tab.b
    ->  Finalize HashAggregate
          Group Key: pagg_tab.b
+         Buckets: 64
          Filter: (sum(pagg_tab.a) < 700)
          ->  Append
                ->  Partial HashAggregate
                      Group Key: pagg_tab.b
+                     Buckets: 64
                      ->  Foreign Scan on fpagg_tab_p1 pagg_tab
                ->  Partial HashAggregate
                      Group Key: pagg_tab_1.b
+                     Buckets: 64
                      ->  Foreign Scan on fpagg_tab_p2 pagg_tab_1
                ->  Partial HashAggregate
                      Group Key: pagg_tab_2.b
+                     Buckets: 64
                      ->  Foreign Scan on fpagg_tab_p3 pagg_tab_2
-(15 rows)
+(19 rows)
 
 -- ===================================================================
 -- access rights and superuser
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 579a14abf2..b80f7b3c16 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -1393,13 +1393,13 @@ ExplainNode(PlanState *planstate, List *ancestors,
 			{
 				ExplainIndentText(es);
 				appendStringInfoString(es->str, "Hashtable: ");
-				show_tuplehash_info(&subplanstate->hashtable->instrument, NULL, es);
+				show_tuplehash_info(&subplanstate->instrument, NULL, es);
 			}
 			if (subplanstate->hashnulls)
 			{
 				ExplainIndentText(es);
 				appendStringInfoString(es->str, "Null Hashtable: ");
-				show_tuplehash_info(&subplanstate->hashnulls->instrument, NULL, es);
+				show_tuplehash_info(&subplanstate->instrument_nulls, NULL, es);
 			}
 		}
 		if (es->indent)
@@ -1433,14 +1433,14 @@ ExplainNode(PlanState *planstate, List *ancestors,
 		if (subplanstate && subplanstate->hashtable)
 		{
 			ExplainOpenGroup("Hashtable", "Hashtable", true, es);
-			show_tuplehash_info(&subplanstate->hashtable->instrument, NULL, es);
+			show_tuplehash_info(&subplanstate->instrument, NULL, es);
 			ExplainCloseGroup("Hashtable", "Hashtable", true, es);
 		}
 
 		if (subplanstate && subplanstate->hashnulls)
 		{
 			ExplainOpenGroup("Null Hashtable", "Null Hashtable", true, es);
-			show_tuplehash_info(&subplanstate->hashnulls->instrument, NULL, es);
+			show_tuplehash_info(&subplanstate->instrument_nulls, NULL, es);
 			ExplainCloseGroup("Null Hashtable", "Null Hashtable", true, es);
 		}
 	}
@@ -1971,14 +1971,14 @@ ExplainNode(PlanState *planstate, List *ancestors,
 			{
 				SetOpState *sos = castNode(SetOpState, planstate);
 				if (sos->hashtable)
-					show_tuplehash_info(&sos->hashtable->instrument, NULL, es);
+					show_tuplehash_info(&sos->instrument, NULL, es);
 			}
 			break;
 		case T_RecursiveUnion:
 			{
 				RecursiveUnionState *rus = (RecursiveUnionState *)planstate;
 				if (rus->hashtable)
-					show_tuplehash_info(&rus->hashtable->instrument, NULL, es);
+					show_tuplehash_info(&rus->instrument, NULL, es);
 			}
 			break;
 		case T_Group:
@@ -2394,7 +2394,7 @@ show_agg_keys(AggState *astate, List *ancestors,
 								 ancestors, es);
 			Assert(astate->num_hashes <= 1);
 			if (astate->num_hashes)
-				show_tuplehash_info(&astate->perhash[0].hashtable->instrument, astate, es);
+				show_tuplehash_info(&astate->perhash[0].instrument, astate, es);
 		}
 
 		ancestors = list_delete_first(ancestors);
@@ -2421,7 +2421,7 @@ show_grouping_sets(AggState *aggstate, Agg *agg,
 
 	show_grouping_set_info(aggstate, agg, NULL, context, useprefix, ancestors,
 			aggstate->num_hashes ?
-			&aggstate->perhash[setno++].hashtable->instrument : NULL,
+			&aggstate->perhash[setno++].instrument : NULL,
 			es);
 
 	foreach(lc, agg->chain)
@@ -2434,7 +2434,7 @@ show_grouping_sets(AggState *aggstate, Agg *agg,
 				aggnode->aggstrategy == AGG_MIXED)
 		{
 			Assert(setno < aggstate->num_hashes);
-			inst = &aggstate->perhash[setno++].hashtable->instrument;
+			inst = &aggstate->perhash[setno++].instrument;
 		}
 
 		show_grouping_set_info(aggstate, aggnode, sortnode,
@@ -3243,7 +3243,7 @@ show_tidbitmap_info(BitmapHeapScanState *planstate, ExplainState *es)
 		}
 	}
 
-	show_tuplehash_info(&planstate->instrument, es, NULL);
+	show_tuplehash_info(&planstate->instrument, NULL, es);
 }
 
 /*
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index 10276d3f58..009d27b9a8 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -188,7 +188,6 @@ BuildTupleHashTableExt(PlanState *parent,
 	hashtable->inputslot = NULL;
 	hashtable->in_hash_funcs = NULL;
 	hashtable->cur_eq_func = NULL;
-	memset(&hashtable->instrument, 0, sizeof(hashtable->instrument));
 
 	/*
 	 * If parallelism is in use, even if the master backend is performing the
@@ -204,7 +203,6 @@ BuildTupleHashTableExt(PlanState *parent,
 		hashtable->hash_iv = 0;
 
 	hashtable->hashtab = tuplehash_create(metacxt, nbuckets, hashtable);
-	UpdateTupleHashTableStats(hashtable, true);
 
 	/*
 	 * We copy the input tuple descriptor just for safety --- we assume all
@@ -283,40 +281,9 @@ BuildTupleHashTable(PlanState *parent,
 void
 ResetTupleHashTable(TupleHashTable hashtable)
 {
-	UpdateTupleHashTableStats(hashtable, false);
 	tuplehash_reset(hashtable->hashtab);
 }
 
-/* Update instrumentation stats */
-void
-UpdateTupleHashTableStats(TupleHashTable hashtable, bool initial)
-{
-	hashtable->instrument.nbuckets = hashtable->hashtab->size;
-	if (initial)
-	{
-		hashtable->instrument.nbuckets_original = hashtable->hashtab->size;
-		// hashtable->instrument.space_peak_hash = hashtable->hashtab->size *
-			// sizeof(TupleHashEntryData);
-		hashtable->instrument.space_peak_hash =
-			MemoryContextMemAllocated(hashtable->hashtab->ctx, true);
-		hashtable->instrument.space_peak_tuples = 0;
-	}
-	else
-	{
-		/* hashtable->entrysize includes additionalsize */
-		size_t hash_size = MemoryContextMemAllocated(hashtable->hashtab->ctx, true);
-		size_t tuple_size = MemoryContextMemAllocated(hashtable->tablecxt, true);
-
-		hashtable->instrument.space_peak_hash = Max(
-			hashtable->instrument.space_peak_hash,
-			hash_size);
-
-		hashtable->instrument.space_peak_tuples = Max(
-			hashtable->instrument.space_peak_tuples, tuple_size);
-				// hashtable->hashtab->members * hashtable->entrysize);
-	}
-}
-
 /*
  * Find or create a hashtable entry for the tuple group containing the
  * given tuple.  The tuple must be the same type as the hashtable entries.
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 2d6783843a..c6d03521e4 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -1500,6 +1500,10 @@ build_hash_table(AggState *aggstate, int setno, long nbuckets)
 		hashcxt,
 		tmpcxt,
 		DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
+
+	InitTupleHashTableStats(perhash->instrument,
+			perhash->hashtable->hashtab,
+			hashcxt, additionalsize);
 }
 
 /*
@@ -1878,7 +1882,8 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
 	{
 		hash_mem += MemoryContextMemAllocated(
 			aggstate->perhash[i].hashcontext->ecxt_per_tuple_memory, true);
-		UpdateTupleHashTableStats(aggstate->perhash[i].hashtable, false);
+		UpdateTupleHashTableStats(aggstate->perhash[i].instrument,
+				aggstate->perhash[i].hashtable->hashtab);
 	}
 
 	/* memory for read/write tape buffers, if spilled XXX */
diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c
index 93272c28b1..5e70e008e5 100644
--- a/src/backend/executor/nodeRecursiveunion.c
+++ b/src/backend/executor/nodeRecursiveunion.c
@@ -50,6 +50,8 @@ build_hash_table(RecursiveUnionState *rustate)
 												rustate->tableContext,
 												rustate->tempContext,
 												false);
+
+	InitTupleHashTableStats(rustate->instrument, rustate->hashtable->hashtab, rustate->tableContext, 0);
 }
 
 
@@ -157,7 +159,7 @@ ExecRecursiveUnion(PlanState *pstate)
 	}
 
 	if (node->hashtable)
-		UpdateTupleHashTableStats(node->hashtable, false);
+		UpdateTupleHashTableStats(node->instrument, node->hashtable->hashtab);
 
 	return NULL;
 }
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index 9c0e0ab96e..5eca128183 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -139,6 +139,9 @@ build_hash_table(SetOpState *setopstate)
 												   setopstate->tableContext,
 												   econtext->ecxt_per_tuple_memory,
 												   false);
+
+	InitTupleHashTableStats(setopstate->instrument,
+			setopstate->hashtable->hashtab, setopstate->tableContext, 0);
 }
 
 /*
@@ -415,7 +418,8 @@ setop_fill_hash_table(SetOpState *setopstate)
 
 	setopstate->table_filled = true;
 	/* Initialize to walk the hash table */
-	UpdateTupleHashTableStats(setopstate->hashtable, false);
+	UpdateTupleHashTableStats(setopstate->instrument,
+			setopstate->hashtable->hashtab);
 	ResetTupleHashIterator(setopstate->hashtable, &setopstate->hashiter);
 }
 
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index 22c32612ba..0de6be40e4 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -505,6 +505,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
 	if (node->hashtable)
 		ResetTupleHashTable(node->hashtable);
 	else
+	{
 		node->hashtable = BuildTupleHashTableExt(node->parent,
 												 node->descRight,
 												 ncols,
@@ -518,6 +519,9 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
 												 node->hashtablecxt,
 												 node->hashtempcxt,
 												 false);
+		InitTupleHashTableStats(node->instrument, node->hashtable->hashtab,
+				node->hashtablecxt, 0);
+	}
 
 	if (!subplan->unknownEqFalse)
 	{
@@ -533,6 +537,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
 		if (node->hashnulls)
 			ResetTupleHashTable(node->hashnulls);
 		else
+		{
 			node->hashnulls = BuildTupleHashTableExt(node->parent,
 													 node->descRight,
 													 ncols,
@@ -546,6 +551,9 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
 													 node->hashtablecxt,
 													 node->hashtempcxt,
 													 false);
+			InitTupleHashTableStats(node->instrument_nulls,
+					node->hashnulls->hashtab, node->hashtablecxt, 0);
+		}
 	}
 	else
 		node->hashnulls = NULL;
@@ -621,9 +629,9 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
 	ExecClearTuple(node->projRight->pi_state.resultslot);
 
 	MemoryContextSwitchTo(oldcontext);
-	UpdateTupleHashTableStats(node->hashtable, false);
+	UpdateTupleHashTableStats(node->instrument, node->hashtable->hashtab);
 	if (node->hashnulls)
-		UpdateTupleHashTableStats(node->hashnulls, false);
+		UpdateTupleHashTableStats(node->instrument_nulls, node->hashnulls->hashtab);
 }
 
 /*
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index f71cc03ad5..c7deeac662 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -150,7 +150,6 @@ extern TupleHashEntry FindTupleHashEntry(TupleHashTable hashtable,
 										 ExprState *eqcomp,
 										 FmgrInfo *hashfunctions);
 extern void ResetTupleHashTable(TupleHashTable hashtable);
-extern void UpdateTupleHashTableStats(TupleHashTable hashtable, bool initial);
 
 /*
  * prototypes from functions in execJunk.c
diff --git a/src/include/executor/nodeAgg.h b/src/include/executor/nodeAgg.h
index 72a9568bbe..c49f068f5c 100644
--- a/src/include/executor/nodeAgg.h
+++ b/src/include/executor/nodeAgg.h
@@ -309,6 +309,7 @@ typedef struct AggStatePerHashData
 	Agg		   *aggnode;		/* original Agg node, for numGroups etc. */
 	MemoryContext	hash_metacxt;	/* memory for hash table itself */
 	ExprContext	*hashcontext;	/* context for hash table data */
+	HashTableInstrumentation    instrument;
 }			AggStatePerHashData;
 
 
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 19b657263b..10239aea4f 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -693,12 +693,31 @@ typedef struct TupleHashEntryData
 #define SH_DECLARE
 #include "lib/simplehash.h"
 
+#define InitTupleHashTableStats(instr, htable, tupctx, addsize) \
+	do{\
+	instr.entrysize = sizeof(MinimalTuple) + addsize; \
+	instr.tuplectx = tupctx; \
+	instr.nbuckets = htable->size; \
+	instr.nbuckets_original = htable->size; \
+	instr.space_peak_hash = MemoryContextMemAllocated(htable->ctx, false); \
+	instr.space_peak_tuples = 0; \
+	}while(0)
+
+#define UpdateTupleHashTableStats(instr, htable) \
+	do{\
+	instr.nbuckets = htable->size; \
+	instr.space_peak_hash = Max(instr.space_peak_hash, MemoryContextMemAllocated(htable->ctx, false)); \
+	instr.space_peak_tuples = Max(instr.space_peak_tuples, MemoryContextMemAllocated(instr.tuplectx, false)); \
+	}while(0)
+
 typedef struct HashTableInstrumentation
 {
+	size_t	entrysize;				/* Includes additionalsize */
 	size_t	nbuckets;				/* number of buckets at end of execution */
 	size_t	nbuckets_original;		/* planned number of buckets */
 	size_t	space_peak_hash;		/* peak memory usage in bytes */
 	size_t	space_peak_tuples;		/* peak memory usage in bytes */
+	MemoryContext	tuplectx;		/* Context where tuples are stored */
 } HashTableInstrumentation;
 
 typedef struct TupleHashTableData
@@ -719,7 +738,6 @@ typedef struct TupleHashTableData
 	ExprState  *cur_eq_func;	/* comparator for input vs. table */
 	uint32		hash_iv;		/* hash-function IV */
 	ExprContext *exprcontext;	/* expression context */
-	HashTableInstrumentation instrument;
 }			TupleHashTableData;
 
 typedef tuplehash_iterator TupleHashIterator;
@@ -885,6 +903,8 @@ typedef struct SubPlanState
 	FmgrInfo   *lhs_hash_funcs; /* hash functions for lefthand datatype(s) */
 	FmgrInfo   *cur_eq_funcs;	/* equality functions for LHS vs. table */
 	ExprState  *cur_eq_comp;	/* equality comparator for LHS vs. table */
+	HashTableInstrumentation instrument;
+	HashTableInstrumentation instrument_nulls; /* instrumentation for nulls hashtable */
 } SubPlanState;
 
 /* ----------------
@@ -1293,6 +1313,7 @@ typedef struct RecursiveUnionState
 	MemoryContext tempContext;	/* short-term context for comparisons */
 	TupleHashTable hashtable;	/* hash table for tuples already seen */
 	MemoryContext tableContext; /* memory context containing hash table */
+	HashTableInstrumentation instrument;
 } RecursiveUnionState;
 
 /* ----------------
@@ -2424,6 +2445,7 @@ typedef struct SetOpState
 	MemoryContext tableContext; /* memory context containing hash table */
 	bool		table_filled;	/* hash table filled yet? */
 	TupleHashIterator hashiter; /* for iterating through hash table */
+	HashTableInstrumentation instrument;
 } SetOpState;
 
 /* ----------------
-- 
2.17.0

