From f5e4834638034ecf1b646f913299a6f609df451e Mon Sep 17 00:00:00 2001
From: Jelte Fennema-Nio <postgres@jeltef.nl>
Date: Thu, 4 Dec 2025 15:36:19 +0100
Subject: [PATCH v7 3/5] Use hash_make macros throughout the codebase

This shows how our code base looks when using the new APIs. This has
some type safety, readability and maintainability benefits, but it also
introduces some backpatching problems. These backpatching problems
cannot be resolved by backporting the new hash_make macros, because some
of them require C11 (which we only require on master for now). I think
it's unlikely that we'll need to backpatch things in code that creates
hashtables though, so it could still be worth it to do this complete
refactor.

At the very least we should apply the new macros in a few chosen places to
make sure they have test coverage.
---
 contrib/dblink/dblink.c                       | 10 +--
 .../pg_stat_statements/pg_stat_statements.c   | 10 +--
 contrib/pg_trgm/trgm_regexp.c                 |  9 +--
 contrib/postgres_fdw/connection.c             | 10 +--
 contrib/postgres_fdw/shippable.c              | 10 ++-
 contrib/tablefunc/tablefunc.c                 | 17 ++---
 src/backend/access/common/heaptuple.c         | 13 +---
 src/backend/access/gist/gistbuild.c           | 11 +---
 src/backend/access/gist/gistbuildbuffers.c    | 10 +--
 src/backend/access/hash/hashpage.c            | 13 +---
 src/backend/access/heap/rewriteheap.c         | 39 ++++-------
 src/backend/access/transam/xlogprefetcher.c   |  8 +--
 src/backend/access/transam/xlogutils.c        | 13 ++--
 src/backend/catalog/pg_enum.c                 | 24 ++-----
 src/backend/catalog/pg_inherits.c             | 11 +---
 src/backend/catalog/storage.c                 | 21 ++----
 src/backend/commands/async.c                  | 49 ++++----------
 src/backend/commands/prepare.c                | 12 +---
 src/backend/commands/sequence.c               | 10 +--
 src/backend/commands/tablecmds.c              | 16 +----
 src/backend/executor/nodeModifyTable.c        | 10 +--
 src/backend/nodes/extensible.c                | 10 +--
 src/backend/optimizer/util/plancat.c          | 17 ++---
 src/backend/optimizer/util/predtest.c         |  9 +--
 src/backend/optimizer/util/relnode.c          | 13 +---
 src/backend/parser/parse_oper.c               | 10 ++-
 src/backend/partitioning/partdesc.c           |  9 +--
 src/backend/postmaster/autovacuum.c           | 22 ++-----
 src/backend/postmaster/checkpointer.c         | 12 +---
 .../replication/logical/applyparallelworker.c | 13 +---
 src/backend/replication/logical/relation.c    | 22 ++-----
 .../replication/logical/reorderbuffer.c       | 29 ++------
 src/backend/replication/logical/tablesync.c   | 10 ++-
 src/backend/replication/pgoutput/pgoutput.c   | 11 +---
 src/backend/storage/buffer/buf_table.c        | 15 ++---
 src/backend/storage/buffer/bufmgr.c           | 11 ++--
 src/backend/storage/buffer/localbuf.c         | 11 +---
 src/backend/storage/file/reinit.c             |  8 +--
 src/backend/storage/ipc/shmem.c               | 33 +++++++---
 src/backend/storage/ipc/standby.c             | 20 ++----
 src/backend/storage/lmgr/lock.c               | 55 +++++-----------
 src/backend/storage/lmgr/lwlock.c             |  8 +--
 src/backend/storage/lmgr/predicate.c          | 66 ++++++++-----------
 src/backend/storage/smgr/smgr.c               | 10 ++-
 src/backend/storage/sync/sync.c               | 11 +---
 src/backend/tsearch/ts_typanalyze.c           | 13 +---
 src/backend/utils/activity/wait_event.c       | 23 +++----
 src/backend/utils/adt/array_typanalyze.c      | 23 ++-----
 src/backend/utils/adt/json.c                  | 15 +----
 src/backend/utils/adt/jsonfuncs.c             | 20 ++----
 src/backend/utils/adt/mcxtfuncs.c             | 11 +---
 src/backend/utils/adt/ri_triggers.c           | 29 ++++----
 src/backend/utils/adt/ruleutils.c             | 23 ++-----
 src/backend/utils/cache/attoptcache.c         | 16 ++---
 src/backend/utils/cache/evtcache.c            |  9 +--
 src/backend/utils/cache/funccache.c           | 14 ++--
 src/backend/utils/cache/relcache.c            | 19 ++----
 src/backend/utils/cache/relfilenumbermap.c    |  9 +--
 src/backend/utils/cache/spccache.c            |  9 +--
 src/backend/utils/cache/ts_cache.c            | 27 +++-----
 src/backend/utils/cache/typcache.c            | 33 ++++------
 src/backend/utils/fmgr/dfmgr.c                | 12 ++--
 src/backend/utils/fmgr/fmgr.c                 | 11 +---
 src/backend/utils/misc/guc.c                  | 14 ++--
 src/backend/utils/misc/injection_point.c      | 14 ++--
 src/backend/utils/mmgr/portalmem.c            | 10 +--
 src/backend/utils/time/combocid.c             | 13 +---
 src/pl/plperl/plperl.c                        | 32 +++------
 src/pl/plpgsql/src/pl_exec.c                  | 32 ++++-----
 src/pl/plpython/plpy_plpymodule.c             |  9 ++-
 src/pl/plpython/plpy_procedure.c              |  9 +--
 src/pl/tcl/pltcl.c                            | 19 ++----
 src/timezone/pgtz.c                           | 12 +---
 73 files changed, 380 insertions(+), 861 deletions(-)

diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index 8cb3166495c..192418df4af 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -2540,13 +2540,9 @@ getConnectionByName(const char *name)
 static HTAB *
 createConnHash(void)
 {
-	HASHCTL		ctl;
-
-	ctl.keysize = NAMEDATALEN;
-	ctl.entrysize = sizeof(remoteConnHashEnt);
-
-	return hash_create("Remote Con hash", NUMCONN, &ctl,
-					   HASH_ELEM | HASH_STRINGS);
+	return hash_make_cxt(remoteConnHashEnt, name,
+						 "Remote Con hash", NUMCONN,
+						 TopMemoryContext);
 }
 
 static remoteConn *
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 4a427533bd8..8a67390e561 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -517,7 +517,6 @@ static void
 pgss_shmem_startup(void)
 {
 	bool		found;
-	HASHCTL		info;
 	FILE	   *file = NULL;
 	FILE	   *qfile = NULL;
 	uint32		header;
@@ -557,12 +556,9 @@ pgss_shmem_startup(void)
 		pgss->stats.stats_reset = GetCurrentTimestamp();
 	}
 
-	info.keysize = sizeof(pgssHashKey);
-	info.entrysize = sizeof(pgssEntry);
-	pgss_hash = ShmemInitHash("pg_stat_statements hash",
-							  pgss_max, pgss_max,
-							  &info,
-							  HASH_ELEM | HASH_BLOBS);
+	pgss_hash = shmem_hash_make(pgssEntry, key,
+								"pg_stat_statements hash",
+								pgss_max, pgss_max);
 
 	LWLockRelease(AddinShmemInitLock);
 
diff --git a/contrib/pg_trgm/trgm_regexp.c b/contrib/pg_trgm/trgm_regexp.c
index 1d1b5fe304d..ecd4d677948 100644
--- a/contrib/pg_trgm/trgm_regexp.c
+++ b/contrib/pg_trgm/trgm_regexp.c
@@ -893,7 +893,6 @@ convertPgWchar(pg_wchar c, trgm_mb_char *result)
 static void
 transformGraph(TrgmNFA *trgmNFA)
 {
-	HASHCTL		hashCtl;
 	TrgmStateKey initkey;
 	TrgmState  *initstate;
 	ListCell   *lc;
@@ -905,13 +904,7 @@ transformGraph(TrgmNFA *trgmNFA)
 	trgmNFA->overflowed = false;
 
 	/* Create hashtable for states */
-	hashCtl.keysize = sizeof(TrgmStateKey);
-	hashCtl.entrysize = sizeof(TrgmState);
-	hashCtl.hcxt = CurrentMemoryContext;
-	trgmNFA->states = hash_create("Trigram NFA",
-								  1024,
-								  &hashCtl,
-								  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	trgmNFA->states = hash_make(TrgmState, stateKey, "Trigram NFA", 1024);
 	trgmNFA->nstates = 0;
 
 	/* Create initial state: ambiguous prefix, NFA's initial state */
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index 487a1a23170..5c77c9ecde5 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -214,17 +214,13 @@ GetConnection(UserMapping *user, bool will_prep_stmt, PgFdwConnState **state)
 	/* First time through, initialize connection cache hashtable */
 	if (ConnectionHash == NULL)
 	{
-		HASHCTL		ctl;
-
 		if (pgfdw_we_get_result == 0)
 			pgfdw_we_get_result =
 				WaitEventExtensionNew("PostgresFdwGetResult");
 
-		ctl.keysize = sizeof(ConnCacheKey);
-		ctl.entrysize = sizeof(ConnCacheEntry);
-		ConnectionHash = hash_create("postgres_fdw connections", 8,
-									 &ctl,
-									 HASH_ELEM | HASH_BLOBS);
+		ConnectionHash = hash_make_cxt(ConnCacheEntry, key,
+									   "postgres_fdw connections", 8,
+									   TopMemoryContext);
 
 		/*
 		 * Register some callback functions that manage connection cleanup.
diff --git a/contrib/postgres_fdw/shippable.c b/contrib/postgres_fdw/shippable.c
index d32d3d0e461..ec1852490e0 100644
--- a/contrib/postgres_fdw/shippable.c
+++ b/contrib/postgres_fdw/shippable.c
@@ -28,6 +28,7 @@
 #include "postgres_fdw.h"
 #include "utils/hsearch.h"
 #include "utils/inval.h"
+#include "utils/memutils.h"
 #include "utils/syscache.h"
 
 /* Hash table for caching the results of shippability lookups */
@@ -90,13 +91,10 @@ InvalidateShippableCacheCallback(Datum arg, int cacheid, uint32 hashvalue)
 static void
 InitializeShippableCache(void)
 {
-	HASHCTL		ctl;
-
 	/* Create the hash table. */
-	ctl.keysize = sizeof(ShippableCacheKey);
-	ctl.entrysize = sizeof(ShippableCacheEntry);
-	ShippableCacheHash =
-		hash_create("Shippability cache", 256, &ctl, HASH_ELEM | HASH_BLOBS);
+	ShippableCacheHash = hash_make_cxt(ShippableCacheEntry, key,
+									   "Shippability cache", 256,
+									   TopMemoryContext);
 
 	/* Set up invalidation callback on pg_foreign_server. */
 	CacheRegisterSyscacheCallback(FOREIGNSERVEROID,
diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c
index ca2434c6e19..426f4b236a2 100644
--- a/contrib/tablefunc/tablefunc.c
+++ b/contrib/tablefunc/tablefunc.c
@@ -705,24 +705,17 @@ static HTAB *
 load_categories_hash(char *cats_sql, MemoryContext per_query_ctx)
 {
 	HTAB	   *crosstab_hash;
-	HASHCTL		ctl;
 	int			ret;
 	uint64		proc;
 	MemoryContext SPIcontext;
 
-	/* initialize the category hash table */
-	ctl.keysize = MAX_CATNAME_LEN;
-	ctl.entrysize = sizeof(crosstab_HashEnt);
-	ctl.hcxt = per_query_ctx;
-
 	/*
-	 * use INIT_CATS, defined above as a guess of how many hash table entries
-	 * to create, initially
+	 * Initialize the category hash table. Use INIT_CATS, defined above as a
+	 * guess of how many hash table entries to create, initially.
 	 */
-	crosstab_hash = hash_create("crosstab hash",
-								INIT_CATS,
-								&ctl,
-								HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+	crosstab_hash = hash_make_cxt(crosstab_HashEnt, internal_catname,
+								  "crosstab hash", INIT_CATS,
+								  per_query_ctx);
 
 	/* Connect to SPI manager */
 	SPI_connect();
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 11bec20e82e..e648001f11b 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -125,18 +125,9 @@ missing_match(const void *key1, const void *key2, Size keysize)
 static void
 init_missing_cache(void)
 {
-	HASHCTL		hash_ctl;
-
-	hash_ctl.keysize = sizeof(missing_cache_key);
-	hash_ctl.entrysize = sizeof(missing_cache_key);
-	hash_ctl.hcxt = TopMemoryContext;
-	hash_ctl.hash = missing_hash;
-	hash_ctl.match = missing_match;
 	missing_cache =
-		hash_create("Missing Values Cache",
-					32,
-					&hash_ctl,
-					HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION | HASH_COMPARE);
+		hashset_make_fn_cxt(missing_cache_key, "Missing Values Cache", 32,
+							missing_hash, missing_match, TopMemoryContext);
 }
 
 /* ----------------------------------------------------------------
diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c
index 7f57c787f4c..09f11c459d7 100644
--- a/src/backend/access/gist/gistbuild.c
+++ b/src/backend/access/gist/gistbuild.c
@@ -1515,15 +1515,8 @@ typedef struct
 static void
 gistInitParentMap(GISTBuildState *buildstate)
 {
-	HASHCTL		hashCtl;
-
-	hashCtl.keysize = sizeof(BlockNumber);
-	hashCtl.entrysize = sizeof(ParentMapEntry);
-	hashCtl.hcxt = CurrentMemoryContext;
-	buildstate->parentMap = hash_create("gistbuild parent map",
-										1024,
-										&hashCtl,
-										HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	buildstate->parentMap = hash_make(ParentMapEntry, childblkno,
+									  "gistbuild parent map", 1024);
 }
 
 static void
diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c
index 3213cf45aa6..029af8542f9 100644
--- a/src/backend/access/gist/gistbuildbuffers.c
+++ b/src/backend/access/gist/gistbuildbuffers.c
@@ -44,7 +44,6 @@ GISTBuildBuffers *
 gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel)
 {
 	GISTBuildBuffers *gfbb;
-	HASHCTL		hashCtl;
 
 	gfbb = palloc_object(GISTBuildBuffers);
 	gfbb->pagesPerBuffer = pagesPerBuffer;
@@ -72,13 +71,8 @@ gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel)
 	 * nodeBuffersTab hash is association between index blocks and it's
 	 * buffers.
 	 */
-	hashCtl.keysize = sizeof(BlockNumber);
-	hashCtl.entrysize = sizeof(GISTNodeBuffer);
-	hashCtl.hcxt = CurrentMemoryContext;
-	gfbb->nodeBuffersTab = hash_create("gistbuildbuffers",
-									   1024,
-									   &hashCtl,
-									   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	gfbb->nodeBuffersTab = hash_make(GISTNodeBuffer, nodeBlocknum,
+									 "gistbuildbuffers", 1024);
 
 	gfbb->bufferEmptyingQueue = NIL;
 
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 8e220a3ae16..99eb291e832 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -1356,7 +1356,6 @@ void
 _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
 				   uint32 maxbucket, uint32 highmask, uint32 lowmask)
 {
-	HASHCTL		hash_ctl;
 	HTAB	   *tidhtab;
 	Buffer		bucket_nbuf = InvalidBuffer;
 	Buffer		nbuf;
@@ -1367,16 +1366,8 @@ _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
 	Bucket		nbucket;
 	bool		found;
 
-	/* Initialize hash tables used to track TIDs */
-	hash_ctl.keysize = sizeof(ItemPointerData);
-	hash_ctl.entrysize = sizeof(ItemPointerData);
-	hash_ctl.hcxt = CurrentMemoryContext;
-
-	tidhtab =
-		hash_create("bucket ctids",
-					256,		/* arbitrary initial size */
-					&hash_ctl,
-					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	/* Initialize hash tables used to track TIDs (with arbitrary initial size) */
+	tidhtab = hashset_make(ItemPointerData, "bucket ctids", 256);
 
 	bucket_nblkno = nblkno = _hash_get_newblock_from_oldbucket(rel, obucket);
 
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index 77fd48eb59e..fbe4d6e6e98 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -237,7 +237,6 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm
 	RewriteState state;
 	MemoryContext rw_cxt;
 	MemoryContext old_cxt;
-	HASHCTL		hash_ctl;
 
 	/*
 	 * To ease cleanup, make a separate context that will contain the
@@ -262,24 +261,19 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm
 	state->rs_cxt = rw_cxt;
 	state->rs_bulkstate = smgr_bulk_start_rel(new_heap, MAIN_FORKNUM);
 
-	/* Initialize hash tables used to track update chains */
-	hash_ctl.keysize = sizeof(TidHashKey);
-	hash_ctl.entrysize = sizeof(UnresolvedTupData);
-	hash_ctl.hcxt = state->rs_cxt;
-
+	/*
+	 * Initialize hash tables used to track update chains (with arbitrary
+	 * initial sizes)
+	 */
 	state->rs_unresolved_tups =
-		hash_create("Rewrite / Unresolved ctids",
-					128,		/* arbitrary initial size */
-					&hash_ctl,
-					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
-
-	hash_ctl.entrysize = sizeof(OldToNewMappingData);
+		hash_make_cxt(UnresolvedTupData, key,
+					  "Rewrite / Unresolved ctids", 128,
+					  state->rs_cxt);
 
 	state->rs_old_new_tid_map =
-		hash_create("Rewrite / Old to new tid map",
-					128,		/* arbitrary initial size */
-					&hash_ctl,
-					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		hash_make_cxt(OldToNewMappingData, key,
+					  "Rewrite / Old to new tid map", 128,
+					  state->rs_cxt);
 
 	MemoryContextSwitchTo(old_cxt);
 
@@ -760,7 +754,6 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
 static void
 logical_begin_heap_rewrite(RewriteState state)
 {
-	HASHCTL		hash_ctl;
 	TransactionId logical_xmin;
 
 	/*
@@ -791,15 +784,11 @@ logical_begin_heap_rewrite(RewriteState state)
 	state->rs_begin_lsn = GetXLogInsertRecPtr();
 	state->rs_num_rewrite_mappings = 0;
 
-	hash_ctl.keysize = sizeof(TransactionId);
-	hash_ctl.entrysize = sizeof(RewriteMappingFile);
-	hash_ctl.hcxt = state->rs_cxt;
-
 	state->rs_logical_mappings =
-		hash_create("Logical rewrite mapping",
-					128,		/* arbitrary initial size */
-					&hash_ctl,
-					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		hash_make_cxt(RewriteMappingFile, xid,
+					  "Logical rewrite mapping",
+					  128,		/* arbitrary initial size */
+					  state->rs_cxt);
 }
 
 /*
diff --git a/src/backend/access/transam/xlogprefetcher.c b/src/backend/access/transam/xlogprefetcher.c
index 3c3f067aafb..02e41780dd9 100644
--- a/src/backend/access/transam/xlogprefetcher.c
+++ b/src/backend/access/transam/xlogprefetcher.c
@@ -362,15 +362,13 @@ XLogPrefetcher *
 XLogPrefetcherAllocate(XLogReaderState *reader)
 {
 	XLogPrefetcher *prefetcher;
-	HASHCTL		ctl;
 
 	prefetcher = palloc0_object(XLogPrefetcher);
 	prefetcher->reader = reader;
 
-	ctl.keysize = sizeof(RelFileLocator);
-	ctl.entrysize = sizeof(XLogPrefetcherFilter);
-	prefetcher->filter_table = hash_create("XLogPrefetcherFilterTable", 1024,
-										   &ctl, HASH_ELEM | HASH_BLOBS);
+	prefetcher->filter_table = hash_make_cxt(XLogPrefetcherFilter, rlocator,
+											 "XLogPrefetcherFilterTable", 1024,
+											 TopMemoryContext);
 	dlist_init(&prefetcher->filter_queue);
 
 	SharedStats->wal_distance = 0;
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index 5fbe39133b8..d11e42c9490 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -27,6 +27,7 @@
 #include "storage/fd.h"
 #include "storage/smgr.h"
 #include "utils/hsearch.h"
+#include "utils/memutils.h"
 #include "utils/rel.h"
 
 
@@ -131,15 +132,9 @@ log_invalid_page(RelFileLocator locator, ForkNumber forkno, BlockNumber blkno,
 	if (invalid_page_tab == NULL)
 	{
 		/* create hash table when first needed */
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(xl_invalid_page_key);
-		ctl.entrysize = sizeof(xl_invalid_page);
-
-		invalid_page_tab = hash_create("XLOG invalid-page table",
-									   100,
-									   &ctl,
-									   HASH_ELEM | HASH_BLOBS);
+		invalid_page_tab = hash_make_cxt(xl_invalid_page, key,
+										 "XLOG invalid-page table", 100,
+										 TopMemoryContext);
 	}
 
 	/* we currently assume xl_invalid_page_key contains no padding */
diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c
index 33a461484d4..86c8bada557 100644
--- a/src/backend/catalog/pg_enum.c
+++ b/src/backend/catalog/pg_enum.c
@@ -267,15 +267,9 @@ EnumValuesDelete(Oid enumTypeOid)
 static void
 init_uncommitted_enum_types(void)
 {
-	HASHCTL		hash_ctl;
-
-	hash_ctl.keysize = sizeof(Oid);
-	hash_ctl.entrysize = sizeof(Oid);
-	hash_ctl.hcxt = TopTransactionContext;
-	uncommitted_enum_types = hash_create("Uncommitted enum types",
-										 32,
-										 &hash_ctl,
-										 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	uncommitted_enum_types = hashset_make_cxt(Oid,
+											  "Uncommitted enum types", 32,
+											  TopTransactionContext);
 }
 
 /*
@@ -284,15 +278,9 @@ init_uncommitted_enum_types(void)
 static void
 init_uncommitted_enum_values(void)
 {
-	HASHCTL		hash_ctl;
-
-	hash_ctl.keysize = sizeof(Oid);
-	hash_ctl.entrysize = sizeof(Oid);
-	hash_ctl.hcxt = TopTransactionContext;
-	uncommitted_enum_values = hash_create("Uncommitted enum values",
-										  32,
-										  &hash_ctl,
-										  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	uncommitted_enum_values = hashset_make_cxt(Oid,
+											   "Uncommitted enum values", 32,
+											   TopTransactionContext);
 }
 
 /*
diff --git a/src/backend/catalog/pg_inherits.c b/src/backend/catalog/pg_inherits.c
index cef80633157..537769b39ae 100644
--- a/src/backend/catalog/pg_inherits.c
+++ b/src/backend/catalog/pg_inherits.c
@@ -256,19 +256,12 @@ find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents)
 {
 	/* hash table for O(1) rel_oid -> rel_numparents cell lookup */
 	HTAB	   *seen_rels;
-	HASHCTL		ctl;
 	List	   *rels_list,
 			   *rel_numparents;
 	ListCell   *l;
 
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(SeenRelsEntry);
-	ctl.hcxt = CurrentMemoryContext;
-
-	seen_rels = hash_create("find_all_inheritors temporary table",
-							32, /* start small and extend */
-							&ctl,
-							HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	seen_rels = hash_make(SeenRelsEntry, rel_id,
+						  "find_all_inheritors temporary table", 32);
 
 	/*
 	 * We build a list starting with the given rel and adding all direct and
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index e443a4993c5..db3e08319b5 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -90,15 +90,9 @@ AddPendingSync(const RelFileLocator *rlocator)
 
 	/* create the hash if not yet */
 	if (!pendingSyncHash)
-	{
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(RelFileLocator);
-		ctl.entrysize = sizeof(PendingRelSync);
-		ctl.hcxt = TopTransactionContext;
-		pendingSyncHash = hash_create("pending sync hash", 16, &ctl,
-									  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
-	}
+		pendingSyncHash = hash_make_cxt(PendingRelSync, rlocator,
+										"pending sync hash", 16,
+										TopTransactionContext);
 
 	pending = hash_search(pendingSyncHash, rlocator, HASH_ENTER, &found);
 	Assert(!found);
@@ -600,7 +594,6 @@ void
 SerializePendingSyncs(Size maxSize, char *startAddress)
 {
 	HTAB	   *tmphash;
-	HASHCTL		ctl;
 	HASH_SEQ_STATUS scan;
 	PendingRelSync *sync;
 	PendingRelDelete *delete;
@@ -611,12 +604,8 @@ SerializePendingSyncs(Size maxSize, char *startAddress)
 		goto terminate;
 
 	/* Create temporary hash to collect active relfilelocators */
-	ctl.keysize = sizeof(RelFileLocator);
-	ctl.entrysize = sizeof(RelFileLocator);
-	ctl.hcxt = CurrentMemoryContext;
-	tmphash = hash_create("tmp relfilelocators",
-						  hash_get_num_entries(pendingSyncHash), &ctl,
-						  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	tmphash = hashset_make(RelFileLocator, "tmp relfilelocators",
+						   hash_get_num_entries(pendingSyncHash));
 
 	/* collect all rlocator from pending syncs */
 	hash_seq_init(&scan, pendingSyncHash);
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index 657c591618d..73f93512f9b 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -726,21 +726,15 @@ initGlobalChannelTable(void)
 static void
 initLocalChannelTable(void)
 {
-	HASHCTL		hash_ctl;
-
 	/* Quick exit if we already did this */
 	if (localChannelTable != NULL)
 		return;
 
 	/* Initialize local hash table for this backend's listened channels */
-	hash_ctl.keysize = NAMEDATALEN;
-	hash_ctl.entrysize = sizeof(ChannelName);
-
 	localChannelTable =
-		hash_create("Local Listen Channels",
-					64,
-					&hash_ctl,
-					HASH_ELEM | HASH_STRINGS);
+		hash_make_cxt(ChannelName, channel, "Local Listen Channels",
+					  64,
+					  TopMemoryContext);
 }
 
 /*
@@ -752,20 +746,13 @@ initLocalChannelTable(void)
 static void
 initPendingListenActions(void)
 {
-	HASHCTL		hash_ctl;
-
 	if (pendingListenActions != NULL)
 		return;
 
-	hash_ctl.keysize = NAMEDATALEN;
-	hash_ctl.entrysize = sizeof(PendingListenEntry);
-	hash_ctl.hcxt = CurTransactionContext;
-
 	pendingListenActions =
-		hash_create("Pending Listen Actions",
-					list_length(pendingActions->actions),
-					&hash_ctl,
-					HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+		hash_make_cxt(PendingListenEntry, channel, "Pending Listen Actions",
+					  list_length(pendingActions->actions),
+					  CurTransactionContext);
 }
 
 /*
@@ -3162,31 +3149,21 @@ AddEventToPendingNotifies(Notification *n)
 	if (list_length(pendingNotifies->events) >= MIN_HASHABLE_NOTIFIES &&
 		pendingNotifies->hashtab == NULL)
 	{
-		HASHCTL		hash_ctl;
 		ListCell   *l;
 
 		/* Create the hash table */
-		hash_ctl.keysize = sizeof(Notification *);
-		hash_ctl.entrysize = sizeof(struct NotificationHash);
-		hash_ctl.hash = notification_hash;
-		hash_ctl.match = notification_match;
-		hash_ctl.hcxt = CurTransactionContext;
 		pendingNotifies->hashtab =
-			hash_create("Pending Notifies",
-						256L,
-						&hash_ctl,
-						HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+			hash_make_fn_cxt(struct NotificationHash, event,
+							 "Pending Notifies", 256,
+							 notification_hash, notification_match,
+							 CurTransactionContext);
 
 		/* Create the unique channel name table */
 		Assert(pendingNotifies->uniqueChannelHash == NULL);
-		hash_ctl.keysize = NAMEDATALEN;
-		hash_ctl.entrysize = sizeof(ChannelName);
-		hash_ctl.hcxt = CurTransactionContext;
 		pendingNotifies->uniqueChannelHash =
-			hash_create("Pending Notify Channel Names",
-						64L,
-						&hash_ctl,
-						HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+			hash_make_cxt(ChannelName, channel, "Pending Notify Channel Names",
+							 64L,
+							 CurTransactionContext);
 
 		/* Insert all the already-existing events */
 		foreach(l, pendingNotifies->events)
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index 5b86a727587..eb1a7ecfebc 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -371,15 +371,9 @@ EvaluateParams(ParseState *pstate, PreparedStatement *pstmt, List *params,
 static void
 InitQueryHashTable(void)
 {
-	HASHCTL		hash_ctl;
-
-	hash_ctl.keysize = NAMEDATALEN;
-	hash_ctl.entrysize = sizeof(PreparedStatement);
-
-	prepared_queries = hash_create("Prepared Queries",
-								   32,
-								   &hash_ctl,
-								   HASH_ELEM | HASH_STRINGS);
+	prepared_queries = hash_make_cxt(PreparedStatement, stmt_name,
+									 "Prepared Queries", 32,
+									 TopMemoryContext);
 }
 
 /*
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index e1b808bbb60..5a2683c0735 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -1113,13 +1113,9 @@ lock_and_open_sequence(SeqTable seq)
 static void
 create_seq_hashtable(void)
 {
-	HASHCTL		ctl;
-
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(SeqTableData);
-
-	seqhashtab = hash_create("Sequence values", 16, &ctl,
-							 HASH_ELEM | HASH_BLOBS);
+	seqhashtab = hash_make_cxt(SeqTableData, relid,
+							   "Sequence values", 16,
+							   TopMemoryContext);
 }
 
 /*
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index f976c0e5c7e..40a39989eaa 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -2160,19 +2160,9 @@ ExecuteTruncateGuts(List *explicit_rels,
 
 			/* First time through, initialize hashtable for foreign tables */
 			if (!ft_htab)
-			{
-				HASHCTL		hctl;
-
-				memset(&hctl, 0, sizeof(HASHCTL));
-				hctl.keysize = sizeof(Oid);
-				hctl.entrysize = sizeof(ForeignTruncateInfo);
-				hctl.hcxt = CurrentMemoryContext;
-
-				ft_htab = hash_create("TRUNCATE for Foreign Tables",
-									  32,	/* start small and extend */
-									  &hctl,
-									  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
-			}
+				ft_htab = hash_make(ForeignTruncateInfo, serverid,
+									"TRUNCATE for Foreign Tables",
+									32);	/* start small and extend */
 
 			/* Find or create cached entry for the foreign table */
 			ft_info = hash_search(ft_htab, &serverid, HASH_ENTER, &found);
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index f5e9d369940..79c470e797d 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -5139,15 +5139,9 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 #endif
 	if (nrels >= MT_NRELS_HASH)
 	{
-		HASHCTL		hash_ctl;
-
-		hash_ctl.keysize = sizeof(Oid);
-		hash_ctl.entrysize = sizeof(MTTargetRelLookup);
-		hash_ctl.hcxt = CurrentMemoryContext;
 		mtstate->mt_resultOidHash =
-			hash_create("ModifyTable target hash",
-						nrels, &hash_ctl,
-						HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+			hash_make(MTTargetRelLookup, relationOid,
+					  "ModifyTable target hash", nrels);
 		for (i = 0; i < nrels; i++)
 		{
 			Oid			hashkey;
diff --git a/src/backend/nodes/extensible.c b/src/backend/nodes/extensible.c
index 0d43d66c1cd..bf0b94d3c72 100644
--- a/src/backend/nodes/extensible.c
+++ b/src/backend/nodes/extensible.c
@@ -22,6 +22,7 @@
 
 #include "nodes/extensible.h"
 #include "utils/hsearch.h"
+#include "utils/memutils.h"
 
 static HTAB *extensible_node_methods = NULL;
 static HTAB *custom_scan_methods = NULL;
@@ -45,13 +46,8 @@ RegisterExtensibleNodeEntry(HTAB **p_htable, const char *htable_label,
 
 	if (*p_htable == NULL)
 	{
-		HASHCTL		ctl;
-
-		ctl.keysize = EXTNODENAME_MAX_LEN;
-		ctl.entrysize = sizeof(ExtensibleNodeEntry);
-
-		*p_htable = hash_create(htable_label, 100, &ctl,
-								HASH_ELEM | HASH_STRINGS);
+		*p_htable = hash_make_cxt(ExtensibleNodeEntry, extnodename,
+								  htable_label, 100, TopMemoryContext);
 	}
 
 	if (strlen(extnodename) >= EXTNODENAME_MAX_LEN)
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index a90e1c9ee6b..ffa89cb3364 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -713,19 +713,10 @@ get_relation_notnullatts(PlannerInfo *root, Relation relation)
 	/* create the hash table if it hasn't been created yet */
 	if (root->glob->rel_notnullatts_hash == NULL)
 	{
-		HTAB	   *hashtab;
-		HASHCTL		hash_ctl;
-
-		hash_ctl.keysize = sizeof(Oid);
-		hash_ctl.entrysize = sizeof(NotnullHashEntry);
-		hash_ctl.hcxt = CurrentMemoryContext;
-
-		hashtab = hash_create("Relation NOT NULL attnums",
-							  64L,	/* arbitrary initial size */
-							  &hash_ctl,
-							  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
-
-		root->glob->rel_notnullatts_hash = hashtab;
+		root->glob->rel_notnullatts_hash =
+			hash_make(NotnullHashEntry, relid,
+					  "Relation NOT NULL attnums",
+					  64L);		/* arbitrary initial size */
 	}
 
 	/*
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index 26858d1d2b0..3bf5e7ac8bb 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -2117,12 +2117,9 @@ lookup_proof_cache(Oid pred_op, Oid clause_op, bool refute_it)
 	if (OprProofCacheHash == NULL)
 	{
 		/* First time through: initialize the hash table */
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(OprProofCacheKey);
-		ctl.entrysize = sizeof(OprProofCacheEntry);
-		OprProofCacheHash = hash_create("Btree proof lookup cache", 256,
-										&ctl, HASH_ELEM | HASH_BLOBS);
+		OprProofCacheHash = hash_make_cxt(OprProofCacheEntry, key,
+										  "Btree proof lookup cache", 256,
+										  TopMemoryContext);
 
 		/* Arrange to flush cache on pg_amop changes */
 		CacheRegisterSyscacheCallback(AMOPOPID,
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index f57631e876f..a907f1d028e 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -601,19 +601,12 @@ static void
 build_join_rel_hash(PlannerInfo *root)
 {
 	HTAB	   *hashtab;
-	HASHCTL		hash_ctl;
 	ListCell   *l;
 
 	/* Create the hash table */
-	hash_ctl.keysize = sizeof(Relids);
-	hash_ctl.entrysize = sizeof(JoinHashEntry);
-	hash_ctl.hash = bitmap_hash;
-	hash_ctl.match = bitmap_match;
-	hash_ctl.hcxt = CurrentMemoryContext;
-	hashtab = hash_create("JoinRelHashTable",
-						  256L,
-						  &hash_ctl,
-						  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+	hashtab = hash_make_fn(JoinHashEntry, join_relids,
+						   "JoinRelHashTable", 256,
+						   bitmap_hash, bitmap_match);
 
 	/* Insert all the already-existing joinrels */
 	foreach(l, root->join_rel_list)
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index 768e4cff9c5..d36e7ad3030 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -27,6 +27,7 @@
 #include "utils/builtins.h"
 #include "utils/inval.h"
 #include "utils/lsyscache.h"
+#include "utils/memutils.h"
 #include "utils/syscache.h"
 #include "utils/typcache.h"
 
@@ -1028,12 +1029,9 @@ find_oper_cache_entry(OprCacheKey *key)
 	if (OprCacheHash == NULL)
 	{
 		/* First time through: initialize the hash table */
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(OprCacheKey);
-		ctl.entrysize = sizeof(OprCacheEntry);
-		OprCacheHash = hash_create("Operator lookup cache", 256,
-								   &ctl, HASH_ELEM | HASH_BLOBS);
+		OprCacheHash = hash_make_cxt(OprCacheEntry, key,
+									 "Operator lookup cache", 256,
+									 TopMemoryContext);
 
 		/* Arrange to flush cache on pg_operator and pg_cast changes */
 		CacheRegisterSyscacheCallback(OPERNAMENSP,
diff --git a/src/backend/partitioning/partdesc.c b/src/backend/partitioning/partdesc.c
index c3d275f8726..db26e4a82b6 100644
--- a/src/backend/partitioning/partdesc.c
+++ b/src/backend/partitioning/partdesc.c
@@ -424,17 +424,12 @@ CreatePartitionDirectory(MemoryContext mcxt, bool omit_detached)
 {
 	MemoryContext oldcontext = MemoryContextSwitchTo(mcxt);
 	PartitionDirectory pdir;
-	HASHCTL		ctl;
 
 	pdir = palloc_object(PartitionDirectoryData);
 	pdir->pdir_mcxt = mcxt;
 
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(PartitionDirectoryEntry);
-	ctl.hcxt = mcxt;
-
-	pdir->pdir_hash = hash_create("partition directory", 256, &ctl,
-								  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	pdir->pdir_hash = hash_make_cxt(PartitionDirectoryEntry, reloid,
+									"partition directory", 256, mcxt);
 	pdir->omit_detached = omit_detached;
 
 	MemoryContextSwitchTo(oldcontext);
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 22379de1e31..d4a29bbf87b 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -904,7 +904,6 @@ rebuild_database_list(Oid newdb)
 	MemoryContext newcxt;
 	MemoryContext oldcxt;
 	MemoryContext tmpcxt;
-	HASHCTL		hctl;
 	int			score;
 	int			nelems;
 	HTAB	   *dbhash;
@@ -934,12 +933,10 @@ rebuild_database_list(Oid newdb)
 	 * score, and finally put the array elements into the new doubly linked
 	 * list.
 	 */
-	hctl.keysize = sizeof(Oid);
-	hctl.entrysize = sizeof(avl_dbase);
-	hctl.hcxt = tmpcxt;
-	dbhash = hash_create("autovacuum db hash", 20, &hctl,	/* magic number here
-															 * FIXME */
-						 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	dbhash = hash_make_cxt(avl_dbase, adl_datid,
+						   "autovacuum db hash",
+						   20,	/* magic number here FIXME */
+						   tmpcxt);
 
 	/* start by inserting the new database */
 	score = 0;
@@ -1882,7 +1879,6 @@ do_autovacuum(void)
 	Form_pg_database dbForm;
 	List	   *table_oids = NIL;
 	List	   *orphan_oids = NIL;
-	HASHCTL		ctl;
 	HTAB	   *table_toast_map;
 	ListCell   *volatile cell;
 	BufferAccessStrategy bstrategy;
@@ -1955,13 +1951,9 @@ do_autovacuum(void)
 	pg_class_desc = CreateTupleDescCopy(RelationGetDescr(classRel));
 
 	/* create hash table for toast <-> main relid mapping */
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(av_relation);
-
-	table_toast_map = hash_create("TOAST to main relid map",
-								  100,
-								  &ctl,
-								  HASH_ELEM | HASH_BLOBS);
+	table_toast_map = hash_make_cxt(av_relation, ar_toastrelid,
+									"TOAST to main relid map", 100,
+									TopMemoryContext);
 
 	/*
 	 * Scan pg_class to determine which tables to vacuum.
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index 6482c21b8f9..24259ed5e42 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -1309,7 +1309,6 @@ CompactCheckpointerRequestQueue(void)
 	int			num_requests;
 	int			read_idx,
 				write_idx;
-	HASHCTL		ctl;
 	HTAB	   *htab;
 	bool	   *skip_slot;
 
@@ -1329,14 +1328,9 @@ CompactCheckpointerRequestQueue(void)
 	head = CheckpointerShmem->head;
 
 	/* Initialize temporary hash table */
-	ctl.keysize = sizeof(CheckpointerRequest);
-	ctl.entrysize = sizeof(struct CheckpointerSlotMapping);
-	ctl.hcxt = CurrentMemoryContext;
-
-	htab = hash_create("CompactCheckpointerRequestQueue",
-					   CheckpointerShmem->num_requests,
-					   &ctl,
-					   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	htab = hash_make(struct CheckpointerSlotMapping, request,
+					 "CompactCheckpointerRequestQueue",
+					 CheckpointerShmem->num_requests);
 
 	/*
 	 * The basic idea here is that a request can be skipped if it's followed
diff --git a/src/backend/replication/logical/applyparallelworker.c b/src/backend/replication/logical/applyparallelworker.c
index 5ebd2353fed..e7e99a6ecec 100644
--- a/src/backend/replication/logical/applyparallelworker.c
+++ b/src/backend/replication/logical/applyparallelworker.c
@@ -484,16 +484,9 @@ pa_allocate_worker(TransactionId xid)
 	/* First time through, initialize parallel apply worker state hashtable. */
 	if (!ParallelApplyTxnHash)
 	{
-		HASHCTL		ctl;
-
-		MemSet(&ctl, 0, sizeof(ctl));
-		ctl.keysize = sizeof(TransactionId);
-		ctl.entrysize = sizeof(ParallelApplyWorkerEntry);
-		ctl.hcxt = ApplyContext;
-
-		ParallelApplyTxnHash = hash_create("logical replication parallel apply workers hash",
-										   16, &ctl,
-										   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		ParallelApplyTxnHash = hash_make_cxt(ParallelApplyWorkerEntry, xid,
+											 "logical replication parallel apply workers hash",
+											 16, ApplyContext);
 	}
 
 	/* Create an entry for the requested transaction. */
diff --git a/src/backend/replication/logical/relation.c b/src/backend/replication/logical/relation.c
index 0b1d80b5b0f..fcf295f1df1 100644
--- a/src/backend/replication/logical/relation.c
+++ b/src/backend/replication/logical/relation.c
@@ -105,8 +105,6 @@ logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid)
 static void
 logicalrep_relmap_init(void)
 {
-	HASHCTL		ctl;
-
 	if (!LogicalRepRelMapContext)
 		LogicalRepRelMapContext =
 			AllocSetContextCreate(CacheMemoryContext,
@@ -114,12 +112,9 @@ logicalrep_relmap_init(void)
 								  ALLOCSET_DEFAULT_SIZES);
 
 	/* Initialize the relation hash table. */
-	ctl.keysize = sizeof(LogicalRepRelId);
-	ctl.entrysize = sizeof(LogicalRepRelMapEntry);
-	ctl.hcxt = LogicalRepRelMapContext;
-
-	LogicalRepRelMap = hash_create("logicalrep relation map cache", 128, &ctl,
-								   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	LogicalRepRelMap = hash_make_cxt(LogicalRepRelMapEntry, remoterel.remoteid,
+									 "logicalrep relation map cache", 128,
+									 LogicalRepRelMapContext);
 
 	/* Watch for invalidation events. */
 	CacheRegisterRelcacheCallback(logicalrep_relmap_invalidate_cb,
@@ -611,8 +606,6 @@ logicalrep_partmap_reset_relmap(LogicalRepRelation *remoterel)
 static void
 logicalrep_partmap_init(void)
 {
-	HASHCTL		ctl;
-
 	if (!LogicalRepPartMapContext)
 		LogicalRepPartMapContext =
 			AllocSetContextCreate(CacheMemoryContext,
@@ -620,12 +613,9 @@ logicalrep_partmap_init(void)
 								  ALLOCSET_DEFAULT_SIZES);
 
 	/* Initialize the relation hash table. */
-	ctl.keysize = sizeof(Oid);	/* partition OID */
-	ctl.entrysize = sizeof(LogicalRepPartMapEntry);
-	ctl.hcxt = LogicalRepPartMapContext;
-
-	LogicalRepPartMap = hash_create("logicalrep partition map cache", 64, &ctl,
-									HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	LogicalRepPartMap = hash_make_cxt(LogicalRepPartMapEntry, partoid,
+									  "logicalrep partition map cache", 64,
+									  LogicalRepPartMapContext);
 
 	/* Watch for invalidation events. */
 	CacheRegisterRelcacheCallback(logicalrep_partmap_invalidate_cb,
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index a0293f6ec7c..fa93a1bbde1 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -324,7 +324,6 @@ ReorderBuffer *
 ReorderBufferAllocate(void)
 {
 	ReorderBuffer *buffer;
-	HASHCTL		hash_ctl;
 	MemoryContext new_ctx;
 
 	Assert(MyReplicationSlot != NULL);
@@ -337,8 +336,6 @@ ReorderBufferAllocate(void)
 	buffer =
 		(ReorderBuffer *) MemoryContextAlloc(new_ctx, sizeof(ReorderBuffer));
 
-	memset(&hash_ctl, 0, sizeof(hash_ctl));
-
 	buffer->context = new_ctx;
 
 	buffer->change_context = SlabContextCreate(new_ctx,
@@ -367,12 +364,8 @@ ReorderBufferAllocate(void)
 												  SLAB_DEFAULT_BLOCK_SIZE,
 												  SLAB_DEFAULT_BLOCK_SIZE);
 
-	hash_ctl.keysize = sizeof(TransactionId);
-	hash_ctl.entrysize = sizeof(ReorderBufferTXNByIdEnt);
-	hash_ctl.hcxt = buffer->context;
-
-	buffer->by_txn = hash_create("ReorderBufferByXid", 1000, &hash_ctl,
-								 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	buffer->by_txn = hash_make_cxt(ReorderBufferTXNByIdEnt, xid,
+								   "ReorderBufferByXid", 1000, buffer->context);
 
 	buffer->by_txn_last_xid = InvalidTransactionId;
 	buffer->by_txn_last_txn = NULL;
@@ -1836,22 +1829,17 @@ static void
 ReorderBufferBuildTupleCidHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
 {
 	dlist_iter	iter;
-	HASHCTL		hash_ctl;
 
 	if (!rbtxn_has_catalog_changes(txn) || dlist_is_empty(&txn->tuplecids))
 		return;
 
-	hash_ctl.keysize = sizeof(ReorderBufferTupleCidKey);
-	hash_ctl.entrysize = sizeof(ReorderBufferTupleCidEnt);
-	hash_ctl.hcxt = rb->context;
-
 	/*
 	 * create the hash with the exact number of to-be-stored tuplecids from
 	 * the start
 	 */
 	txn->tuplecid_hash =
-		hash_create("ReorderBufferTupleCid", txn->ntuplecids, &hash_ctl,
-					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		hash_make_cxt(ReorderBufferTupleCidEnt, key,
+					  "ReorderBufferTupleCid", txn->ntuplecids, rb->context);
 
 	dlist_foreach(iter, &txn->tuplecids)
 	{
@@ -4974,15 +4962,10 @@ StartupReorderBuffer(void)
 static void
 ReorderBufferToastInitHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
 {
-	HASHCTL		hash_ctl;
-
 	Assert(txn->toast_hash == NULL);
 
-	hash_ctl.keysize = sizeof(Oid);
-	hash_ctl.entrysize = sizeof(ReorderBufferToastEnt);
-	hash_ctl.hcxt = rb->context;
-	txn->toast_hash = hash_create("ReorderBufferToastHash", 5, &hash_ctl,
-								  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	txn->toast_hash = hash_make_cxt(ReorderBufferToastEnt, chunk_id,
+									"ReorderBufferToastHash", 5, rb->context);
 }
 
 /*
diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c
index 67e57520386..31273ad9f58 100644
--- a/src/backend/replication/logical/tablesync.c
+++ b/src/backend/replication/logical/tablesync.c
@@ -117,6 +117,7 @@
 #include "utils/array.h"
 #include "utils/builtins.h"
 #include "utils/lsyscache.h"
+#include "utils/memutils.h"
 #include "utils/rls.h"
 #include "utils/snapmgr.h"
 #include "utils/syscache.h"
@@ -390,12 +391,9 @@ ProcessSyncingTablesForApply(XLogRecPtr current_lsn)
 	 */
 	if (table_states_not_ready != NIL && !last_start_times)
 	{
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(Oid);
-		ctl.entrysize = sizeof(struct tablesync_start_time_mapping);
-		last_start_times = hash_create("Logical replication table sync worker start times",
-									   256, &ctl, HASH_ELEM | HASH_BLOBS);
+		last_start_times = hash_make_cxt(struct tablesync_start_time_mapping, relid,
+										 "Logical replication table sync worker start times",
+										 256, TopMemoryContext);
 	}
 
 	/*
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
index 9ee8949e040..0dcfbfdd609 100644
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -1973,7 +1973,6 @@ pgoutput_stream_prepare_txn(LogicalDecodingContext *ctx,
 static void
 init_rel_sync_cache(MemoryContext cachectx)
 {
-	HASHCTL		ctl;
 	static bool relation_callbacks_registered = false;
 
 	/* Nothing to do if hash table already exists */
@@ -1981,13 +1980,9 @@ init_rel_sync_cache(MemoryContext cachectx)
 		return;
 
 	/* Make a new hash table for the cache */
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(RelationSyncEntry);
-	ctl.hcxt = cachectx;
-
-	RelationSyncCache = hash_create("logical replication output relation cache",
-									128, &ctl,
-									HASH_ELEM | HASH_CONTEXT | HASH_BLOBS);
+	RelationSyncCache = hash_make_cxt(RelationSyncEntry, relid,
+									  "logical replication output relation cache",
+									  128, cachectx);
 
 	Assert(RelationSyncCache != NULL);
 
diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c
index 23d85fd32e2..95653944a9d 100644
--- a/src/backend/storage/buffer/buf_table.c
+++ b/src/backend/storage/buffer/buf_table.c
@@ -50,19 +50,16 @@ BufTableShmemSize(int size)
 void
 InitBufTable(int size)
 {
-	HASHCTL		info;
+	HASHOPTS	opts = {0};
 
 	/* assume no locking is needed yet */
 
 	/* BufferTag maps to Buffer */
-	info.keysize = sizeof(BufferTag);
-	info.entrysize = sizeof(BufferLookupEnt);
-	info.num_partitions = NUM_BUFFER_PARTITIONS;
-
-	SharedBufHash = ShmemInitHash("Shared Buffer Lookup Table",
-								  size, size,
-								  &info,
-								  HASH_ELEM | HASH_BLOBS | HASH_PARTITION | HASH_FIXED_SIZE);
+	opts.num_partitions = NUM_BUFFER_PARTITIONS;
+	opts.fixed_size = true;
+	SharedBufHash = shmem_hash_make_ext(BufferLookupEnt, key,
+										"Shared Buffer Lookup Table",
+										size, size, &opts);
 }
 
 /*
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 6f935648ae9..2a97ecdbba6 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -63,6 +63,7 @@
 #include "storage/smgr.h"
 #include "storage/standby.h"
 #include "utils/memdebug.h"
+#include "utils/memutils.h"
 #include "utils/ps_status.h"
 #include "utils/rel.h"
 #include "utils/resowner.h"
@@ -4119,8 +4120,6 @@ AtEOXact_Buffers(bool isCommit)
 void
 InitBufferManagerAccess(void)
 {
-	HASHCTL		hash_ctl;
-
 	/*
 	 * An advisory limit on the number of pins each backend should hold, based
 	 * on shared_buffers and the maximum number of connections possible.
@@ -4133,11 +4132,9 @@ InitBufferManagerAccess(void)
 	memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
 	memset(&PrivateRefCountArrayKeys, 0, sizeof(PrivateRefCountArrayKeys));
 
-	hash_ctl.keysize = sizeof(Buffer);
-	hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
-
-	PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
-									  HASH_ELEM | HASH_BLOBS);
+	PrivateRefCountHash = hash_make_cxt(PrivateRefCountEntry, buffer,
+										"PrivateRefCount", 100,
+										TopMemoryContext);
 
 	/*
 	 * AtProcExit_Buffers needs LWLock access, and thereby has to be called at
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 04a540379a2..5a5f442a782 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -728,7 +728,6 @@ static void
 InitLocalBuffers(void)
 {
 	int			nbufs = num_temp_buffers;
-	HASHCTL		info;
 	int			i;
 
 	/*
@@ -779,13 +778,9 @@ InitLocalBuffers(void)
 	}
 
 	/* Create the lookup hash table */
-	info.keysize = sizeof(BufferTag);
-	info.entrysize = sizeof(LocalBufferLookupEnt);
-
-	LocalBufHash = hash_create("Local Buffer Lookup Table",
-							   nbufs,
-							   &info,
-							   HASH_ELEM | HASH_BLOBS);
+	LocalBufHash = hash_make_cxt(LocalBufferLookupEnt, key,
+								 "Local Buffer Lookup Table", nbufs,
+								 TopMemoryContext);
 
 	if (!LocalBufHash)
 		elog(ERROR, "could not initialize local buffer hash table");
diff --git a/src/backend/storage/file/reinit.c b/src/backend/storage/file/reinit.c
index 25fa2151309..26e810e6b50 100644
--- a/src/backend/storage/file/reinit.c
+++ b/src/backend/storage/file/reinit.c
@@ -175,7 +175,6 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
 	if ((op & UNLOGGED_RELATION_CLEANUP) != 0)
 	{
 		HTAB	   *hash;
-		HASHCTL		ctl;
 
 		/*
 		 * It's possible that someone could create a ton of unlogged relations
@@ -184,11 +183,8 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
 		 * need to be reset.  Otherwise, this cleanup operation would be
 		 * O(n^2).
 		 */
-		ctl.keysize = sizeof(Oid);
-		ctl.entrysize = sizeof(unlogged_relation_entry);
-		ctl.hcxt = CurrentMemoryContext;
-		hash = hash_create("unlogged relation OIDs", 32, &ctl,
-						   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		hash = hash_make(unlogged_relation_entry, relnumber,
+						 "unlogged relation OIDs", 32);
 
 		/* Scan the directory. */
 		dbspace_dir = AllocateDir(dbspacedirname);
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index d2f4710f141..b6a8d1ec776 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -284,8 +284,6 @@ ShmemAddrIsValid(const void *addr)
 void
 InitShmemIndex(void)
 {
-	HASHCTL		info;
-
 	/*
 	 * Create the shared memory shmem index.
 	 *
@@ -294,13 +292,8 @@ InitShmemIndex(void)
 	 * initializing the ShmemIndex itself.  The special "ShmemIndex" hash
 	 * table name will tell ShmemInitStruct to fake it.
 	 */
-	info.keysize = SHMEM_INDEX_KEYSIZE;
-	info.entrysize = sizeof(ShmemIndexEnt);
-
-	ShmemIndex = ShmemInitHash("ShmemIndex",
-							   SHMEM_INDEX_SIZE, SHMEM_INDEX_SIZE,
-							   &info,
-							   HASH_ELEM | HASH_STRINGS);
+	ShmemIndex = shmem_hash_make(ShmemIndexEnt, key, "ShmemIndex",
+								 SHMEM_INDEX_SIZE, SHMEM_INDEX_SIZE);
 }
 
 /*
@@ -369,6 +362,28 @@ ShmemInitHash(const char *name,		/* table string name for shmem index */
 	return hash_create(name, init_size, infoP, hash_flags);
 }
 
+/*
+ * Implementation function behind the shmem_hash_make* macros.
+ *
+ * Creates a shared-memory hash table with simplified parameters.
+ * Pass NULL for opts to use all defaults.
+ */
+HTAB *
+shmem_hash_make_impl(const char *name, int64 init_size, int64 max_size,
+					 Size keysize, Size entrysize, bool string_key,
+					 const HASHOPTS *opts)
+{
+	HASHCTL		ctl;
+	int			flags;
+
+	/* Shared memory hash tables use ShmemAllocNoError, not a custom allocator */
+	Assert(opts == NULL || opts->alloc == NULL);
+
+	hash_opts_init(&ctl, &flags, keysize, entrysize, string_key, opts);
+
+	return ShmemInitHash(name, init_size, max_size, &ctl, flags);
+}
+
 /*
  * ShmemInitStruct -- Create/attach to a structure in shared memory.
  *
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index afffab77106..7a96e3e2039 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -32,6 +32,7 @@
 #include "storage/standby.h"
 #include "utils/hsearch.h"
 #include "utils/injection_point.h"
+#include "utils/memutils.h"
 #include "utils/ps_status.h"
 #include "utils/timeout.h"
 #include "utils/timestamp.h"
@@ -95,7 +96,6 @@ void
 InitRecoveryTransactionEnvironment(void)
 {
 	VirtualTransactionId vxid;
-	HASHCTL		hash_ctl;
 
 	Assert(RecoveryLockHash == NULL);	/* don't run this twice */
 
@@ -103,18 +103,12 @@ InitRecoveryTransactionEnvironment(void)
 	 * Initialize the hash tables for tracking the locks held by each
 	 * transaction.
 	 */
-	hash_ctl.keysize = sizeof(xl_standby_lock);
-	hash_ctl.entrysize = sizeof(RecoveryLockEntry);
-	RecoveryLockHash = hash_create("RecoveryLockHash",
-								   64,
-								   &hash_ctl,
-								   HASH_ELEM | HASH_BLOBS);
-	hash_ctl.keysize = sizeof(TransactionId);
-	hash_ctl.entrysize = sizeof(RecoveryLockXidEntry);
-	RecoveryLockXidHash = hash_create("RecoveryLockXidHash",
-									  64,
-									  &hash_ctl,
-									  HASH_ELEM | HASH_BLOBS);
+	RecoveryLockHash = hash_make_cxt(RecoveryLockEntry, key,
+									 "RecoveryLockHash", 64,
+									 TopMemoryContext);
+	RecoveryLockXidHash = hash_make_cxt(RecoveryLockXidEntry, xid,
+										"RecoveryLockXidHash", 64,
+										TopMemoryContext);
 
 	/*
 	 * Initialize shared invalidation management for Startup process, being
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 7f0cd784f79..3f20c85881a 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -443,7 +443,7 @@ static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
 void
 LockManagerShmemInit(void)
 {
-	HASHCTL		info;
+	HASHOPTS	opts;
 	int64		init_table_size,
 				max_table_size;
 	bool		found;
@@ -459,15 +459,11 @@ LockManagerShmemInit(void)
 	 * Allocate hash table for LOCK structs.  This stores per-locked-object
 	 * information.
 	 */
-	info.keysize = sizeof(LOCKTAG);
-	info.entrysize = sizeof(LOCK);
-	info.num_partitions = NUM_LOCK_PARTITIONS;
-
-	LockMethodLockHash = ShmemInitHash("LOCK hash",
-									   init_table_size,
-									   max_table_size,
-									   &info,
-									   HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
+	MemSet(&opts, 0, sizeof(opts));
+	opts.num_partitions = NUM_LOCK_PARTITIONS;
+	LockMethodLockHash = shmem_hash_make_ext(LOCK, tag, "LOCK hash",
+											 init_table_size, max_table_size,
+											 &opts);
 
 	/* Assume an average of 2 holders per lock */
 	max_table_size *= 2;
@@ -477,16 +473,12 @@ LockManagerShmemInit(void)
 	 * Allocate hash table for PROCLOCK structs.  This stores
 	 * per-lock-per-holder information.
 	 */
-	info.keysize = sizeof(PROCLOCKTAG);
-	info.entrysize = sizeof(PROCLOCK);
-	info.hash = proclock_hash;
-	info.num_partitions = NUM_LOCK_PARTITIONS;
-
-	LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
-										   init_table_size,
-										   max_table_size,
-										   &info,
-										   HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
+	MemSet(&opts, 0, sizeof(opts));
+	opts.hash = proclock_hash;
+	opts.num_partitions = NUM_LOCK_PARTITIONS;
+	LockMethodProcLockHash = shmem_hash_make_ext(PROCLOCK, tag, "PROCLOCK hash",
+												 init_table_size, max_table_size,
+												 &opts);
 
 	/*
 	 * Allocate fast-path structures.
@@ -508,15 +500,9 @@ InitLockManagerAccess(void)
 	 * Allocate non-shared hash table for LOCALLOCK structs.  This stores lock
 	 * counts and resource owner information.
 	 */
-	HASHCTL		info;
-
-	info.keysize = sizeof(LOCALLOCKTAG);
-	info.entrysize = sizeof(LOCALLOCK);
-
-	LockMethodLocalHash = hash_create("LOCALLOCK hash",
-									  16,
-									  &info,
-									  HASH_ELEM | HASH_BLOBS);
+	LockMethodLocalHash = hash_make_cxt(LOCALLOCK, tag,
+										"LOCALLOCK hash", 16,
+										TopMemoryContext);
 }
 
 
@@ -3394,20 +3380,13 @@ CheckForSessionAndXactLocks(void)
 		bool		xactLock;	/* is any lockmode held at xact level? */
 	} PerLockTagEntry;
 
-	HASHCTL		hash_ctl;
 	HTAB	   *lockhtab;
 	HASH_SEQ_STATUS status;
 	LOCALLOCK  *locallock;
 
 	/* Create a local hash table keyed by LOCKTAG only */
-	hash_ctl.keysize = sizeof(LOCKTAG);
-	hash_ctl.entrysize = sizeof(PerLockTagEntry);
-	hash_ctl.hcxt = CurrentMemoryContext;
-
-	lockhtab = hash_create("CheckForSessionAndXactLocks table",
-						   256, /* arbitrary initial size */
-						   &hash_ctl,
-						   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	lockhtab = hash_make(PerLockTagEntry, lock,
+						 "CheckForSessionAndXactLocks table", 256);
 
 	/* Scan local lock table to find entries for each LOCKTAG */
 	hash_seq_init(&status, LockMethodLocalHash);
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 517c55375b4..83c5b77f952 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -285,7 +285,6 @@ static lwlock_stats * get_lwlock_stats_entry(LWLock *lock);
 static void
 init_lwlock_stats(void)
 {
-	HASHCTL		ctl;
 	static MemoryContext lwlock_stats_cxt = NULL;
 	static bool exit_registered = false;
 
@@ -305,11 +304,8 @@ init_lwlock_stats(void)
 											 ALLOCSET_DEFAULT_SIZES);
 	MemoryContextAllowInCriticalSection(lwlock_stats_cxt, true);
 
-	ctl.keysize = sizeof(lwlock_stats_key);
-	ctl.entrysize = sizeof(lwlock_stats);
-	ctl.hcxt = lwlock_stats_cxt;
-	lwlock_stats_htab = hash_create("lwlock stats", 16384, &ctl,
-									HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	lwlock_stats_htab = hash_make_cxt(lwlock_stats, key,
+									  "lwlock stats", 16384, lwlock_stats_cxt);
 	if (!exit_registered)
 	{
 		on_shmem_exit(print_lwlock_stats, 0);
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index fe75ead3501..9f3779b9aea 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -212,6 +212,7 @@
 #include "storage/proc.h"
 #include "storage/procarray.h"
 #include "utils/guc_hooks.h"
+#include "utils/memutils.h"
 #include "utils/rel.h"
 #include "utils/snapmgr.h"
 
@@ -1144,7 +1145,7 @@ CheckPointPredicate(void)
 void
 PredicateLockShmemInit(void)
 {
-	HASHCTL		info;
+	HASHOPTS	opts;
 	int64		max_table_size;
 	Size		requestSize;
 	bool		found;
@@ -1163,16 +1164,13 @@ PredicateLockShmemInit(void)
 	 * Allocate hash table for PREDICATELOCKTARGET structs.  This stores
 	 * per-predicate-lock-target information.
 	 */
-	info.keysize = sizeof(PREDICATELOCKTARGETTAG);
-	info.entrysize = sizeof(PREDICATELOCKTARGET);
-	info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
-
-	PredicateLockTargetHash = ShmemInitHash("PREDICATELOCKTARGET hash",
-											max_table_size,
-											max_table_size,
-											&info,
-											HASH_ELEM | HASH_BLOBS |
-											HASH_PARTITION | HASH_FIXED_SIZE);
+	MemSet(&opts, 0, sizeof(opts));
+	opts.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
+	opts.fixed_size = true;
+	PredicateLockTargetHash = shmem_hash_make_ext(PREDICATELOCKTARGET, tag,
+												  "PREDICATELOCKTARGET hash",
+												  max_table_size, max_table_size,
+												  &opts);
 
 	/*
 	 * Reserve a dummy entry in the hash table; we use it to make sure there's
@@ -1195,20 +1193,17 @@ PredicateLockShmemInit(void)
 	 * Allocate hash table for PREDICATELOCK structs.  This stores per
 	 * xact-lock-of-a-target information.
 	 */
-	info.keysize = sizeof(PREDICATELOCKTAG);
-	info.entrysize = sizeof(PREDICATELOCK);
-	info.hash = predicatelock_hash;
-	info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
-
 	/* Assume an average of 2 xacts per target */
 	max_table_size *= 2;
 
-	PredicateLockHash = ShmemInitHash("PREDICATELOCK hash",
-									  max_table_size,
-									  max_table_size,
-									  &info,
-									  HASH_ELEM | HASH_FUNCTION |
-									  HASH_PARTITION | HASH_FIXED_SIZE);
+	MemSet(&opts, 0, sizeof(opts));
+	opts.hash = predicatelock_hash;
+	opts.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
+	opts.fixed_size = true;
+	PredicateLockHash = shmem_hash_make_ext(PREDICATELOCK, tag,
+											"PREDICATELOCK hash",
+											max_table_size, max_table_size,
+											&opts);
 
 	/*
 	 * Compute size for serializable transaction hashtable. Note these
@@ -1282,15 +1277,12 @@ PredicateLockShmemInit(void)
 	 * Allocate hash table for SERIALIZABLEXID structs.  This stores per-xid
 	 * information for serializable transactions which have accessed data.
 	 */
-	info.keysize = sizeof(SERIALIZABLEXIDTAG);
-	info.entrysize = sizeof(SERIALIZABLEXID);
-
-	SerializableXidHash = ShmemInitHash("SERIALIZABLEXID hash",
-										max_table_size,
-										max_table_size,
-										&info,
-										HASH_ELEM | HASH_BLOBS |
-										HASH_FIXED_SIZE);
+	MemSet(&opts, 0, sizeof(opts));
+	opts.fixed_size = true;
+	SerializableXidHash = shmem_hash_make_ext(SERIALIZABLEXID, tag,
+											  "SERIALIZABLEXID hash",
+											  max_table_size, max_table_size,
+											  &opts);
 
 	/*
 	 * Allocate space for tracking rw-conflicts in lists attached to the
@@ -1937,16 +1929,12 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
 static void
 CreateLocalPredicateLockHash(void)
 {
-	HASHCTL		hash_ctl;
-
 	/* Initialize the backend-local hash table of parent locks */
 	Assert(LocalPredicateLockHash == NULL);
-	hash_ctl.keysize = sizeof(PREDICATELOCKTARGETTAG);
-	hash_ctl.entrysize = sizeof(LOCALPREDICATELOCK);
-	LocalPredicateLockHash = hash_create("Local predicate lock",
-										 max_predicate_locks_per_xact,
-										 &hash_ctl,
-										 HASH_ELEM | HASH_BLOBS);
+	LocalPredicateLockHash = hash_make_cxt(LOCALPREDICATELOCK, tag,
+										   "Local predicate lock",
+										   max_predicate_locks_per_xact,
+										   TopMemoryContext);
 }
 
 /*
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index 378c2a03f39..e75bc9a2a08 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -73,6 +73,7 @@
 #include "storage/smgr.h"
 #include "utils/hsearch.h"
 #include "utils/inval.h"
+#include "utils/memutils.h"
 
 
 /*
@@ -250,12 +251,9 @@ smgropen(RelFileLocator rlocator, ProcNumber backend)
 	if (SMgrRelationHash == NULL)
 	{
 		/* First time through: initialize the hash table */
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(RelFileLocatorBackend);
-		ctl.entrysize = sizeof(SMgrRelationData);
-		SMgrRelationHash = hash_create("smgr relation table", 400,
-									   &ctl, HASH_ELEM | HASH_BLOBS);
+		SMgrRelationHash = hash_make_cxt(SMgrRelationData, smgr_rlocator,
+										 "smgr relation table", 400,
+										 TopMemoryContext);
 		dlist_init(&unpinned_relns);
 	}
 
diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c
index b1accc68b95..01acb30f1bb 100644
--- a/src/backend/storage/sync/sync.c
+++ b/src/backend/storage/sync/sync.c
@@ -130,8 +130,6 @@ InitSync(void)
 	 */
 	if (!IsUnderPostmaster || AmCheckpointerProcess())
 	{
-		HASHCTL		hash_ctl;
-
 		/*
 		 * XXX: The checkpointer needs to add entries to the pending ops table
 		 * when absorbing fsync requests.  That is done within a critical
@@ -146,13 +144,8 @@ InitSync(void)
 											  ALLOCSET_DEFAULT_SIZES);
 		MemoryContextAllowInCriticalSection(pendingOpsCxt, true);
 
-		hash_ctl.keysize = sizeof(FileTag);
-		hash_ctl.entrysize = sizeof(PendingFsyncEntry);
-		hash_ctl.hcxt = pendingOpsCxt;
-		pendingOps = hash_create("Pending Ops Table",
-								 100L,
-								 &hash_ctl,
-								 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		pendingOps = hash_make_cxt(PendingFsyncEntry, tag,
+								   "Pending Ops Table", 100L, pendingOpsCxt);
 		pendingUnlinks = NIL;
 	}
 }
diff --git a/src/backend/tsearch/ts_typanalyze.c b/src/backend/tsearch/ts_typanalyze.c
index 0c513d694e7..c2f3b63265d 100644
--- a/src/backend/tsearch/ts_typanalyze.c
+++ b/src/backend/tsearch/ts_typanalyze.c
@@ -149,7 +149,6 @@ compute_tsvector_stats(VacAttrStats *stats,
 
 	/* This is D from the LC algorithm. */
 	HTAB	   *lexemes_tab;
-	HASHCTL		hash_ctl;
 	HASH_SEQ_STATUS scan_status;
 
 	/* This is the current bucket number from the LC algorithm */
@@ -180,15 +179,9 @@ compute_tsvector_stats(VacAttrStats *stats,
 	 * worry about overflowing the initial size. Also we don't need to pay any
 	 * attention to locking and memory management.
 	 */
-	hash_ctl.keysize = sizeof(LexemeHashKey);
-	hash_ctl.entrysize = sizeof(TrackItem);
-	hash_ctl.hash = lexeme_hash;
-	hash_ctl.match = lexeme_match;
-	hash_ctl.hcxt = CurrentMemoryContext;
-	lexemes_tab = hash_create("Analyzed lexemes table",
-							  num_mcelem,
-							  &hash_ctl,
-							  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+	lexemes_tab = hash_make_fn(TrackItem, key,
+							   "Analyzed lexemes table", num_mcelem,
+							   lexeme_hash, lexeme_match);
 
 	/* Initialize counters. */
 	b_current = 1;
diff --git a/src/backend/utils/activity/wait_event.c b/src/backend/utils/activity/wait_event.c
index e4f2c440257..e5ac956daf0 100644
--- a/src/backend/utils/activity/wait_event.c
+++ b/src/backend/utils/activity/wait_event.c
@@ -119,7 +119,6 @@ void
 WaitEventCustomShmemInit(void)
 {
 	bool		found;
-	HASHCTL		info;
 
 	WaitEventCustomCounter = (WaitEventCustomCounterData *)
 		ShmemInitStruct("WaitEventCustomCounterData",
@@ -133,24 +132,18 @@ WaitEventCustomShmemInit(void)
 	}
 
 	/* initialize or attach the hash tables to store custom wait events */
-	info.keysize = sizeof(uint32);
-	info.entrysize = sizeof(WaitEventCustomEntryByInfo);
 	WaitEventCustomHashByInfo =
-		ShmemInitHash("WaitEventCustom hash by wait event information",
-					  WAIT_EVENT_CUSTOM_HASH_INIT_SIZE,
-					  WAIT_EVENT_CUSTOM_HASH_MAX_SIZE,
-					  &info,
-					  HASH_ELEM | HASH_BLOBS);
+		shmem_hash_make(WaitEventCustomEntryByInfo, wait_event_info,
+						"WaitEventCustom hash by wait event information",
+						WAIT_EVENT_CUSTOM_HASH_INIT_SIZE,
+						WAIT_EVENT_CUSTOM_HASH_MAX_SIZE);
 
 	/* key is a NULL-terminated string */
-	info.keysize = sizeof(char[NAMEDATALEN]);
-	info.entrysize = sizeof(WaitEventCustomEntryByName);
 	WaitEventCustomHashByName =
-		ShmemInitHash("WaitEventCustom hash by name",
-					  WAIT_EVENT_CUSTOM_HASH_INIT_SIZE,
-					  WAIT_EVENT_CUSTOM_HASH_MAX_SIZE,
-					  &info,
-					  HASH_ELEM | HASH_STRINGS);
+		shmem_hash_make(WaitEventCustomEntryByName, wait_event_name,
+						"WaitEventCustom hash by name",
+						WAIT_EVENT_CUSTOM_HASH_INIT_SIZE,
+						WAIT_EVENT_CUSTOM_HASH_MAX_SIZE);
 }
 
 /*
diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c
index 7bb000ddbd3..bdc7e2237f6 100644
--- a/src/backend/utils/adt/array_typanalyze.c
+++ b/src/backend/utils/adt/array_typanalyze.c
@@ -223,7 +223,6 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 
 	/* This is D from the LC algorithm. */
 	HTAB	   *elements_tab;
-	HASHCTL		elem_hash_ctl;
 	HASH_SEQ_STATUS scan_status;
 
 	/* This is the current bucket number from the LC algorithm */
@@ -236,7 +235,6 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 	TrackItem  *item;
 	int			slot_idx;
 	HTAB	   *count_tab;
-	HASHCTL		count_hash_ctl;
 	DECountItem *count_item;
 
 	extra_data = (ArrayAnalyzeExtraData *) stats->extra_data;
@@ -276,24 +274,13 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 	 * worry about overflowing the initial size. Also we don't need to pay any
 	 * attention to locking and memory management.
 	 */
-	elem_hash_ctl.keysize = sizeof(Datum);
-	elem_hash_ctl.entrysize = sizeof(TrackItem);
-	elem_hash_ctl.hash = element_hash;
-	elem_hash_ctl.match = element_match;
-	elem_hash_ctl.hcxt = CurrentMemoryContext;
-	elements_tab = hash_create("Analyzed elements table",
-							   num_mcelem,
-							   &elem_hash_ctl,
-							   HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+	elements_tab = hash_make_fn(TrackItem, key,
+								"Analyzed elements table", num_mcelem,
+								element_hash, element_match);
 
 	/* hashtable for array distinct elements counts */
-	count_hash_ctl.keysize = sizeof(int);
-	count_hash_ctl.entrysize = sizeof(DECountItem);
-	count_hash_ctl.hcxt = CurrentMemoryContext;
-	count_tab = hash_create("Array distinct element count table",
-							64,
-							&count_hash_ctl,
-							HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	count_tab = hash_make(DECountItem, count,
+						  "Array distinct element count table", 64);
 
 	/* Initialize counters. */
 	b_current = 1;
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index 78e84727fdc..08b02cc0f51 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -933,19 +933,8 @@ json_unique_hash_match(const void *key1, const void *key2, Size keysize)
 static void
 json_unique_check_init(JsonUniqueCheckState *cxt)
 {
-	HASHCTL		ctl;
-
-	memset(&ctl, 0, sizeof(ctl));
-	ctl.keysize = sizeof(JsonUniqueHashEntry);
-	ctl.entrysize = sizeof(JsonUniqueHashEntry);
-	ctl.hcxt = CurrentMemoryContext;
-	ctl.hash = json_unique_hash;
-	ctl.match = json_unique_hash_match;
-
-	*cxt = hash_create("json object hashtable",
-					   32,
-					   &ctl,
-					   HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION | HASH_COMPARE);
+	*cxt = hashset_make_fn(JsonUniqueHashEntry, "json object hashtable", 32,
+						   json_unique_hash, json_unique_hash_match);
 }
 
 static void
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index 1e5b60801e4..4156453f8c3 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -3808,18 +3808,12 @@ static HTAB *
 get_json_object_as_hash(const char *json, int len, const char *funcname,
 						Node *escontext)
 {
-	HASHCTL		ctl;
 	HTAB	   *tab;
 	JHashState *state;
 	JsonSemAction *sem;
 
-	ctl.keysize = NAMEDATALEN;
-	ctl.entrysize = sizeof(JsonHashEntry);
-	ctl.hcxt = CurrentMemoryContext;
-	tab = hash_create("json object hashtable",
-					  100,
-					  &ctl,
-					  HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+	tab = hash_make(JsonHashEntry, fname,
+					"json object hashtable", 100);
 
 	state = palloc0_object(JHashState);
 	sem = palloc0_object(JsonSemAction);
@@ -4213,7 +4207,6 @@ populate_recordset_object_start(void *state)
 {
 	PopulateRecordsetState *_state = (PopulateRecordsetState *) state;
 	int			lex_level = _state->lex->lex_level;
-	HASHCTL		ctl;
 
 	/* Reject object at top level: we must have an array at level 0 */
 	if (lex_level == 0)
@@ -4227,13 +4220,8 @@ populate_recordset_object_start(void *state)
 		return JSON_SUCCESS;
 
 	/* Object at level 1: set up a new hash table for this object */
-	ctl.keysize = NAMEDATALEN;
-	ctl.entrysize = sizeof(JsonHashEntry);
-	ctl.hcxt = CurrentMemoryContext;
-	_state->json_hash = hash_create("json object hashtable",
-									100,
-									&ctl,
-									HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+	_state->json_hash = hash_make(JsonHashEntry, fname,
+								  "json object hashtable", 100);
 
 	return JSON_SUCCESS;
 }
diff --git a/src/backend/utils/adt/mcxtfuncs.c b/src/backend/utils/adt/mcxtfuncs.c
index 12b8d4cefaf..694252cfed5 100644
--- a/src/backend/utils/adt/mcxtfuncs.c
+++ b/src/backend/utils/adt/mcxtfuncs.c
@@ -185,17 +185,10 @@ pg_get_backend_memory_contexts(PG_FUNCTION_ARGS)
 	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
 	int			context_id;
 	List	   *contexts;
-	HASHCTL		ctl;
 	HTAB	   *context_id_lookup;
 
-	ctl.keysize = sizeof(MemoryContext);
-	ctl.entrysize = sizeof(MemoryContextId);
-	ctl.hcxt = CurrentMemoryContext;
-
-	context_id_lookup = hash_create("pg_get_backend_memory_contexts",
-									256,
-									&ctl,
-									HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	context_id_lookup = hash_make(MemoryContextId, context,
+								  "pg_get_backend_memory_contexts", 256);
 
 	InitMaterializedSRF(fcinfo, 0);
 
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index bbadecef5f9..5334c9dd753 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -2859,30 +2859,25 @@ ri_NullCheck(TupleDesc tupDesc,
 static void
 ri_InitHashTables(void)
 {
-	HASHCTL		ctl;
-
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(RI_ConstraintInfo);
-	ri_constraint_cache = hash_create("RI constraint cache",
-									  RI_INIT_CONSTRAINTHASHSIZE,
-									  &ctl, HASH_ELEM | HASH_BLOBS);
+	ri_constraint_cache = hash_make_cxt(RI_ConstraintInfo, constraint_id,
+										"RI constraint cache",
+										RI_INIT_CONSTRAINTHASHSIZE,
+										TopMemoryContext);
 
 	/* Arrange to flush cache on pg_constraint changes */
 	CacheRegisterSyscacheCallback(CONSTROID,
 								  InvalidateConstraintCacheCallBack,
 								  (Datum) 0);
 
-	ctl.keysize = sizeof(RI_QueryKey);
-	ctl.entrysize = sizeof(RI_QueryHashEntry);
-	ri_query_cache = hash_create("RI query cache",
-								 RI_INIT_QUERYHASHSIZE,
-								 &ctl, HASH_ELEM | HASH_BLOBS);
-
-	ctl.keysize = sizeof(RI_CompareKey);
-	ctl.entrysize = sizeof(RI_CompareHashEntry);
-	ri_compare_cache = hash_create("RI compare cache",
+	ri_query_cache = hash_make_cxt(RI_QueryHashEntry, key,
+								   "RI query cache",
 								   RI_INIT_QUERYHASHSIZE,
-								   &ctl, HASH_ELEM | HASH_BLOBS);
+								   TopMemoryContext);
+
+	ri_compare_cache = hash_make_cxt(RI_CompareHashEntry, key,
+									 "RI compare cache",
+									 RI_INIT_QUERYHASHSIZE,
+									 TopMemoryContext);
 }
 
 
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index b5a7ad9066e..5468e07d0cf 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -3887,7 +3887,6 @@ static void
 set_rtable_names(deparse_namespace *dpns, List *parent_namespaces,
 				 Bitmapset *rels_used)
 {
-	HASHCTL		hash_ctl;
 	HTAB	   *names_hash;
 	NameHashEntry *hentry;
 	bool		found;
@@ -3903,13 +3902,9 @@ set_rtable_names(deparse_namespace *dpns, List *parent_namespaces,
 	 * We use a hash table to hold known names, so that this process is O(N)
 	 * not O(N^2) for N names.
 	 */
-	hash_ctl.keysize = NAMEDATALEN;
-	hash_ctl.entrysize = sizeof(NameHashEntry);
-	hash_ctl.hcxt = CurrentMemoryContext;
-	names_hash = hash_create("set_rtable_names names",
-							 list_length(dpns->rtable),
-							 &hash_ctl,
-							 HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+	names_hash = hash_make(NameHashEntry, name,
+						   "set_rtable_names names",
+						   list_length(dpns->rtable));
 
 	/* Preload the hash table with names appearing in parent_namespaces */
 	foreach(lc, parent_namespaces)
@@ -4980,7 +4975,6 @@ expand_colnames_array_to(deparse_columns *colinfo, int n)
 static void
 build_colinfo_names_hash(deparse_columns *colinfo)
 {
-	HASHCTL		hash_ctl;
 	int			i;
 	ListCell   *lc;
 
@@ -4996,13 +4990,10 @@ build_colinfo_names_hash(deparse_columns *colinfo)
 	 * Set up the hash table.  The entries are just strings with no other
 	 * payload.
 	 */
-	hash_ctl.keysize = NAMEDATALEN;
-	hash_ctl.entrysize = NAMEDATALEN;
-	hash_ctl.hcxt = CurrentMemoryContext;
-	colinfo->names_hash = hash_create("deparse_columns names",
-									  colinfo->num_cols + colinfo->num_new_cols,
-									  &hash_ctl,
-									  HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+	colinfo->names_hash =
+		hashset_make_cxt(NameData, "deparse_columns names",
+						 colinfo->num_cols + colinfo->num_new_cols,
+						 CurrentMemoryContext);
 
 	/*
 	 * Preload the hash table with any names already present (these would have
diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c
index 72edc8f665b..771bf77fa75 100644
--- a/src/backend/utils/cache/attoptcache.c
+++ b/src/backend/utils/cache/attoptcache.c
@@ -21,6 +21,7 @@
 #include "utils/catcache.h"
 #include "utils/hsearch.h"
 #include "utils/inval.h"
+#include "utils/memutils.h"
 #include "utils/syscache.h"
 #include "varatt.h"
 
@@ -96,22 +97,15 @@ relatt_cache_syshash(const void *key, Size keysize)
 static void
 InitializeAttoptCache(void)
 {
-	HASHCTL		ctl;
-
-	/* Initialize the hash table. */
-	ctl.keysize = sizeof(AttoptCacheKey);
-	ctl.entrysize = sizeof(AttoptCacheEntry);
-
 	/*
 	 * AttoptCacheEntry takes hash value from the system cache. For
 	 * AttoptCacheHash we use the same hash in order to speedup search by hash
 	 * value. This is used by hash_seq_init_with_hash_value().
 	 */
-	ctl.hash = relatt_cache_syshash;
-
-	AttoptCacheHash =
-		hash_create("Attopt cache", 256, &ctl,
-					HASH_ELEM | HASH_FUNCTION);
+	AttoptCacheHash = hash_make_fn_cxt(AttoptCacheEntry, key,
+									   "Attopt cache", 256,
+									   relatt_cache_syshash, NULL,
+									   TopMemoryContext);
 
 	/* Make sure we've initialized CacheMemoryContext. */
 	if (!CacheMemoryContext)
diff --git a/src/backend/utils/cache/evtcache.c b/src/backend/utils/cache/evtcache.c
index 2b4453e54a7..652f6f25be3 100644
--- a/src/backend/utils/cache/evtcache.c
+++ b/src/backend/utils/cache/evtcache.c
@@ -76,7 +76,6 @@ EventCacheLookup(EventTriggerEvent event)
 static void
 BuildEventTriggerCache(void)
 {
-	HASHCTL		ctl;
 	HTAB	   *cache;
 	Relation	rel;
 	Relation	irel;
@@ -113,11 +112,9 @@ BuildEventTriggerCache(void)
 	EventTriggerCacheState = ETCS_REBUILD_STARTED;
 
 	/* Create new hash table. */
-	ctl.keysize = sizeof(EventTriggerEvent);
-	ctl.entrysize = sizeof(EventTriggerCacheEntry);
-	ctl.hcxt = EventTriggerCacheContext;
-	cache = hash_create("EventTriggerCacheHash", 32, &ctl,
-						HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	cache = hash_make_cxt(EventTriggerCacheEntry, event,
+						  "EventTriggerCacheHash", 32,
+						  EventTriggerCacheContext);
 
 	/*
 	 * Prepare to scan pg_event_trigger in name order.
diff --git a/src/backend/utils/cache/funccache.c b/src/backend/utils/cache/funccache.c
index 701c294b88d..bef938c37c0 100644
--- a/src/backend/utils/cache/funccache.c
+++ b/src/backend/utils/cache/funccache.c
@@ -58,19 +58,13 @@ static int	cfunc_match(const void *key1, const void *key2, Size keysize);
 static void
 cfunc_hashtable_init(void)
 {
-	HASHCTL		ctl;
-
 	/* don't allow double-initialization */
 	Assert(cfunc_hashtable == NULL);
 
-	ctl.keysize = sizeof(CachedFunctionHashKey);
-	ctl.entrysize = sizeof(CachedFunctionHashEntry);
-	ctl.hash = cfunc_hash;
-	ctl.match = cfunc_match;
-	cfunc_hashtable = hash_create("Cached function hash",
-								  FUNCS_PER_USER,
-								  &ctl,
-								  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
+	cfunc_hashtable = hash_make_fn_cxt(CachedFunctionHashEntry, key,
+									   "Cached function hash", FUNCS_PER_USER,
+									   cfunc_hash, cfunc_match,
+									   TopMemoryContext);
 }
 
 /*
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 6b634c9fff1..c15b127bdbf 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -1672,17 +1672,14 @@ LookupOpclassInfo(Oid operatorClassOid,
 
 	if (OpClassCache == NULL)
 	{
-		/* First time through: initialize the opclass cache */
-		HASHCTL		ctl;
-
 		/* Also make sure CacheMemoryContext exists */
 		if (!CacheMemoryContext)
 			CreateCacheMemoryContext();
 
-		ctl.keysize = sizeof(Oid);
-		ctl.entrysize = sizeof(OpClassCacheEnt);
-		OpClassCache = hash_create("Operator class cache", 64,
-								   &ctl, HASH_ELEM | HASH_BLOBS);
+		/* First time through: initialize the opclass cache */
+		OpClassCache = hash_make_cxt(OpClassCacheEnt, opclassoid,
+									 "Operator class cache", 64,
+									 TopMemoryContext);
 	}
 
 	opcentry = (OpClassCacheEnt *) hash_search(OpClassCache,
@@ -3996,7 +3993,6 @@ RelationAssumeNewRelfilelocator(Relation relation)
 void
 RelationCacheInitialize(void)
 {
-	HASHCTL		ctl;
 	int			allocsize;
 
 	/*
@@ -4008,10 +4004,9 @@ RelationCacheInitialize(void)
 	/*
 	 * create hashtable that indexes the relcache
 	 */
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(RelIdCacheEnt);
-	RelationIdCache = hash_create("Relcache by OID", INITRELCACHESIZE,
-								  &ctl, HASH_ELEM | HASH_BLOBS);
+	RelationIdCache = hash_make_cxt(RelIdCacheEnt, reloid,
+									"Relcache by OID", INITRELCACHESIZE,
+									TopMemoryContext);
 
 	/*
 	 * reserve enough in_progress_list slots for many cases
diff --git a/src/backend/utils/cache/relfilenumbermap.c b/src/backend/utils/cache/relfilenumbermap.c
index 6f970fafa05..09b05dec9d0 100644
--- a/src/backend/utils/cache/relfilenumbermap.c
+++ b/src/backend/utils/cache/relfilenumbermap.c
@@ -85,7 +85,6 @@ RelfilenumberMapInvalidateCallback(Datum arg, Oid relid)
 static void
 InitializeRelfilenumberMap(void)
 {
-	HASHCTL		ctl;
 	int			i;
 
 	/* Make sure we've initialized CacheMemoryContext. */
@@ -113,13 +112,9 @@ InitializeRelfilenumberMap(void)
 	 * initialized when fmgr_info_cxt() above ERRORs out with an out of memory
 	 * error.
 	 */
-	ctl.keysize = sizeof(RelfilenumberMapKey);
-	ctl.entrysize = sizeof(RelfilenumberMapEntry);
-	ctl.hcxt = CacheMemoryContext;
-
 	RelfilenumberMapHash =
-		hash_create("RelfilenumberMap cache", 64, &ctl,
-					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		hash_make_cxt(RelfilenumberMapEntry, key,
+					  "RelfilenumberMap cache", 64, CacheMemoryContext);
 
 	/* Watch for invalidation events. */
 	CacheRegisterRelcacheCallback(RelfilenumberMapInvalidateCallback,
diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c
index 8f1a5e69595..bf007d69ad4 100644
--- a/src/backend/utils/cache/spccache.c
+++ b/src/backend/utils/cache/spccache.c
@@ -27,6 +27,7 @@
 #include "utils/catcache.h"
 #include "utils/hsearch.h"
 #include "utils/inval.h"
+#include "utils/memutils.h"
 #include "utils/spccache.h"
 #include "utils/syscache.h"
 #include "varatt.h"
@@ -77,14 +78,10 @@ InvalidateTableSpaceCacheCallback(Datum arg, int cacheid, uint32 hashvalue)
 static void
 InitializeTableSpaceCache(void)
 {
-	HASHCTL		ctl;
-
 	/* Initialize the hash table. */
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(TableSpaceCacheEntry);
 	TableSpaceCacheHash =
-		hash_create("TableSpace cache", 16, &ctl,
-					HASH_ELEM | HASH_BLOBS);
+		hash_make_cxt(TableSpaceCacheEntry, oid,
+					  "TableSpace cache", 16, TopMemoryContext);
 
 	/* Make sure we've initialized CacheMemoryContext. */
 	if (!CacheMemoryContext)
diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c
index 71e49b2b919..e9ae4a5f093 100644
--- a/src/backend/utils/cache/ts_cache.c
+++ b/src/backend/utils/cache/ts_cache.c
@@ -117,12 +117,9 @@ lookup_ts_parser_cache(Oid prsId)
 	if (TSParserCacheHash == NULL)
 	{
 		/* First time through: initialize the hash table */
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(Oid);
-		ctl.entrysize = sizeof(TSParserCacheEntry);
-		TSParserCacheHash = hash_create("Tsearch parser cache", 4,
-										&ctl, HASH_ELEM | HASH_BLOBS);
+		TSParserCacheHash = hash_make_cxt(TSParserCacheEntry, prsId,
+										  "Tsearch parser cache", 4,
+										  TopMemoryContext);
 		/* Flush cache on pg_ts_parser changes */
 		CacheRegisterSyscacheCallback(TSPARSEROID, InvalidateTSCacheCallBack,
 									  PointerGetDatum(TSParserCacheHash));
@@ -212,12 +209,9 @@ lookup_ts_dictionary_cache(Oid dictId)
 	if (TSDictionaryCacheHash == NULL)
 	{
 		/* First time through: initialize the hash table */
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(Oid);
-		ctl.entrysize = sizeof(TSDictionaryCacheEntry);
-		TSDictionaryCacheHash = hash_create("Tsearch dictionary cache", 8,
-											&ctl, HASH_ELEM | HASH_BLOBS);
+		TSDictionaryCacheHash = hash_make_cxt(TSDictionaryCacheEntry, dictId,
+											  "Tsearch dictionary cache", 8,
+											  TopMemoryContext);
 		/* Flush cache on pg_ts_dict and pg_ts_template changes */
 		CacheRegisterSyscacheCallback(TSDICTOID, InvalidateTSCacheCallBack,
 									  PointerGetDatum(TSDictionaryCacheHash));
@@ -363,12 +357,9 @@ lookup_ts_dictionary_cache(Oid dictId)
 static void
 init_ts_config_cache(void)
 {
-	HASHCTL		ctl;
-
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(TSConfigCacheEntry);
-	TSConfigCacheHash = hash_create("Tsearch configuration cache", 16,
-									&ctl, HASH_ELEM | HASH_BLOBS);
+	TSConfigCacheHash = hash_make_cxt(TSConfigCacheEntry, cfgId,
+									  "Tsearch configuration cache", 16,
+									  TopMemoryContext);
 	/* Flush cache on pg_ts_config and pg_ts_config_map changes */
 	CacheRegisterSyscacheCallback(TSCONFIGOID, InvalidateTSCacheCallBack,
 								  PointerGetDatum(TSConfigCacheHash));
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index dc4b1a56414..74da418a77c 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -392,28 +392,23 @@ lookup_type_cache(Oid type_id, int flags)
 	if (TypeCacheHash == NULL)
 	{
 		/* First time through: initialize the hash table */
-		HASHCTL		ctl;
 		int			allocsize;
 
-		ctl.keysize = sizeof(Oid);
-		ctl.entrysize = sizeof(TypeCacheEntry);
-
 		/*
 		 * TypeCacheEntry takes hash value from the system cache. For
 		 * TypeCacheHash we use the same hash in order to speedup search by
 		 * hash value. This is used by hash_seq_init_with_hash_value().
 		 */
-		ctl.hash = type_cache_syshash;
-
-		TypeCacheHash = hash_create("Type information cache", 64,
-									&ctl, HASH_ELEM | HASH_FUNCTION);
+		TypeCacheHash = hash_make_fn_cxt(TypeCacheEntry, type_id,
+										 "Type information cache", 64,
+										 type_cache_syshash, NULL,
+										 TopMemoryContext);
 
 		Assert(RelIdToTypeIdCacheHash == NULL);
 
-		ctl.keysize = sizeof(Oid);
-		ctl.entrysize = sizeof(RelIdToTypeIdCacheEntry);
-		RelIdToTypeIdCacheHash = hash_create("Map from relid to OID of cached composite type", 64,
-											 &ctl, HASH_ELEM | HASH_BLOBS);
+		RelIdToTypeIdCacheHash = hash_make_cxt(RelIdToTypeIdCacheEntry, relid,
+											   "Map from relid to OID of cached composite type",
+											   64, TopMemoryContext);
 
 		/* Also set up callbacks for SI invalidations */
 		CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
@@ -2050,15 +2045,11 @@ assign_record_type_typmod(TupleDesc tupDesc)
 	if (RecordCacheHash == NULL)
 	{
 		/* First time through: initialize the hash table */
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(TupleDesc);	/* just the pointer */
-		ctl.entrysize = sizeof(RecordCacheEntry);
-		ctl.hash = record_type_typmod_hash;
-		ctl.match = record_type_typmod_compare;
-		RecordCacheHash = hash_create("Record information cache", 64,
-									  &ctl,
-									  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
+		RecordCacheHash = hash_make_fn_cxt(RecordCacheEntry, tupdesc,
+										   "Record information cache", 64,
+										   record_type_typmod_hash,
+										   record_type_typmod_compare,
+										   TopMemoryContext);
 
 		/* Also make sure CacheMemoryContext exists */
 		if (!CacheMemoryContext)
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index e636cc81cf8..5d99467e2bd 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -26,6 +26,7 @@
 #include "storage/fd.h"
 #include "storage/shmem.h"
 #include "utils/hsearch.h"
+#include "utils/memutils.h"
 
 
 /* signature for PostgreSQL-specific library init function */
@@ -671,14 +672,9 @@ find_rendezvous_variable(const char *varName)
 	/* Create a hashtable if we haven't already done so in this process */
 	if (rendezvousHash == NULL)
 	{
-		HASHCTL		ctl;
-
-		ctl.keysize = NAMEDATALEN;
-		ctl.entrysize = sizeof(rendezvousHashEntry);
-		rendezvousHash = hash_create("Rendezvous variable hash",
-									 16,
-									 &ctl,
-									 HASH_ELEM | HASH_STRINGS);
+		rendezvousHash = hash_make_cxt(rendezvousHashEntry, varName,
+									   "Rendezvous variable hash", 16,
+									   TopMemoryContext);
 	}
 
 	/* Find or create the hashtable entry for this varName */
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index 05984e7ef26..deb85511ad5 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -32,6 +32,7 @@
 #include "utils/fmgrtab.h"
 #include "utils/guc.h"
 #include "utils/lsyscache.h"
+#include "utils/memutils.h"
 #include "utils/syscache.h"
 
 /*
@@ -547,14 +548,8 @@ record_C_func(HeapTuple procedureTuple,
 	/* Create the hash table if it doesn't exist yet */
 	if (CFuncHash == NULL)
 	{
-		HASHCTL		hash_ctl;
-
-		hash_ctl.keysize = sizeof(Oid);
-		hash_ctl.entrysize = sizeof(CFuncHashTabEntry);
-		CFuncHash = hash_create("CFuncHash",
-								100,
-								&hash_ctl,
-								HASH_ELEM | HASH_BLOBS);
+		CFuncHash = hash_make_cxt(CFuncHashTabEntry, fn_oid,
+								  "CFuncHash", 100, TopMemoryContext);
 	}
 
 	entry = (CFuncHashTabEntry *)
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index ae9d5f3fb70..618d0100d38 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -871,7 +871,6 @@ build_guc_variables(void)
 {
 	int			size_vars;
 	int			num_vars = 0;
-	HASHCTL		hash_ctl;
 	GUCHashEntry *hentry;
 	bool		found;
 
@@ -894,15 +893,10 @@ build_guc_variables(void)
 	 */
 	size_vars = num_vars + num_vars / 4;
 
-	hash_ctl.keysize = sizeof(char *);
-	hash_ctl.entrysize = sizeof(GUCHashEntry);
-	hash_ctl.hash = guc_name_hash;
-	hash_ctl.match = guc_name_match;
-	hash_ctl.hcxt = GUCMemoryContext;
-	guc_hashtab = hash_create("GUC hash table",
-							  size_vars,
-							  &hash_ctl,
-							  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+	guc_hashtab = hash_make_fn_cxt(GUCHashEntry, gucname,
+								   "GUC hash table", size_vars,
+								   guc_name_hash, guc_name_match,
+								   GUCMemoryContext);
 
 	for (int i = 0; ConfigureNames[i].name; i++)
 	{
diff --git a/src/backend/utils/misc/injection_point.c b/src/backend/utils/misc/injection_point.c
index c06b0e9b800..a6cb46402aa 100644
--- a/src/backend/utils/misc/injection_point.c
+++ b/src/backend/utils/misc/injection_point.c
@@ -127,16 +127,10 @@ injection_point_cache_add(const char *name,
 	/* If first time, initialize */
 	if (InjectionPointCache == NULL)
 	{
-		HASHCTL		hash_ctl;
-
-		hash_ctl.keysize = sizeof(char[INJ_NAME_MAXLEN]);
-		hash_ctl.entrysize = sizeof(InjectionPointCacheEntry);
-		hash_ctl.hcxt = TopMemoryContext;
-
-		InjectionPointCache = hash_create("InjectionPoint cache hash",
-										  MAX_INJECTION_POINTS,
-										  &hash_ctl,
-										  HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+		InjectionPointCache = hash_make_cxt(InjectionPointCacheEntry, name,
+											"InjectionPoint cache hash",
+											MAX_INJECTION_POINTS,
+											TopMemoryContext);
 	}
 
 	entry = (InjectionPointCacheEntry *)
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 4fa4d432021..0a1082f4845 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -103,23 +103,19 @@ static MemoryContext TopPortalContext = NULL;
 void
 EnablePortalManager(void)
 {
-	HASHCTL		ctl;
-
 	Assert(TopPortalContext == NULL);
 
 	TopPortalContext = AllocSetContextCreate(TopMemoryContext,
 											 "TopPortalContext",
 											 ALLOCSET_DEFAULT_SIZES);
 
-	ctl.keysize = MAX_PORTALNAME_LEN;
-	ctl.entrysize = sizeof(PortalHashEnt);
-
 	/*
 	 * use PORTALS_PER_USER as a guess of how many hash table entries to
 	 * create, initially
 	 */
-	PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
-								  &ctl, HASH_ELEM | HASH_STRINGS);
+	PortalHashTable = hash_make_cxt(PortalHashEnt, portalname,
+									"Portal hash", PORTALS_PER_USER,
+									TopMemoryContext);
 }
 
 /*
diff --git a/src/backend/utils/time/combocid.c b/src/backend/utils/time/combocid.c
index 614b7c1006b..4c31675535a 100644
--- a/src/backend/utils/time/combocid.c
+++ b/src/backend/utils/time/combocid.c
@@ -214,8 +214,6 @@ GetComboCommandId(CommandId cmin, CommandId cmax)
 	 */
 	if (comboHash == NULL)
 	{
-		HASHCTL		hash_ctl;
-
 		/* Make array first; existence of hash table asserts array exists */
 		comboCids = (ComboCidKeyData *)
 			MemoryContextAlloc(TopTransactionContext,
@@ -223,14 +221,9 @@ GetComboCommandId(CommandId cmin, CommandId cmax)
 		sizeComboCids = CCID_ARRAY_SIZE;
 		usedComboCids = 0;
 
-		hash_ctl.keysize = sizeof(ComboCidKeyData);
-		hash_ctl.entrysize = sizeof(ComboCidEntryData);
-		hash_ctl.hcxt = TopTransactionContext;
-
-		comboHash = hash_create("Combo CIDs",
-								CCID_HASH_SIZE,
-								&hash_ctl,
-								HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		comboHash = hash_make_cxt(ComboCidEntryData, key,
+								  "Combo CIDs", CCID_HASH_SIZE,
+								  TopTransactionContext);
 	}
 
 	/*
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index 02eced3b2c5..49044bc808b 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -390,7 +390,6 @@ _PG_init(void)
 	 * "plperl.use_strict"
 	 */
 	static bool inited = false;
-	HASHCTL		hash_ctl;
 
 	if (inited)
 		return;
@@ -460,19 +459,13 @@ _PG_init(void)
 	/*
 	 * Create hash tables.
 	 */
-	hash_ctl.keysize = sizeof(Oid);
-	hash_ctl.entrysize = sizeof(plperl_interp_desc);
-	plperl_interp_hash = hash_create("PL/Perl interpreters",
-									 8,
-									 &hash_ctl,
-									 HASH_ELEM | HASH_BLOBS);
-
-	hash_ctl.keysize = sizeof(plperl_proc_key);
-	hash_ctl.entrysize = sizeof(plperl_proc_ptr);
-	plperl_proc_hash = hash_create("PL/Perl procedures",
-								   32,
-								   &hash_ctl,
-								   HASH_ELEM | HASH_BLOBS);
+	plperl_interp_hash = hash_make_cxt(plperl_interp_desc, user_id,
+									   "PL/Perl interpreters", 8,
+									   TopMemoryContext);
+
+	plperl_proc_hash = hash_make_cxt(plperl_proc_ptr, proc_key,
+									 "PL/Perl procedures", 32,
+									 TopMemoryContext);
 
 	/*
 	 * Save the default opmask.
@@ -578,14 +571,9 @@ select_perl_context(bool trusted)
 	/* Make sure we have a query_hash for this interpreter */
 	if (interp_desc->query_hash == NULL)
 	{
-		HASHCTL		hash_ctl;
-
-		hash_ctl.keysize = NAMEDATALEN;
-		hash_ctl.entrysize = sizeof(plperl_query_entry);
-		interp_desc->query_hash = hash_create("PL/Perl queries",
-											  32,
-											  &hash_ctl,
-											  HASH_ELEM | HASH_STRINGS);
+		interp_desc->query_hash = hash_make_cxt(plperl_query_entry, query_name,
+												"PL/Perl queries", 32,
+												TopMemoryContext);
 	}
 
 	/*
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index 75325117ec9..fbb88affa15 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -3995,8 +3995,6 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate,
 					 EState *simple_eval_estate,
 					 ResourceOwner simple_eval_resowner)
 {
-	HASHCTL		ctl;
-
 	/* this link will be restored at exit from plpgsql_call_handler */
 	func->cur_estate = estate;
 
@@ -4051,12 +4049,10 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate,
 	/* Create the session-wide cast-expression hash if we didn't already */
 	if (cast_expr_hash == NULL)
 	{
-		ctl.keysize = sizeof(plpgsql_CastHashKey);
-		ctl.entrysize = sizeof(plpgsql_CastExprHashEntry);
-		cast_expr_hash = hash_create("PLpgSQL cast expressions",
-									 16,	/* start small and extend */
-									 &ctl,
-									 HASH_ELEM | HASH_BLOBS);
+		cast_expr_hash = hash_make_cxt(plpgsql_CastExprHashEntry, key,
+									   "PLpgSQL cast expressions",
+									   16,	/* start small and extend */
+									   TopMemoryContext);
 	}
 
 	/* set up for use of appropriate simple-expression EState and cast hash */
@@ -4064,13 +4060,9 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate,
 	{
 		estate->simple_eval_estate = simple_eval_estate;
 		/* Private cast hash just lives in function's main context */
-		ctl.keysize = sizeof(plpgsql_CastHashKey);
-		ctl.entrysize = sizeof(plpgsql_CastHashEntry);
-		ctl.hcxt = CurrentMemoryContext;
-		estate->cast_hash = hash_create("PLpgSQL private cast cache",
-										16, /* start small and extend */
-										&ctl,
-										HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		estate->cast_hash = hash_make(plpgsql_CastHashEntry, key,
+									  "PLpgSQL private cast cache",
+									  16);	/* start small and extend */
 	}
 	else
 	{
@@ -4078,12 +4070,10 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate,
 		/* Create the session-wide cast-info hash table if we didn't already */
 		if (shared_cast_hash == NULL)
 		{
-			ctl.keysize = sizeof(plpgsql_CastHashKey);
-			ctl.entrysize = sizeof(plpgsql_CastHashEntry);
-			shared_cast_hash = hash_create("PLpgSQL cast cache",
-										   16,	/* start small and extend */
-										   &ctl,
-										   HASH_ELEM | HASH_BLOBS);
+			shared_cast_hash = hash_make_cxt(plpgsql_CastHashEntry, key,
+											 "PLpgSQL cast cache",
+											 16,	/* start small and extend */
+											 TopMemoryContext);
 		}
 		estate->cast_hash = shared_cast_hash;
 	}
diff --git a/src/pl/plpython/plpy_plpymodule.c b/src/pl/plpython/plpy_plpymodule.c
index 72806c17e17..71a76b85ec8 100644
--- a/src/pl/plpython/plpy_plpymodule.c
+++ b/src/pl/plpython/plpy_plpymodule.c
@@ -16,6 +16,7 @@
 #include "plpy_subxactobject.h"
 #include "plpy_util.h"
 #include "utils/builtins.h"
+#include "utils/memutils.h"
 
 HTAB	   *PLy_spi_exceptions = NULL;
 
@@ -145,7 +146,6 @@ static void
 PLy_add_exceptions(PyObject *plpy)
 {
 	PyObject   *excmod;
-	HASHCTL		hash_ctl;
 
 	PLy_exc_error = PLy_create_exception("plpy.Error", NULL, NULL,
 										 "Error", plpy);
@@ -158,10 +158,9 @@ PLy_add_exceptions(PyObject *plpy)
 	if (excmod == NULL)
 		PLy_elog(ERROR, "could not create the spiexceptions module");
 
-	hash_ctl.keysize = sizeof(int);
-	hash_ctl.entrysize = sizeof(PLyExceptionEntry);
-	PLy_spi_exceptions = hash_create("PL/Python SPI exceptions", 256,
-									 &hash_ctl, HASH_ELEM | HASH_BLOBS);
+	PLy_spi_exceptions = hash_make_cxt(PLyExceptionEntry, sqlstate,
+									   "PL/Python SPI exceptions", 256,
+									   TopMemoryContext);
 
 	PLy_generate_spi_exceptions(excmod, PLy_exc_spi_error);
 
diff --git a/src/pl/plpython/plpy_procedure.c b/src/pl/plpython/plpy_procedure.c
index 750ba586e0c..fc728f7ab8a 100644
--- a/src/pl/plpython/plpy_procedure.c
+++ b/src/pl/plpython/plpy_procedure.c
@@ -29,12 +29,9 @@ static char *PLy_procedure_munge_source(const char *name, const char *src);
 void
 init_procedure_caches(void)
 {
-	HASHCTL		hash_ctl;
-
-	hash_ctl.keysize = sizeof(PLyProcedureKey);
-	hash_ctl.entrysize = sizeof(PLyProcedureEntry);
-	PLy_procedure_cache = hash_create("PL/Python procedures", 32, &hash_ctl,
-									  HASH_ELEM | HASH_BLOBS);
+	PLy_procedure_cache = hash_make_cxt(PLyProcedureEntry, key,
+										"PL/Python procedures", 32,
+										TopMemoryContext);
 }
 
 /*
diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c
index 187698ccdd2..7070051463d 100644
--- a/src/pl/tcl/pltcl.c
+++ b/src/pl/tcl/pltcl.c
@@ -408,7 +408,6 @@ void
 _PG_init(void)
 {
 	Tcl_NotifierProcs notifier;
-	HASHCTL		hash_ctl;
 
 	/* Be sure we do initialization only once (should be redundant now) */
 	if (pltcl_pm_init_done)
@@ -446,22 +445,16 @@ _PG_init(void)
 	/************************************************************
 	 * Create the hash table for working interpreters
 	 ************************************************************/
-	hash_ctl.keysize = sizeof(Oid);
-	hash_ctl.entrysize = sizeof(pltcl_interp_desc);
-	pltcl_interp_htab = hash_create("PL/Tcl interpreters",
-									8,
-									&hash_ctl,
-									HASH_ELEM | HASH_BLOBS);
+	pltcl_interp_htab = hash_make_cxt(pltcl_interp_desc, user_id,
+									  "PL/Tcl interpreters", 8,
+									  TopMemoryContext);
 
 	/************************************************************
 	 * Create the hash table for function lookup
 	 ************************************************************/
-	hash_ctl.keysize = sizeof(pltcl_proc_key);
-	hash_ctl.entrysize = sizeof(pltcl_proc_ptr);
-	pltcl_proc_htab = hash_create("PL/Tcl functions",
-								  100,
-								  &hash_ctl,
-								  HASH_ELEM | HASH_BLOBS);
+	pltcl_proc_htab = hash_make_cxt(pltcl_proc_ptr, proc_key,
+									"PL/Tcl functions", 100,
+									TopMemoryContext);
 
 	/************************************************************
 	 * Define PL/Tcl's custom GUCs
diff --git a/src/timezone/pgtz.c b/src/timezone/pgtz.c
index eac988c21e7..c68de93b535 100644
--- a/src/timezone/pgtz.c
+++ b/src/timezone/pgtz.c
@@ -22,6 +22,7 @@
 #include "pgtz.h"
 #include "storage/fd.h"
 #include "utils/hsearch.h"
+#include "utils/memutils.h"
 
 
 /* Current session timezone (controlled by TimeZone GUC) */
@@ -201,15 +202,8 @@ static HTAB *timezone_cache = NULL;
 static bool
 init_timezone_hashtable(void)
 {
-	HASHCTL		hash_ctl;
-
-	hash_ctl.keysize = TZ_STRLEN_MAX + 1;
-	hash_ctl.entrysize = sizeof(pg_tz_cache);
-
-	timezone_cache = hash_create("Timezones",
-								 4,
-								 &hash_ctl,
-								 HASH_ELEM | HASH_STRINGS);
+	timezone_cache = hash_make_cxt(pg_tz_cache, tznameupper,
+								   "Timezones", 4, TopMemoryContext);
 	if (!timezone_cache)
 		return false;
 
-- 
2.52.0

