From f6d9426cc94f04cdb6878e0a48dde680388c7e24 Mon Sep 17 00:00:00 2001
From: Jelte Fennema-Nio <postgres@jeltef.nl>
Date: Thu, 4 Dec 2025 15:39:09 +0100
Subject: [PATCH v7 4/5] Use foreach_hash macro throughout the codebase

This starts using the new foreach_hash macro throughout the codebase.
This makes the code easier to read, but it obviously introduces
backpatching problems. We could choose not to do this refactor in order
to avoid that. Alternatively, we could do the refactor and also
backpatch these new macros so they can be used in backpatched code.

At the very least we should use the new macros in a few places, so that
they get test coverage.
---
 contrib/dblink/dblink.c                       |  5 +-
 .../pg_stat_statements/pg_stat_statements.c   | 39 +++------
 contrib/pg_trgm/trgm_regexp.c                 | 18 +---
 contrib/postgres_fdw/connection.c             | 26 ++----
 contrib/postgres_fdw/shippable.c              |  6 +-
 src/backend/access/heap/rewriteheap.c         | 18 +---
 src/backend/access/transam/xlogutils.c        | 20 +----
 src/backend/catalog/pg_enum.c                 | 16 ++--
 src/backend/catalog/storage.c                 | 18 ++--
 src/backend/commands/prepare.c                | 12 +--
 src/backend/commands/tablecmds.c              |  7 +-
 src/backend/optimizer/util/predtest.c         |  7 +-
 src/backend/parser/parse_oper.c               |  7 +-
 src/backend/partitioning/partdesc.c           |  6 +-
 src/backend/postmaster/autovacuum.c           |  7 +-
 src/backend/replication/logical/relation.c    | 37 ++------
 .../replication/logical/reorderbuffer.c       | 12 +--
 src/backend/replication/pgoutput/pgoutput.c   | 24 ++----
 src/backend/storage/buffer/bufmgr.c           | 10 +--
 src/backend/storage/ipc/shmem.c               | 12 +--
 src/backend/storage/ipc/standby.c             | 12 +--
 src/backend/storage/lmgr/lock.c               | 67 +++------------
 src/backend/storage/lmgr/lwlock.c             |  7 +-
 src/backend/storage/lmgr/predicate.c          | 18 +---
 src/backend/storage/smgr/smgr.c               |  7 +-
 src/backend/storage/sync/sync.c               | 13 +--
 src/backend/tsearch/ts_typanalyze.c           | 11 +--
 src/backend/utils/activity/wait_event.c       |  6 +-
 src/backend/utils/adt/array_typanalyze.c      | 17 ++--
 src/backend/utils/cache/relcache.c            | 53 ++++--------
 src/backend/utils/cache/relfilenumbermap.c    |  6 +-
 src/backend/utils/cache/spccache.c            |  6 +-
 src/backend/utils/cache/ts_cache.c            |  5 +-
 src/backend/utils/cache/typcache.c            | 11 +--
 src/backend/utils/misc/guc.c                  | 30 ++-----
 src/backend/utils/mmgr/portalmem.c            | 85 +++----------------
 src/pl/plperl/plperl.c                        |  6 +-
 37 files changed, 141 insertions(+), 526 deletions(-)

diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index 192418df4af..43d61c9cf65 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -1275,14 +1275,11 @@ PG_FUNCTION_INFO_V1(dblink_get_connections);
 Datum
 dblink_get_connections(PG_FUNCTION_ARGS)
 {
-	HASH_SEQ_STATUS status;
-	remoteConnHashEnt *hentry;
 	ArrayBuildState *astate = NULL;
 
 	if (remoteConnHash)
 	{
-		hash_seq_init(&status, remoteConnHash);
-		while ((hentry = (remoteConnHashEnt *) hash_seq_search(&status)) != NULL)
+		foreach_hash(remoteConnHashEnt, hentry, remoteConnHash)
 		{
 			/* ignore it if it's not an open connection */
 			if (hentry->rconn.conn == NULL)
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 8a67390e561..9193d3d18c3 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -741,9 +741,7 @@ pgss_shmem_shutdown(int code, Datum arg)
 	FILE	   *file;
 	char	   *qbuffer = NULL;
 	Size		qbuffer_size = 0;
-	HASH_SEQ_STATUS hash_seq;
 	int32		num_entries;
-	pgssEntry  *entry;
 
 	/* Don't try to dump during a crash. */
 	if (code)
@@ -777,8 +775,7 @@ pgss_shmem_shutdown(int code, Datum arg)
 	 * When serializing to disk, we store query texts immediately after their
 	 * entry data.  Any orphaned query texts are thereby excluded.
 	 */
-	hash_seq_init(&hash_seq, pgss_hash);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(pgssEntry, entry, pgss_hash)
 	{
 		int			len = entry->query_len;
 		char	   *qstr = qtext_fetch(entry->query_offset, len,
@@ -790,8 +787,8 @@ pgss_shmem_shutdown(int code, Datum arg)
 		if (fwrite(entry, sizeof(pgssEntry), 1, file) != 1 ||
 			fwrite(qstr, 1, len + 1, file) != len + 1)
 		{
-			/* note: we assume hash_seq_term won't change errno */
-			hash_seq_term(&hash_seq);
+			/* note: we assume foreach_hash_term won't change errno */
+			foreach_hash_term(entry);
 			goto error;
 		}
 	}
@@ -1695,8 +1692,6 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
 	Size		qbuffer_size = 0;
 	Size		extent = 0;
 	int			gc_count = 0;
-	HASH_SEQ_STATUS hash_seq;
-	pgssEntry  *entry;
 
 	/*
 	 * Superusers or roles with the privileges of pg_read_all_stats members
@@ -1825,8 +1820,7 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
 		}
 	}
 
-	hash_seq_init(&hash_seq, pgss_hash);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(pgssEntry, entry, pgss_hash)
 	{
 		Datum		values[PG_STAT_STATEMENTS_COLS];
 		bool		nulls[PG_STAT_STATEMENTS_COLS];
@@ -2170,9 +2164,7 @@ entry_cmp(const void *lhs, const void *rhs)
 static void
 entry_dealloc(void)
 {
-	HASH_SEQ_STATUS hash_seq;
 	pgssEntry **entries;
-	pgssEntry  *entry;
 	int			nvictims;
 	int			i;
 	Size		tottextlen;
@@ -2196,8 +2188,7 @@ entry_dealloc(void)
 	tottextlen = 0;
 	nvalidtexts = 0;
 
-	hash_seq_init(&hash_seq, pgss_hash);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(pgssEntry, entry, pgss_hash)
 	{
 		entries[i++] = entry;
 		/* "Sticky" entries get a different usage decay rate. */
@@ -2509,8 +2500,6 @@ gc_qtexts(void)
 	char	   *qbuffer;
 	Size		qbuffer_size;
 	FILE	   *qfile = NULL;
-	HASH_SEQ_STATUS hash_seq;
-	pgssEntry  *entry;
 	Size		extent;
 	int			nentries;
 
@@ -2552,8 +2541,7 @@ gc_qtexts(void)
 	extent = 0;
 	nentries = 0;
 
-	hash_seq_init(&hash_seq, pgss_hash);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(pgssEntry, entry, pgss_hash)
 	{
 		int			query_len = entry->query_len;
 		char	   *qry = qtext_fetch(entry->query_offset,
@@ -2576,7 +2564,7 @@ gc_qtexts(void)
 					(errcode_for_file_access(),
 					 errmsg("could not write file \"%s\": %m",
 							PGSS_TEXT_FILE)));
-			hash_seq_term(&hash_seq);
+			foreach_hash_term(entry);
 			goto gc_fail;
 		}
 
@@ -2643,8 +2631,7 @@ gc_fail:
 	 * Since the contents of the external file are now uncertain, mark all
 	 * hashtable entries as having invalid texts.
 	 */
-	hash_seq_init(&hash_seq, pgss_hash);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(pgssEntry, entry, pgss_hash)
 	{
 		entry->query_offset = 0;
 		entry->query_len = -1;
@@ -2708,8 +2695,6 @@ if (e) { \
 static TimestampTz
 entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only)
 {
-	HASH_SEQ_STATUS hash_seq;
-	pgssEntry  *entry;
 	FILE	   *qfile;
 	int64		num_entries;
 	int64		num_remove = 0;
@@ -2729,6 +2714,8 @@ entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only)
 	if (userid != 0 && dbid != 0 && queryid != INT64CONST(0))
 	{
 		/* If all the parameters are available, use the fast path. */
+		pgssEntry  *entry;
+
 		memset(&key, 0, sizeof(pgssHashKey));
 		key.userid = userid;
 		key.dbid = dbid;
@@ -2752,8 +2739,7 @@ entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only)
 	else if (userid != 0 || dbid != 0 || queryid != INT64CONST(0))
 	{
 		/* Reset entries corresponding to valid parameters. */
-		hash_seq_init(&hash_seq, pgss_hash);
-		while ((entry = hash_seq_search(&hash_seq)) != NULL)
+		foreach_hash(pgssEntry, entry, pgss_hash)
 		{
 			if ((!userid || entry->key.userid == userid) &&
 				(!dbid || entry->key.dbid == dbid) &&
@@ -2766,8 +2752,7 @@ entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only)
 	else
 	{
 		/* Reset all entries. */
-		hash_seq_init(&hash_seq, pgss_hash);
-		while ((entry = hash_seq_search(&hash_seq)) != NULL)
+		foreach_hash(pgssEntry, entry, pgss_hash)
 		{
 			SINGLE_ENTRY_RESET(entry);
 		}
diff --git a/contrib/pg_trgm/trgm_regexp.c b/contrib/pg_trgm/trgm_regexp.c
index ecd4d677948..67e86981476 100644
--- a/contrib/pg_trgm/trgm_regexp.c
+++ b/contrib/pg_trgm/trgm_regexp.c
@@ -1449,10 +1449,8 @@ prefixContains(TrgmPrefix *prefix1, TrgmPrefix *prefix2)
 static bool
 selectColorTrigrams(TrgmNFA *trgmNFA)
 {
-	HASH_SEQ_STATUS scan_status;
 	int			arcsCount = trgmNFA->arcsCount,
 				i;
-	TrgmState  *state;
 	ColorTrgmInfo *colorTrgms;
 	int64		totalTrgmCount;
 	float4		totalTrgmPenalty;
@@ -1463,8 +1461,7 @@ selectColorTrigrams(TrgmNFA *trgmNFA)
 	trgmNFA->colorTrgms = colorTrgms;
 
 	i = 0;
-	hash_seq_init(&scan_status, trgmNFA->states);
-	while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrgmState, state, trgmNFA->states)
 	{
 		ListCell   *cell;
 
@@ -1926,8 +1923,6 @@ packGraph(TrgmNFA *trgmNFA, MemoryContext rcontext)
 	int			snumber = 2,
 				arcIndex,
 				arcsCount;
-	HASH_SEQ_STATUS scan_status;
-	TrgmState  *state;
 	TrgmPackArcInfo *arcs;
 	TrgmPackedArc *packedArcs;
 	TrgmPackedGraph *result;
@@ -1935,8 +1930,7 @@ packGraph(TrgmNFA *trgmNFA, MemoryContext rcontext)
 				j;
 
 	/* Enumerate surviving states, giving init and fin reserved numbers */
-	hash_seq_init(&scan_status, trgmNFA->states);
-	while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrgmState, state, trgmNFA->states)
 	{
 		while (state->parent)
 			state = state->parent;
@@ -1958,8 +1952,7 @@ packGraph(TrgmNFA *trgmNFA, MemoryContext rcontext)
 	/* Collect array of all arcs */
 	arcs = palloc_array(TrgmPackArcInfo, trgmNFA->arcsCount);
 	arcIndex = 0;
-	hash_seq_init(&scan_status, trgmNFA->states);
-	while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrgmState, state, trgmNFA->states)
 	{
 		TrgmState  *source = state;
 		ListCell   *cell;
@@ -2202,16 +2195,13 @@ static void
 printTrgmNFA(TrgmNFA *trgmNFA)
 {
 	StringInfoData buf;
-	HASH_SEQ_STATUS scan_status;
-	TrgmState  *state;
 	TrgmState  *initstate = NULL;
 
 	initStringInfo(&buf);
 
 	appendStringInfoString(&buf, "\ndigraph transformedNFA {\n");
 
-	hash_seq_init(&scan_status, trgmNFA->states);
-	while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrgmState, state, trgmNFA->states)
 	{
 		ListCell   *cell;
 
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index 5c77c9ecde5..9902567f1fb 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -1044,8 +1044,6 @@ pgfdw_report_internal(int elevel, PGresult *res, PGconn *conn,
 static void
 pgfdw_xact_callback(XactEvent event, void *arg)
 {
-	HASH_SEQ_STATUS scan;
-	ConnCacheEntry *entry;
 	List	   *pending_entries = NIL;
 	List	   *cancel_requested = NIL;
 
@@ -1057,8 +1055,7 @@ pgfdw_xact_callback(XactEvent event, void *arg)
 	 * Scan all connection cache entries to find open remote transactions, and
 	 * close them.
 	 */
-	hash_seq_init(&scan, ConnectionHash);
-	while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+	foreach_hash(ConnCacheEntry, entry, ConnectionHash)
 	{
 		PGresult   *res;
 
@@ -1195,8 +1192,6 @@ static void
 pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
 					   SubTransactionId parentSubid, void *arg)
 {
-	HASH_SEQ_STATUS scan;
-	ConnCacheEntry *entry;
 	int			curlevel;
 	List	   *pending_entries = NIL;
 	List	   *cancel_requested = NIL;
@@ -1215,8 +1210,7 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
 	 * of the current level, and close them.
 	 */
 	curlevel = GetCurrentTransactionNestLevel();
-	hash_seq_init(&scan, ConnectionHash);
-	while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+	foreach_hash(ConnCacheEntry, entry, ConnectionHash)
 	{
 		char		sql[100];
 
@@ -1307,14 +1301,10 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
 static void
 pgfdw_inval_callback(Datum arg, int cacheid, uint32 hashvalue)
 {
-	HASH_SEQ_STATUS scan;
-	ConnCacheEntry *entry;
-
 	Assert(cacheid == FOREIGNSERVEROID || cacheid == USERMAPPINGOID);
 
 	/* ConnectionHash must exist already, if we're registered */
-	hash_seq_init(&scan, ConnectionHash);
-	while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+	foreach_hash(ConnCacheEntry, entry, ConnectionHash)
 	{
 		/* Ignore invalid entries */
 		if (entry->conn == NULL)
@@ -2165,8 +2155,6 @@ postgres_fdw_get_connections_internal(FunctionCallInfo fcinfo,
 									  enum pgfdwVersion api_version)
 {
 	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	HASH_SEQ_STATUS scan;
-	ConnCacheEntry *entry;
 
 	InitMaterializedSRF(fcinfo, 0);
 
@@ -2189,8 +2177,7 @@ postgres_fdw_get_connections_internal(FunctionCallInfo fcinfo,
 			elog(ERROR, "incorrect number of output arguments");
 	}
 
-	hash_seq_init(&scan, ConnectionHash);
-	while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+	foreach_hash(ConnCacheEntry, entry, ConnectionHash)
 	{
 		ForeignServer *server;
 		Datum		values[POSTGRES_FDW_GET_CONNECTIONS_COLS] = {0};
@@ -2392,8 +2379,6 @@ postgres_fdw_disconnect_all(PG_FUNCTION_ARGS)
 static bool
 disconnect_cached_connections(Oid serverid)
 {
-	HASH_SEQ_STATUS scan;
-	ConnCacheEntry *entry;
 	bool		all = !OidIsValid(serverid);
 	bool		result = false;
 
@@ -2404,8 +2389,7 @@ disconnect_cached_connections(Oid serverid)
 	if (!ConnectionHash)
 		return false;
 
-	hash_seq_init(&scan, ConnectionHash);
-	while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+	foreach_hash(ConnCacheEntry, entry, ConnectionHash)
 	{
 		/* Ignore cache entry if no open connection right now. */
 		if (!entry->conn)
diff --git a/contrib/postgres_fdw/shippable.c b/contrib/postgres_fdw/shippable.c
index ec1852490e0..c1d60e582a6 100644
--- a/contrib/postgres_fdw/shippable.c
+++ b/contrib/postgres_fdw/shippable.c
@@ -65,17 +65,13 @@ typedef struct
 static void
 InvalidateShippableCacheCallback(Datum arg, int cacheid, uint32 hashvalue)
 {
-	HASH_SEQ_STATUS status;
-	ShippableCacheEntry *entry;
-
 	/*
 	 * In principle we could flush only cache entries relating to the
 	 * pg_foreign_server entry being outdated; but that would be more
 	 * complicated, and it's probably not worth the trouble.  So for now, just
 	 * flush all entries.
 	 */
-	hash_seq_init(&status, ShippableCacheHash);
-	while ((entry = (ShippableCacheEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(ShippableCacheEntry, entry, ShippableCacheHash)
 	{
 		if (hash_search(ShippableCacheHash,
 						&entry->key,
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index fbe4d6e6e98..d9042767da2 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -290,16 +290,11 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm
 void
 end_heap_rewrite(RewriteState state)
 {
-	HASH_SEQ_STATUS seq_status;
-	UnresolvedTup unresolved;
-
 	/*
 	 * Write any remaining tuples in the UnresolvedTups table. If we have any
 	 * left, they should in fact be dead, but let's err on the safe side.
 	 */
-	hash_seq_init(&seq_status, state->rs_unresolved_tups);
-
-	while ((unresolved = hash_seq_search(&seq_status)) != NULL)
+	foreach_hash(UnresolvedTupData, unresolved, state->rs_unresolved_tups)
 	{
 		ItemPointerSetInvalid(&unresolved->tuple->t_data->t_ctid);
 		raw_heap_insert(state, unresolved->tuple);
@@ -797,8 +792,6 @@ logical_begin_heap_rewrite(RewriteState state)
 static void
 logical_heap_rewrite_flush_mappings(RewriteState state)
 {
-	HASH_SEQ_STATUS seq_status;
-	RewriteMappingFile *src;
 	dlist_mutable_iter iter;
 
 	Assert(state->rs_logical_rewrite);
@@ -810,8 +803,7 @@ logical_heap_rewrite_flush_mappings(RewriteState state)
 	elog(DEBUG1, "flushing %u logical rewrite mapping entries",
 		 state->rs_num_rewrite_mappings);
 
-	hash_seq_init(&seq_status, state->rs_logical_mappings);
-	while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
+	foreach_hash(RewriteMappingFile, src, state->rs_logical_mappings)
 	{
 		char	   *waldata;
 		char	   *waldata_start;
@@ -895,9 +887,6 @@ logical_heap_rewrite_flush_mappings(RewriteState state)
 static void
 logical_end_heap_rewrite(RewriteState state)
 {
-	HASH_SEQ_STATUS seq_status;
-	RewriteMappingFile *src;
-
 	/* done, no logical rewrite in progress */
 	if (!state->rs_logical_rewrite)
 		return;
@@ -907,8 +896,7 @@ logical_end_heap_rewrite(RewriteState state)
 		logical_heap_rewrite_flush_mappings(state);
 
 	/* Iterate over all mappings we have written and fsync the files. */
-	hash_seq_init(&seq_status, state->rs_logical_mappings);
-	while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
+	foreach_hash(RewriteMappingFile, src, state->rs_logical_mappings)
 	{
 		if (FileSync(src->vfd, WAIT_EVENT_LOGICAL_REWRITE_SYNC) != 0)
 			ereport(data_sync_elevel(ERROR),
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index d11e42c9490..3c7fc65b8d4 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -160,15 +160,10 @@ static void
 forget_invalid_pages(RelFileLocator locator, ForkNumber forkno,
 					 BlockNumber minblkno)
 {
-	HASH_SEQ_STATUS status;
-	xl_invalid_page *hentry;
-
 	if (invalid_page_tab == NULL)
 		return;					/* nothing to do */
 
-	hash_seq_init(&status, invalid_page_tab);
-
-	while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
+	foreach_hash(xl_invalid_page, hentry, invalid_page_tab)
 	{
 		if (RelFileLocatorEquals(hentry->key.locator, locator) &&
 			hentry->key.forkno == forkno &&
@@ -190,15 +185,10 @@ forget_invalid_pages(RelFileLocator locator, ForkNumber forkno,
 static void
 forget_invalid_pages_db(Oid dbid)
 {
-	HASH_SEQ_STATUS status;
-	xl_invalid_page *hentry;
-
 	if (invalid_page_tab == NULL)
 		return;					/* nothing to do */
 
-	hash_seq_init(&status, invalid_page_tab);
-
-	while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
+	foreach_hash(xl_invalid_page, hentry, invalid_page_tab)
 	{
 		if (hentry->key.locator.dbOid == dbid)
 		{
@@ -228,20 +218,16 @@ XLogHaveInvalidPages(void)
 void
 XLogCheckInvalidPages(void)
 {
-	HASH_SEQ_STATUS status;
-	xl_invalid_page *hentry;
 	bool		foundone = false;
 
 	if (invalid_page_tab == NULL)
 		return;					/* nothing to do */
 
-	hash_seq_init(&status, invalid_page_tab);
-
 	/*
 	 * Our strategy is to emit WARNING messages for all remaining entries and
 	 * only PANIC after we've dumped all the available info.
 	 */
-	while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
+	foreach_hash(xl_invalid_page, hentry, invalid_page_tab)
 	{
 		report_invalid_page(WARNING, hentry->key.locator, hentry->key.forkno,
 							hentry->key.blkno, hentry->present);
diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c
index 86c8bada557..be4a8ecd9e6 100644
--- a/src/backend/catalog/pg_enum.c
+++ b/src/backend/catalog/pg_enum.c
@@ -838,12 +838,10 @@ SerializeUncommittedEnums(void *space, Size size)
 	/* Write out all the OIDs from the types hash table, if there is one. */
 	if (uncommitted_enum_types)
 	{
-		HASH_SEQ_STATUS status;
-		Oid		   *value;
-
-		hash_seq_init(&status, uncommitted_enum_types);
-		while ((value = (Oid *) hash_seq_search(&status)))
+		foreach_hash(Oid, value, uncommitted_enum_types)
+		{
 			*serialized++ = *value;
+		}
 	}
 
 	/* Write out the terminator. */
@@ -852,12 +850,10 @@ SerializeUncommittedEnums(void *space, Size size)
 	/* Write out all the OIDs from the values hash table, if there is one. */
 	if (uncommitted_enum_values)
 	{
-		HASH_SEQ_STATUS status;
-		Oid		   *value;
-
-		hash_seq_init(&status, uncommitted_enum_values);
-		while ((value = (Oid *) hash_seq_search(&status)))
+		foreach_hash(Oid, value, uncommitted_enum_values)
+		{
 			*serialized++ = *value;
+		}
 	}
 
 	/* Write out the terminator. */
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index db3e08319b5..27d21b556be 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -594,10 +594,7 @@ void
 SerializePendingSyncs(Size maxSize, char *startAddress)
 {
 	HTAB	   *tmphash;
-	HASH_SEQ_STATUS scan;
-	PendingRelSync *sync;
 	PendingRelDelete *delete;
-	RelFileLocator *src;
 	RelFileLocator *dest = (RelFileLocator *) startAddress;
 
 	if (!pendingSyncHash)
@@ -608,9 +605,10 @@ SerializePendingSyncs(Size maxSize, char *startAddress)
 						   hash_get_num_entries(pendingSyncHash));
 
 	/* collect all rlocator from pending syncs */
-	hash_seq_init(&scan, pendingSyncHash);
-	while ((sync = (PendingRelSync *) hash_seq_search(&scan)))
+	foreach_hash(PendingRelSync, sync, pendingSyncHash)
+	{
 		(void) hash_search(tmphash, &sync->rlocator, HASH_ENTER, NULL);
+	}
 
 	/* remove deleted rnodes */
 	for (delete = pendingDeletes; delete != NULL; delete = delete->next)
@@ -618,9 +616,10 @@ SerializePendingSyncs(Size maxSize, char *startAddress)
 			(void) hash_search(tmphash, &delete->rlocator,
 							   HASH_REMOVE, NULL);
 
-	hash_seq_init(&scan, tmphash);
-	while ((src = (RelFileLocator *) hash_seq_search(&scan)))
+	foreach_hash(RelFileLocator, src, tmphash)
+	{
 		*dest++ = *src;
+	}
 
 	hash_destroy(tmphash);
 
@@ -733,8 +732,6 @@ smgrDoPendingSyncs(bool isCommit, bool isParallelWorker)
 	int			nrels = 0,
 				maxrels = 0;
 	SMgrRelation *srels = NULL;
-	HASH_SEQ_STATUS scan;
-	PendingRelSync *pendingsync;
 
 	Assert(GetCurrentTransactionNestLevel() == 1);
 
@@ -763,8 +760,7 @@ smgrDoPendingSyncs(bool isCommit, bool isParallelWorker)
 			(void) hash_search(pendingSyncHash, &pending->rlocator,
 							   HASH_REMOVE, NULL);
 
-	hash_seq_init(&scan, pendingSyncHash);
-	while ((pendingsync = (PendingRelSync *) hash_seq_search(&scan)))
+	foreach_hash(PendingRelSync, pendingsync, pendingSyncHash)
 	{
 		ForkNumber	fork;
 		BlockNumber nblocks[MAX_FORKNUM + 1];
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index eb1a7ecfebc..4ad9c10a7f8 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -533,16 +533,12 @@ DropPreparedStatement(const char *stmt_name, bool showError)
 void
 DropAllPreparedStatements(void)
 {
-	HASH_SEQ_STATUS seq;
-	PreparedStatement *entry;
-
 	/* nothing cached */
 	if (!prepared_queries)
 		return;
 
 	/* walk over cache */
-	hash_seq_init(&seq, prepared_queries);
-	while ((entry = hash_seq_search(&seq)) != NULL)
+	foreach_hash(PreparedStatement, entry, prepared_queries)
 	{
 		/* Release the plancache entry */
 		DropCachedPlan(entry->plansource);
@@ -689,11 +685,7 @@ pg_prepared_statement(PG_FUNCTION_ARGS)
 	/* hash table might be uninitialized */
 	if (prepared_queries)
 	{
-		HASH_SEQ_STATUS hash_seq;
-		PreparedStatement *prep_stmt;
-
-		hash_seq_init(&hash_seq, prepared_queries);
-		while ((prep_stmt = hash_seq_search(&hash_seq)) != NULL)
+		foreach_hash(PreparedStatement, prep_stmt, prepared_queries)
 		{
 			TupleDesc	result_desc;
 			Datum		values[8];
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 40a39989eaa..7722507c838 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -2242,14 +2242,9 @@ ExecuteTruncateGuts(List *explicit_rels,
 	/* Now go through the hash table, and truncate foreign tables */
 	if (ft_htab)
 	{
-		ForeignTruncateInfo *ft_info;
-		HASH_SEQ_STATUS seq;
-
-		hash_seq_init(&seq, ft_htab);
-
 		PG_TRY();
 		{
-			while ((ft_info = hash_seq_search(&seq)) != NULL)
+			foreach_hash(ForeignTruncateInfo, ft_info, ft_htab)
 			{
 				FdwRoutine *routine = GetFdwRoutineByServerId(ft_info->serverid);
 
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index 3bf5e7ac8bb..4ae1454894b 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -2342,15 +2342,10 @@ get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it)
 static void
 InvalidateOprProofCacheCallBack(Datum arg, int cacheid, uint32 hashvalue)
 {
-	HASH_SEQ_STATUS status;
-	OprProofCacheEntry *hentry;
-
 	Assert(OprProofCacheHash != NULL);
 
 	/* Currently we just reset all entries; hard to be smarter ... */
-	hash_seq_init(&status, OprProofCacheHash);
-
-	while ((hentry = (OprProofCacheEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(OprProofCacheEntry, hentry, OprProofCacheHash)
 	{
 		hentry->have_implic = false;
 		hentry->have_refute = false;
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index d36e7ad3030..b25ab0e6b7c 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -1076,15 +1076,10 @@ make_oper_cache_entry(OprCacheKey *key, Oid opr_oid)
 static void
 InvalidateOprCacheCallBack(Datum arg, int cacheid, uint32 hashvalue)
 {
-	HASH_SEQ_STATUS status;
-	OprCacheEntry *hentry;
-
 	Assert(OprCacheHash != NULL);
 
 	/* Currently we just flush all entries; hard to be smarter ... */
-	hash_seq_init(&status, OprCacheHash);
-
-	while ((hentry = (OprCacheEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(OprCacheEntry, hentry, OprCacheHash)
 	{
 		if (hash_search(OprCacheHash,
 						&hentry->key,
diff --git a/src/backend/partitioning/partdesc.c b/src/backend/partitioning/partdesc.c
index db26e4a82b6..3ecc938723b 100644
--- a/src/backend/partitioning/partdesc.c
+++ b/src/backend/partitioning/partdesc.c
@@ -478,11 +478,7 @@ PartitionDirectoryLookup(PartitionDirectory pdir, Relation rel)
 void
 DestroyPartitionDirectory(PartitionDirectory pdir)
 {
-	HASH_SEQ_STATUS status;
-	PartitionDirectoryEntry *pde;
-
-	hash_seq_init(&status, pdir->pdir_hash);
-	while ((pde = hash_seq_search(&status)) != NULL)
+	foreach_hash(PartitionDirectoryEntry, pde, pdir->pdir_hash)
 		RelationDecrementReferenceCount(pde->rel);
 }
 
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index d4a29bbf87b..0540a4e2d7f 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -1018,8 +1018,6 @@ rebuild_database_list(Oid newdb)
 		TimestampTz current_time;
 		int			millis_increment;
 		avl_dbase  *dbary;
-		avl_dbase  *db;
-		HASH_SEQ_STATUS seq;
 		int			i;
 
 		/* put all the hash elements into an array */
@@ -1030,8 +1028,7 @@ rebuild_database_list(Oid newdb)
 #endif
 
 		i = 0;
-		hash_seq_init(&seq, dbhash);
-		while ((db = hash_seq_search(&seq)) != NULL)
+		foreach_hash(avl_dbase, db, dbhash)
 			memcpy(&(dbary[i++]), db, sizeof(avl_dbase));
 
 		/* sort the array */
@@ -1056,7 +1053,7 @@ rebuild_database_list(Oid newdb)
 		 */
 		for (i = 0; i < nelems; i++)
 		{
-			db = &(dbary[i]);
+			avl_dbase  *db = &(dbary[i]);
 
 			current_time = TimestampTzPlusMilliseconds(current_time,
 													   millis_increment);
diff --git a/src/backend/replication/logical/relation.c b/src/backend/replication/logical/relation.c
index fcf295f1df1..fef9c357a3f 100644
--- a/src/backend/replication/logical/relation.c
+++ b/src/backend/replication/logical/relation.c
@@ -64,25 +64,19 @@ static Oid	FindLogicalRepLocalIndex(Relation localrel, LogicalRepRelation *remot
 static void
 logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid)
 {
-	LogicalRepRelMapEntry *entry;
-
 	/* Just to be sure. */
 	if (LogicalRepRelMap == NULL)
 		return;
 
 	if (reloid != InvalidOid)
 	{
-		HASH_SEQ_STATUS status;
-
-		hash_seq_init(&status, LogicalRepRelMap);
-
 		/* TODO, use inverse lookup hashtable? */
-		while ((entry = (LogicalRepRelMapEntry *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LogicalRepRelMapEntry, entry, LogicalRepRelMap)
 		{
 			if (entry->localreloid == reloid)
 			{
 				entry->localrelvalid = false;
-				hash_seq_term(&status);
+				foreach_hash_term(entry);
 				break;
 			}
 		}
@@ -90,11 +84,7 @@ logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid)
 	else
 	{
 		/* invalidate all cache entries */
-		HASH_SEQ_STATUS status;
-
-		hash_seq_init(&status, LogicalRepRelMap);
-
-		while ((entry = (LogicalRepRelMapEntry *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LogicalRepRelMapEntry, entry, LogicalRepRelMap)
 			entry->localrelvalid = false;
 	}
 }
@@ -531,25 +521,19 @@ logicalrep_rel_close(LogicalRepRelMapEntry *rel, LOCKMODE lockmode)
 static void
 logicalrep_partmap_invalidate_cb(Datum arg, Oid reloid)
 {
-	LogicalRepPartMapEntry *entry;
-
 	/* Just to be sure. */
 	if (LogicalRepPartMap == NULL)
 		return;
 
 	if (reloid != InvalidOid)
 	{
-		HASH_SEQ_STATUS status;
-
-		hash_seq_init(&status, LogicalRepPartMap);
-
 		/* TODO, use inverse lookup hashtable? */
-		while ((entry = (LogicalRepPartMapEntry *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LogicalRepPartMapEntry, entry, LogicalRepPartMap)
 		{
 			if (entry->relmapentry.localreloid == reloid)
 			{
 				entry->relmapentry.localrelvalid = false;
-				hash_seq_term(&status);
+				foreach_hash_term(entry);
 				break;
 			}
 		}
@@ -557,11 +541,7 @@ logicalrep_partmap_invalidate_cb(Datum arg, Oid reloid)
 	else
 	{
 		/* invalidate all cache entries */
-		HASH_SEQ_STATUS status;
-
-		hash_seq_init(&status, LogicalRepPartMap);
-
-		while ((entry = (LogicalRepPartMapEntry *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LogicalRepPartMapEntry, entry, LogicalRepPartMap)
 			entry->relmapentry.localrelvalid = false;
 	}
 }
@@ -579,15 +559,12 @@ logicalrep_partmap_invalidate_cb(Datum arg, Oid reloid)
 void
 logicalrep_partmap_reset_relmap(LogicalRepRelation *remoterel)
 {
-	HASH_SEQ_STATUS status;
-	LogicalRepPartMapEntry *part_entry;
 	LogicalRepRelMapEntry *entry;
 
 	if (LogicalRepPartMap == NULL)
 		return;
 
-	hash_seq_init(&status, LogicalRepPartMap);
-	while ((part_entry = (LogicalRepPartMapEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LogicalRepPartMapEntry, part_entry, LogicalRepPartMap)
 	{
 		entry = &part_entry->relmapentry;
 
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index fa93a1bbde1..add31514444 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -5247,15 +5247,11 @@ ReorderBufferToastReplace(ReorderBuffer *rb, ReorderBufferTXN *txn,
 static void
 ReorderBufferToastReset(ReorderBuffer *rb, ReorderBufferTXN *txn)
 {
-	HASH_SEQ_STATUS hstat;
-	ReorderBufferToastEnt *ent;
-
 	if (txn->toast_hash == NULL)
 		return;
 
 	/* sequentially walk over the hash and free everything */
-	hash_seq_init(&hstat, txn->toast_hash);
-	while ((ent = (ReorderBufferToastEnt *) hash_seq_search(&hstat)) != NULL)
+	foreach_hash(ReorderBufferToastEnt, ent, txn->toast_hash)
 	{
 		dlist_mutable_iter it;
 
@@ -5318,11 +5314,7 @@ typedef struct RewriteMappingFile
 static void
 DisplayMapping(HTAB *tuplecid_data)
 {
-	HASH_SEQ_STATUS hstat;
-	ReorderBufferTupleCidEnt *ent;
-
-	hash_seq_init(&hstat, tuplecid_data);
-	while ((ent = (ReorderBufferTupleCidEnt *) hash_seq_search(&hstat)) != NULL)
+	foreach_hash(ReorderBufferTupleCidEnt, ent, tuplecid_data)
 	{
 		elog(DEBUG3, "mapping: node: %u/%u/%u tid: %u/%u cmin: %u, cmax: %u",
 			 ent->key.rlocator.dbOid,
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
index 0dcfbfdd609..507a873ca1b 100644
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -2343,13 +2343,9 @@ get_rel_sync_entry(PGOutputData *data, Relation relation)
 static void
 cleanup_rel_sync_cache(TransactionId xid, bool is_commit)
 {
-	HASH_SEQ_STATUS hash_seq;
-	RelationSyncEntry *entry;
-
 	Assert(RelationSyncCache != NULL);
 
-	hash_seq_init(&hash_seq, RelationSyncCache);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(RelationSyncEntry, entry, RelationSyncCache)
 	{
 		/*
 		 * We can set the schema_sent flag for an entry that has committed xid
@@ -2378,8 +2374,6 @@ cleanup_rel_sync_cache(TransactionId xid, bool is_commit)
 static void
 rel_sync_cache_relation_cb(Datum arg, Oid relid)
 {
-	RelationSyncEntry *entry;
-
 	/*
 	 * We can get here if the plugin was used in SQL interface as the
 	 * RelationSyncCache is destroyed when the decoding finishes, but there is
@@ -2402,18 +2396,16 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid)
 		 * Getting invalidations for relations that aren't in the table is
 		 * entirely normal.  So we don't care if it's found or not.
 		 */
-		entry = (RelationSyncEntry *) hash_search(RelationSyncCache, &relid,
-												  HASH_FIND, NULL);
+		RelationSyncEntry *entry = hash_search(RelationSyncCache, &relid,
+											   HASH_FIND, NULL);
+
 		if (entry != NULL)
 			entry->replicate_valid = false;
 	}
 	else
 	{
 		/* Whole cache must be flushed. */
-		HASH_SEQ_STATUS status;
-
-		hash_seq_init(&status, RelationSyncCache);
-		while ((entry = (RelationSyncEntry *) hash_seq_search(&status)) != NULL)
+		foreach_hash(RelationSyncEntry, entry, RelationSyncCache)
 		{
 			entry->replicate_valid = false;
 		}
@@ -2428,9 +2420,6 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid)
 static void
 rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue)
 {
-	HASH_SEQ_STATUS status;
-	RelationSyncEntry *entry;
-
 	/*
 	 * We can get here if the plugin was used in SQL interface as the
 	 * RelationSyncCache is destroyed when the decoding finishes, but there is
@@ -2443,8 +2432,7 @@ rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue)
 	 * We have no easy way to identify which cache entries this invalidation
 	 * event might have affected, so just mark them all invalid.
 	 */
-	hash_seq_init(&status, RelationSyncCache);
-	while ((entry = (RelationSyncEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(RelationSyncEntry, entry, RelationSyncCache)
 	{
 		entry->replicate_valid = false;
 	}
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 2a97ecdbba6..52bc88a0b4f 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -4171,7 +4171,6 @@ CheckForBufferLeaks(void)
 {
 #ifdef USE_ASSERT_CHECKING
 	int			RefCountErrors = 0;
-	PrivateRefCountEntry *res;
 	int			i;
 	char	   *s;
 
@@ -4180,7 +4179,7 @@ CheckForBufferLeaks(void)
 	{
 		if (PrivateRefCountArrayKeys[i] != InvalidBuffer)
 		{
-			res = &PrivateRefCountArray[i];
+			PrivateRefCountEntry *res = &PrivateRefCountArray[i];
 
 			s = DebugPrintBufferRefcount(res->buffer);
 			elog(WARNING, "buffer refcount leak: %s", s);
@@ -4193,12 +4192,9 @@ CheckForBufferLeaks(void)
 	/* if necessary search the hash */
 	if (PrivateRefCountOverflowed)
 	{
-		HASH_SEQ_STATUS hstat;
-
-		hash_seq_init(&hstat, PrivateRefCountHash);
-		while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
+		foreach_hash(PrivateRefCountEntry, ent, PrivateRefCountHash)
 		{
-			s = DebugPrintBufferRefcount(res->buffer);
+			s = DebugPrintBufferRefcount(ent->buffer);
 			elog(WARNING, "buffer refcount leak: %s", s);
 			pfree(s);
 			RefCountErrors++;
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index b6a8d1ec776..5fd1e31f346 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -539,8 +539,6 @@ pg_get_shmem_allocations(PG_FUNCTION_ARGS)
 {
 #define PG_GET_SHMEM_SIZES_COLS 4
 	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	HASH_SEQ_STATUS hstat;
-	ShmemIndexEnt *ent;
 	Size		named_allocated = 0;
 	Datum		values[PG_GET_SHMEM_SIZES_COLS];
 	bool		nulls[PG_GET_SHMEM_SIZES_COLS];
@@ -549,11 +547,9 @@ pg_get_shmem_allocations(PG_FUNCTION_ARGS)
 
 	LWLockAcquire(ShmemIndexLock, LW_SHARED);
 
-	hash_seq_init(&hstat, ShmemIndex);
-
 	/* output all allocated entries */
 	memset(nulls, 0, sizeof(nulls));
-	while ((ent = (ShmemIndexEnt *) hash_seq_search(&hstat)) != NULL)
+	foreach_hash(ShmemIndexEnt, ent, ShmemIndex)
 	{
 		values[0] = CStringGetTextDatum(ent->key);
 		values[1] = Int64GetDatum((char *) ent->location - (char *) ShmemSegHdr);
@@ -596,8 +592,6 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS)
 {
 #define PG_GET_SHMEM_NUMA_SIZES_COLS 3
 	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	HASH_SEQ_STATUS hstat;
-	ShmemIndexEnt *ent;
 	Datum		values[PG_GET_SHMEM_NUMA_SIZES_COLS];
 	bool		nulls[PG_GET_SHMEM_NUMA_SIZES_COLS];
 	Size		os_page_size;
@@ -647,11 +641,9 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS)
 
 	LWLockAcquire(ShmemIndexLock, LW_SHARED);
 
-	hash_seq_init(&hstat, ShmemIndex);
-
 	/* output all allocated entries */
 	memset(nulls, 0, sizeof(nulls));
-	while ((ent = (ShmemIndexEnt *) hash_seq_search(&hstat)) != NULL)
+	foreach_hash(ShmemIndexEnt, ent, ShmemIndex)
 	{
 		int			i;
 		char	   *startptr,
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 7a96e3e2039..8f00a8e1f02 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -1099,13 +1099,9 @@ StandbyReleaseLockTree(TransactionId xid, int nsubxids, TransactionId *subxids)
 void
 StandbyReleaseAllLocks(void)
 {
-	HASH_SEQ_STATUS status;
-	RecoveryLockXidEntry *entry;
-
 	elog(DEBUG2, "release all standby locks");
 
-	hash_seq_init(&status, RecoveryLockXidHash);
-	while ((entry = hash_seq_search(&status)))
+	foreach_hash(RecoveryLockXidEntry, entry, RecoveryLockXidHash)
 	{
 		StandbyReleaseXidEntryLocks(entry);
 		hash_search(RecoveryLockXidHash, entry, HASH_REMOVE, NULL);
@@ -1123,11 +1119,7 @@ StandbyReleaseAllLocks(void)
 void
 StandbyReleaseOldLocks(TransactionId oldxid)
 {
-	HASH_SEQ_STATUS status;
-	RecoveryLockXidEntry *entry;
-
-	hash_seq_init(&status, RecoveryLockXidHash);
-	while ((entry = hash_seq_search(&status)))
+	foreach_hash(RecoveryLockXidEntry, entry, RecoveryLockXidHash)
 	{
 		Assert(TransactionIdIsValid(entry->xid));
 
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 3f20c85881a..ebfb76cc476 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -2292,11 +2292,9 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 void
 LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 {
-	HASH_SEQ_STATUS status;
 	LockMethod	lockMethodTable;
 	int			i,
 				numLockModes;
-	LOCALLOCK  *locallock;
 	LOCK	   *lock;
 	int			partition;
 	bool		have_fast_path_lwlock = false;
@@ -2329,9 +2327,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 	 * pointers.  Fast-path locks are cleaned up during the locallock table
 	 * scan, though.
 	 */
-	hash_seq_init(&status, LockMethodLocalHash);
-
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 	{
 		/*
 		 * If the LOCALLOCK entry is unused, something must've gone wrong
@@ -2566,15 +2562,10 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 void
 LockReleaseSession(LOCKMETHODID lockmethodid)
 {
-	HASH_SEQ_STATUS status;
-	LOCALLOCK  *locallock;
-
 	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
 		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
 
-	hash_seq_init(&status, LockMethodLocalHash);
-
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 	{
 		/* Ignore items that are not of the specified lock method */
 		if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
@@ -2598,12 +2589,7 @@ LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
 {
 	if (locallocks == NULL)
 	{
-		HASH_SEQ_STATUS status;
-		LOCALLOCK  *locallock;
-
-		hash_seq_init(&status, LockMethodLocalHash);
-
-		while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 			ReleaseLockIfHeld(locallock, false);
 	}
 	else
@@ -2697,12 +2683,7 @@ LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
 
 	if (locallocks == NULL)
 	{
-		HASH_SEQ_STATUS status;
-		LOCALLOCK  *locallock;
-
-		hash_seq_init(&status, LockMethodLocalHash);
-
-		while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 			LockReassignOwner(locallock, parent);
 	}
 	else
@@ -3381,17 +3362,13 @@ CheckForSessionAndXactLocks(void)
 	} PerLockTagEntry;
 
 	HTAB	   *lockhtab;
-	HASH_SEQ_STATUS status;
-	LOCALLOCK  *locallock;
 
 	/* Create a local hash table keyed by LOCKTAG only */
 	lockhtab = hash_make(PerLockTagEntry, lock,
 						 "CheckForSessionAndXactLocks table", 256);
 
 	/* Scan local lock table to find entries for each LOCKTAG */
-	hash_seq_init(&status, LockMethodLocalHash);
-
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 	{
 		LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
 		PerLockTagEntry *hentry;
@@ -3454,16 +3431,11 @@ CheckForSessionAndXactLocks(void)
 void
 AtPrepare_Locks(void)
 {
-	HASH_SEQ_STATUS status;
-	LOCALLOCK  *locallock;
-
 	/* First, verify there aren't locks of both xact and session level */
 	CheckForSessionAndXactLocks();
 
 	/* Now do the per-locallock cleanup work */
-	hash_seq_init(&status, LockMethodLocalHash);
-
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 	{
 		TwoPhaseLockRecord record;
 		LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
@@ -3551,8 +3523,6 @@ void
 PostPrepare_Locks(FullTransactionId fxid)
 {
 	PGPROC	   *newproc = TwoPhaseGetDummyProc(fxid, false);
-	HASH_SEQ_STATUS status;
-	LOCALLOCK  *locallock;
 	LOCK	   *lock;
 	PROCLOCK   *proclock;
 	PROCLOCKTAG proclocktag;
@@ -3574,9 +3544,7 @@ PostPrepare_Locks(FullTransactionId fxid)
 	 * pointing to the same proclock, and we daren't end up with any dangling
 	 * pointers.
 	 */
-	hash_seq_init(&status, LockMethodLocalHash);
-
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 	{
 		LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
 		bool		haveSessionLock;
@@ -3772,8 +3740,6 @@ LockData *
 GetLockStatusData(void)
 {
 	LockData   *data;
-	PROCLOCK   *proclock;
-	HASH_SEQ_STATUS seqstat;
 	int			els;
 	int			el;
 	int			i;
@@ -3909,9 +3875,7 @@ GetLockStatusData(void)
 	}
 
 	/* Now scan the tables to copy the data */
-	hash_seq_init(&seqstat, LockMethodProcLockHash);
-
-	while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
+	foreach_hash(PROCLOCK, proclock, LockMethodProcLockHash)
 	{
 		PGPROC	   *proc = proclock->tag.myProc;
 		LOCK	   *lock = proclock->tag.myLock;
@@ -4150,8 +4114,6 @@ xl_standby_lock *
 GetRunningTransactionLocks(int *nlocks)
 {
 	xl_standby_lock *accessExclusiveLocks;
-	PROCLOCK   *proclock;
-	HASH_SEQ_STATUS seqstat;
 	int			i;
 	int			index;
 	int			els;
@@ -4173,10 +4135,9 @@ GetRunningTransactionLocks(int *nlocks)
 	 */
 	accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
 
-	/* Now scan the tables to copy the data */
-	hash_seq_init(&seqstat, LockMethodProcLockHash);
-
 	/*
+	 * Now scan the tables to copy the data.
+	 *
 	 * If lock is a currently granted AccessExclusiveLock then it will have
 	 * just one proclock holder, so locks are never accessed twice in this
 	 * particular case. Don't copy this code for use elsewhere because in the
@@ -4184,7 +4145,7 @@ GetRunningTransactionLocks(int *nlocks)
 	 * non-exclusive lock types.
 	 */
 	index = 0;
-	while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
+	foreach_hash(PROCLOCK, proclock, LockMethodProcLockHash)
 	{
 		/* make sure this definition matches the one used in LockAcquire */
 		if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
@@ -4279,18 +4240,14 @@ void
 DumpAllLocks(void)
 {
 	PGPROC	   *proc;
-	PROCLOCK   *proclock;
 	LOCK	   *lock;
-	HASH_SEQ_STATUS status;
 
 	proc = MyProc;
 
 	if (proc && proc->waitLock)
 		LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
 
-	hash_seq_init(&status, LockMethodProcLockHash);
-
-	while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PROCLOCK, proclock, LockMethodProcLockHash)
 	{
 		PROCLOCK_PRINT("DumpAllLocks", proclock);
 
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 83c5b77f952..c26b8d6996d 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -316,15 +316,10 @@ init_lwlock_stats(void)
 static void
 print_lwlock_stats(int code, Datum arg)
 {
-	HASH_SEQ_STATUS scan;
-	lwlock_stats *lwstats;
-
-	hash_seq_init(&scan, lwlock_stats_htab);
-
 	/* Grab an LWLock to keep different backends from mixing reports */
 	LWLockAcquire(&MainLWLockArray[0].lock, LW_EXCLUSIVE);
 
-	while ((lwstats = (lwlock_stats *) hash_seq_search(&scan)) != NULL)
+	foreach_hash(lwlock_stats, lwstats, lwlock_stats_htab)
 	{
 		fprintf(stderr,
 				"PID %d lwlock %s %p: shacq %u exacq %u blk %u spindelay %u dequeue self %u\n",
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 9f3779b9aea..63e260de4eb 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -1440,8 +1440,6 @@ GetPredicateLockStatusData(void)
 	int			i;
 	int			els,
 				el;
-	HASH_SEQ_STATUS seqstat;
-	PREDICATELOCK *predlock;
 
 	data = palloc_object(PredicateLockData);
 
@@ -1461,11 +1459,9 @@ GetPredicateLockStatusData(void)
 
 
 	/* Scan through PredicateLockHash and copy contents */
-	hash_seq_init(&seqstat, PredicateLockHash);
-
 	el = 0;
 
-	while ((predlock = (PREDICATELOCK *) hash_seq_search(&seqstat)))
+	foreach_hash(PREDICATELOCK, predlock, PredicateLockHash)
 	{
 		data->locktags[el] = predlock->tag.myTarget->tag;
 		data->xacts[el] = *predlock->tag.myXact;
@@ -2922,8 +2918,6 @@ exit:
 static void
 DropAllPredicateLocksFromTable(Relation relation, bool transfer)
 {
-	HASH_SEQ_STATUS seqstat;
-	PREDICATELOCKTARGET *oldtarget;
 	PREDICATELOCKTARGET *heaptarget;
 	Oid			dbId;
 	Oid			relId;
@@ -2979,9 +2973,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer)
 		RemoveScratchTarget(true);
 
 	/* Scan through target map */
-	hash_seq_init(&seqstat, PredicateLockTargetHash);
-
-	while ((oldtarget = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
+	foreach_hash(PREDICATELOCKTARGET, oldtarget, PredicateLockTargetHash)
 	{
 		dlist_mutable_iter iter;
 
@@ -4404,8 +4396,6 @@ CheckForSerializableConflictIn(Relation relation, const ItemPointerData *tid, Bl
 void
 CheckTableForSerializableConflictIn(Relation relation)
 {
-	HASH_SEQ_STATUS seqstat;
-	PREDICATELOCKTARGET *target;
 	Oid			dbId;
 	Oid			heapId;
 	int			i;
@@ -4439,9 +4429,7 @@ CheckTableForSerializableConflictIn(Relation relation)
 	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
 
 	/* Scan through target list */
-	hash_seq_init(&seqstat, PredicateLockTargetHash);
-
-	while ((target = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
+	foreach_hash(PREDICATELOCKTARGET, target, PredicateLockTargetHash)
 	{
 		dlist_mutable_iter iter;
 
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index e75bc9a2a08..6fffb420905 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -409,9 +409,6 @@ smgrdestroyall(void)
 void
 smgrreleaseall(void)
 {
-	HASH_SEQ_STATUS status;
-	SMgrRelation reln;
-
 	/* Nothing to do if hashtable not set up */
 	if (SMgrRelationHash == NULL)
 		return;
@@ -419,9 +416,7 @@ smgrreleaseall(void)
 	/* seems unsafe to accept interrupts while iterating */
 	HOLD_INTERRUPTS();
 
-	hash_seq_init(&status, SMgrRelationHash);
-
-	while ((reln = (SMgrRelation) hash_seq_search(&status)) != NULL)
+	foreach_hash(SMgrRelationData, reln, SMgrRelationHash)
 	{
 		smgrrelease(reln);
 	}
diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c
index 01acb30f1bb..b2d5c76ef78 100644
--- a/src/backend/storage/sync/sync.c
+++ b/src/backend/storage/sync/sync.c
@@ -280,8 +280,6 @@ ProcessSyncRequests(void)
 {
 	static bool sync_in_progress = false;
 
-	HASH_SEQ_STATUS hstat;
-	PendingFsyncEntry *entry;
 	int			absorb_counter;
 
 	/* Statistics on sync times */
@@ -338,8 +336,7 @@ ProcessSyncRequests(void)
 	if (sync_in_progress)
 	{
 		/* prior try failed, so update any stale cycle_ctr values */
-		hash_seq_init(&hstat, pendingOps);
-		while ((entry = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
+		foreach_hash(PendingFsyncEntry, entry, pendingOps)
 		{
 			entry->cycle_ctr = sync_cycle_ctr;
 		}
@@ -353,8 +350,7 @@ ProcessSyncRequests(void)
 
 	/* Now scan the hashtable for fsync requests to process */
 	absorb_counter = FSYNCS_PER_ABSORB;
-	hash_seq_init(&hstat, pendingOps);
-	while ((entry = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
+	foreach_hash(PendingFsyncEntry, entry, pendingOps)
 	{
 		int			failures;
 
@@ -495,13 +491,10 @@ RememberSyncRequest(const FileTag *ftag, SyncRequestType type)
 	}
 	else if (type == SYNC_FILTER_REQUEST)
 	{
-		HASH_SEQ_STATUS hstat;
-		PendingFsyncEntry *pfe;
 		ListCell   *cell;
 
 		/* Cancel matching fsync requests */
-		hash_seq_init(&hstat, pendingOps);
-		while ((pfe = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
+		foreach_hash(PendingFsyncEntry, pfe, pendingOps)
 		{
 			if (pfe->tag.handler == ftag->handler &&
 				syncsw[ftag->handler].sync_filetagmatches(ftag, &pfe->tag))
diff --git a/src/backend/tsearch/ts_typanalyze.c b/src/backend/tsearch/ts_typanalyze.c
index c2f3b63265d..1a4b328c4cd 100644
--- a/src/backend/tsearch/ts_typanalyze.c
+++ b/src/backend/tsearch/ts_typanalyze.c
@@ -149,7 +149,6 @@ compute_tsvector_stats(VacAttrStats *stats,
 
 	/* This is D from the LC algorithm. */
 	HTAB	   *lexemes_tab;
-	HASH_SEQ_STATUS scan_status;
 
 	/* This is the current bucket number from the LC algorithm */
 	int			b_current;
@@ -288,7 +287,6 @@ compute_tsvector_stats(VacAttrStats *stats,
 		int			nonnull_cnt = samplerows - null_cnt;
 		int			i;
 		TrackItem **sort_table;
-		TrackItem  *item;
 		int			track_len;
 		int			cutoff_freq;
 		int			minfreq,
@@ -315,10 +313,9 @@ compute_tsvector_stats(VacAttrStats *stats,
 		i = hash_get_num_entries(lexemes_tab);	/* surely enough space */
 		sort_table = palloc_array(TrackItem *, i);
 
-		hash_seq_init(&scan_status, lexemes_tab);
 		track_len = 0;
 		maxfreq = 0;
-		while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
+		foreach_hash(TrackItem, item, lexemes_tab)
 		{
 			if (item->frequency > cutoff_freq)
 			{
@@ -462,11 +459,7 @@ compute_tsvector_stats(VacAttrStats *stats,
 static void
 prune_lexemes_hashtable(HTAB *lexemes_tab, int b_current)
 {
-	HASH_SEQ_STATUS scan_status;
-	TrackItem  *item;
-
-	hash_seq_init(&scan_status, lexemes_tab);
-	while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrackItem, item, lexemes_tab)
 	{
 		if (item->frequency + item->delta <= b_current)
 		{
diff --git a/src/backend/utils/activity/wait_event.c b/src/backend/utils/activity/wait_event.c
index e5ac956daf0..e7b834b2408 100644
--- a/src/backend/utils/activity/wait_event.c
+++ b/src/backend/utils/activity/wait_event.c
@@ -299,8 +299,6 @@ char	  **
 GetWaitEventCustomNames(uint32 classId, int *nwaitevents)
 {
 	char	  **waiteventnames;
-	WaitEventCustomEntryByName *hentry;
-	HASH_SEQ_STATUS hash_seq;
 	int			index;
 	int			els;
 
@@ -313,10 +311,8 @@ GetWaitEventCustomNames(uint32 classId, int *nwaitevents)
 	waiteventnames = palloc_array(char *, els);
 
 	/* Now scan the hash table to copy the data */
-	hash_seq_init(&hash_seq, WaitEventCustomHashByName);
-
 	index = 0;
-	while ((hentry = (WaitEventCustomEntryByName *) hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(WaitEventCustomEntryByName, hentry, WaitEventCustomHashByName)
 	{
 		if ((hentry->wait_event_info & WAIT_EVENT_CLASS_MASK) != classId)
 			continue;
diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c
index bdc7e2237f6..281f6800310 100644
--- a/src/backend/utils/adt/array_typanalyze.c
+++ b/src/backend/utils/adt/array_typanalyze.c
@@ -223,7 +223,6 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 
 	/* This is D from the LC algorithm. */
 	HTAB	   *elements_tab;
-	HASH_SEQ_STATUS scan_status;
 
 	/* This is the current bucket number from the LC algorithm */
 	int			b_current;
@@ -232,10 +231,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 	int			bucket_width;
 	int			array_no;
 	int64		element_no;
-	TrackItem  *item;
 	int			slot_idx;
 	HTAB	   *count_tab;
-	DECountItem *count_item;
 
 	extra_data = (ArrayAnalyzeExtraData *) stats->extra_data;
 
@@ -300,6 +297,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 		int64		prev_element_no = element_no;
 		int			distinct_count;
 		bool		count_item_found;
+		DECountItem *count_item;
 
 		vacuum_delay_point(true);
 
@@ -338,6 +336,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 		{
 			Datum		elem_value;
 			bool		found;
+			TrackItem  *item;
 
 			/* No null element processing other than flag setting here */
 			if (elem_nulls[j])
@@ -458,10 +457,9 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 		i = hash_get_num_entries(elements_tab); /* surely enough space */
 		sort_table = palloc_array(TrackItem *, i);
 
-		hash_seq_init(&scan_status, elements_tab);
 		track_len = 0;
 		maxfreq = 0;
-		while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
+		foreach_hash(TrackItem, item, elements_tab)
 		{
 			if (item->frequency > cutoff_freq)
 			{
@@ -594,9 +592,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 			 * increasing count order.
 			 */
 			sorted_count_items = palloc_array(DECountItem *, count_items_count);
-			hash_seq_init(&scan_status, count_tab);
 			j = 0;
-			while ((count_item = (DECountItem *) hash_seq_search(&scan_status)) != NULL)
+			foreach_hash(DECountItem, count_item, count_tab)
 			{
 				sorted_count_items[j++] = count_item;
 			}
@@ -683,11 +680,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 static void
 prune_element_hashtable(HTAB *elements_tab, int b_current)
 {
-	HASH_SEQ_STATUS scan_status;
-	TrackItem  *item;
-
-	hash_seq_init(&scan_status, elements_tab);
-	while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrackItem, item, elements_tab)
 	{
 		if (item->frequency + item->delta <= b_current)
 		{
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index c15b127bdbf..f773eaffa6c 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -2985,8 +2985,6 @@ RelationCacheInvalidateEntry(Oid relationId)
 void
 RelationCacheInvalidate(bool debug_discard)
 {
-	HASH_SEQ_STATUS status;
-	RelIdCacheEnt *idhentry;
 	Relation	relation;
 	List	   *rebuildFirstList = NIL;
 	List	   *rebuildList = NIL;
@@ -2999,9 +2997,7 @@ RelationCacheInvalidate(bool debug_discard)
 	RelationMapInvalidateAll();
 
 	/* Phase 1 */
-	hash_seq_init(&status, RelationIdCache);
-
-	while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache)
 	{
 		relation = idhentry->reldesc;
 
@@ -3146,12 +3142,9 @@ AssertPendingSyncConsistency(Relation relation)
 void
 AssertPendingSyncs_RelationCache(void)
 {
-	HASH_SEQ_STATUS status;
-	LOCALLOCK  *locallock;
 	Relation   *rels;
 	int			maxrels;
 	int			nrels;
-	RelIdCacheEnt *idhentry;
 	int			i;
 
 	/*
@@ -3165,8 +3158,7 @@ AssertPendingSyncs_RelationCache(void)
 	maxrels = 1;
 	rels = palloc(maxrels * sizeof(*rels));
 	nrels = 0;
-	hash_seq_init(&status, GetLockMethodLocalHash());
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, GetLockMethodLocalHash())
 	{
 		Oid			relid;
 		Relation	r;
@@ -3188,8 +3180,7 @@ AssertPendingSyncs_RelationCache(void)
 		rels[nrels++] = r;
 	}
 
-	hash_seq_init(&status, RelationIdCache);
-	while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache)
 		AssertPendingSyncConsistency(idhentry->reldesc);
 
 	for (i = 0; i < nrels; i++)
@@ -3217,8 +3208,6 @@ AssertPendingSyncs_RelationCache(void)
 void
 AtEOXact_RelationCache(bool isCommit)
 {
-	HASH_SEQ_STATUS status;
-	RelIdCacheEnt *idhentry;
 	int			i;
 
 	/*
@@ -3241,8 +3230,7 @@ AtEOXact_RelationCache(bool isCommit)
 	 */
 	if (eoxact_list_overflowed)
 	{
-		hash_seq_init(&status, RelationIdCache);
-		while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
+		foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache)
 		{
 			AtEOXact_cleanup(idhentry->reldesc, isCommit);
 		}
@@ -3251,10 +3239,11 @@ AtEOXact_RelationCache(bool isCommit)
 	{
 		for (i = 0; i < eoxact_list_len; i++)
 		{
-			idhentry = (RelIdCacheEnt *) hash_search(RelationIdCache,
-													 &eoxact_list[i],
-													 HASH_FIND,
-													 NULL);
+			RelIdCacheEnt *idhentry = hash_search(RelationIdCache,
+												  &eoxact_list[i],
+												  HASH_FIND,
+												  NULL);
+
 			if (idhentry != NULL)
 				AtEOXact_cleanup(idhentry->reldesc, isCommit);
 		}
@@ -3370,8 +3359,6 @@ void
 AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
 						  SubTransactionId parentSubid)
 {
-	HASH_SEQ_STATUS status;
-	RelIdCacheEnt *idhentry;
 	int			i;
 
 	/*
@@ -3389,8 +3376,7 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
 	 */
 	if (eoxact_list_overflowed)
 	{
-		hash_seq_init(&status, RelationIdCache);
-		while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
+		foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache)
 		{
 			AtEOSubXact_cleanup(idhentry->reldesc, isCommit,
 								mySubid, parentSubid);
@@ -3400,6 +3386,8 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
 	{
 		for (i = 0; i < eoxact_list_len; i++)
 		{
+			RelIdCacheEnt *idhentry;
+
 			idhentry = (RelIdCacheEnt *) hash_search(RelationIdCache,
 													 &eoxact_list[i],
 													 HASH_FIND,
@@ -4096,8 +4084,6 @@ RelationCacheInitializePhase2(void)
 void
 RelationCacheInitializePhase3(void)
 {
-	HASH_SEQ_STATUS status;
-	RelIdCacheEnt *idhentry;
 	MemoryContext oldcxt;
 	bool		needNewCacheFile = !criticalSharedRelcachesBuilt;
 
@@ -4234,9 +4220,7 @@ RelationCacheInitializePhase3(void)
 	 * This is theoretically O(N^2), but the number of entries that actually
 	 * need to be fixed is small enough that it doesn't matter.
 	 */
-	hash_seq_init(&status, RelationIdCache);
-
-	while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache)
 	{
 		Relation	relation = idhentry->reldesc;
 		bool		restart = false;
@@ -4346,10 +4330,7 @@ RelationCacheInitializePhase3(void)
 
 		/* Now, restart the hashtable scan if needed */
 		if (restart)
-		{
-			hash_seq_term(&status);
-			hash_seq_init(&status, RelationIdCache);
-		}
+			foreach_hash_restart(idhentry, RelationIdCache);
 	}
 
 	/*
@@ -6578,8 +6559,6 @@ write_relcache_init_file(bool shared)
 	char		tempfilename[MAXPGPATH];
 	char		finalfilename[MAXPGPATH];
 	int			magic;
-	HASH_SEQ_STATUS status;
-	RelIdCacheEnt *idhentry;
 	int			i;
 
 	/*
@@ -6639,9 +6618,7 @@ write_relcache_init_file(bool shared)
 	/*
 	 * Write all the appropriate reldescs (in no particular order).
 	 */
-	hash_seq_init(&status, RelationIdCache);
-
-	while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache)
 	{
 		Relation	rel = idhentry->reldesc;
 		Form_pg_class relform = rel->rd_rel;
diff --git a/src/backend/utils/cache/relfilenumbermap.c b/src/backend/utils/cache/relfilenumbermap.c
index 09b05dec9d0..00df7db4811 100644
--- a/src/backend/utils/cache/relfilenumbermap.c
+++ b/src/backend/utils/cache/relfilenumbermap.c
@@ -51,14 +51,10 @@ typedef struct
 static void
 RelfilenumberMapInvalidateCallback(Datum arg, Oid relid)
 {
-	HASH_SEQ_STATUS status;
-	RelfilenumberMapEntry *entry;
-
 	/* callback only gets registered after creating the hash */
 	Assert(RelfilenumberMapHash != NULL);
 
-	hash_seq_init(&status, RelfilenumberMapHash);
-	while ((entry = (RelfilenumberMapEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(RelfilenumberMapEntry, entry, RelfilenumberMapHash)
 	{
 		/*
 		 * If relid is InvalidOid, signaling a complete reset, we must remove
diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c
index bf007d69ad4..44e19476c16 100644
--- a/src/backend/utils/cache/spccache.c
+++ b/src/backend/utils/cache/spccache.c
@@ -55,11 +55,7 @@ typedef struct
 static void
 InvalidateTableSpaceCacheCallback(Datum arg, int cacheid, uint32 hashvalue)
 {
-	HASH_SEQ_STATUS status;
-	TableSpaceCacheEntry *spc;
-
-	hash_seq_init(&status, TableSpaceCacheHash);
-	while ((spc = (TableSpaceCacheEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(TableSpaceCacheEntry, spc, TableSpaceCacheHash)
 	{
 		if (spc->opts)
 			pfree(spc->opts);
diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c
index e9ae4a5f093..0101aaa6410 100644
--- a/src/backend/utils/cache/ts_cache.c
+++ b/src/backend/utils/cache/ts_cache.c
@@ -94,11 +94,8 @@ static void
 InvalidateTSCacheCallBack(Datum arg, int cacheid, uint32 hashvalue)
 {
 	HTAB	   *hash = (HTAB *) DatumGetPointer(arg);
-	HASH_SEQ_STATUS status;
-	TSAnyCacheEntry *entry;
 
-	hash_seq_init(&status, hash);
-	while ((entry = (TSAnyCacheEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(TSAnyCacheEntry, entry, hash)
 		entry->isvalid = false;
 
 	/* Also invalidate the current-config cache if it's pg_ts_config */
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index 74da418a77c..54a418a2503 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -2464,15 +2464,12 @@ TypeCacheRelCallback(Datum arg, Oid relid)
 	}
 	else
 	{
-		HASH_SEQ_STATUS status;
-
 		/*
 		 * Relid is invalid. By convention, we need to reset all composite
 		 * types in cache. Also, we should reset flags for domain types, and
 		 * we loop over all entries in hash, so, do it in a single scan.
 		 */
-		hash_seq_init(&status, TypeCacheHash);
-		while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
+		foreach_hash(TypeCacheEntry, typentry, TypeCacheHash)
 		{
 			if (typentry->typtype == TYPTYPE_COMPOSITE)
 			{
@@ -2562,12 +2559,8 @@ TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
 static void
 TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
 {
-	HASH_SEQ_STATUS status;
-	TypeCacheEntry *typentry;
-
 	/* TypeCacheHash must exist, else this callback wouldn't be registered */
-	hash_seq_init(&status, TypeCacheHash);
-	while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(TypeCacheEntry, typentry, TypeCacheHash)
 	{
 		bool		hadOpclass = (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
 
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 618d0100d38..66e313d531e 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -288,8 +288,6 @@ ProcessConfigFileInternal(GucContext context, bool applySettings, int elevel)
 	const char *ConfFileWithError;
 	ConfigVariable *head,
 			   *tail;
-	HASH_SEQ_STATUS status;
-	GUCHashEntry *hentry;
 
 	/* Parse the main config file into a list of option names and values */
 	ConfFileWithError = ConfigFileName;
@@ -364,8 +362,7 @@ ProcessConfigFileInternal(GucContext context, bool applySettings, int elevel)
 	 * need this so that we can tell below which ones have been removed from
 	 * the file since we last processed it.
 	 */
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 	{
 		struct config_generic *gconf = hentry->gucvar;
 
@@ -449,8 +446,7 @@ ProcessConfigFileInternal(GucContext context, bool applySettings, int elevel)
 	 * boot-time defaults.  If such a variable can't be changed after startup,
 	 * report that and continue.
 	 */
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 	{
 		struct config_generic *gconf = hentry->gucvar;
 
@@ -839,8 +835,6 @@ struct config_generic **
 get_guc_variables(int *num_vars)
 {
 	struct config_generic **result;
-	HASH_SEQ_STATUS status;
-	GUCHashEntry *hentry;
 	int			i;
 
 	*num_vars = hash_get_num_entries(guc_hashtab);
@@ -848,8 +842,7 @@ get_guc_variables(int *num_vars)
 
 	/* Extract pointers from the hash table */
 	i = 0;
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 		result[i++] = hentry->gucvar;
 	Assert(i == *num_vars);
 
@@ -1400,9 +1393,6 @@ check_GUC_init(const struct config_generic *gconf)
 void
 InitializeGUCOptions(void)
 {
-	HASH_SEQ_STATUS status;
-	GUCHashEntry *hentry;
-
 	/*
 	 * Before log_line_prefix could possibly receive a nonempty setting, make
 	 * sure that timezone processing is minimally alive (see elog.c).
@@ -1418,8 +1408,7 @@ InitializeGUCOptions(void)
 	 * Load all variables with their compiled-in defaults, and initialize
 	 * status fields as needed.
 	 */
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 	{
 		/* Check mapping between initial and default value */
 		Assert(check_GUC_init(hentry->gucvar));
@@ -2414,9 +2403,6 @@ AtEOXact_GUC(bool isCommit, int nestLevel)
 void
 BeginReportingGUCOptions(void)
 {
-	HASH_SEQ_STATUS status;
-	GUCHashEntry *hentry;
-
 	/*
 	 * Don't do anything unless talking to an interactive frontend.
 	 */
@@ -2438,8 +2424,7 @@ BeginReportingGUCOptions(void)
 						PGC_INTERNAL, PGC_S_OVERRIDE);
 
 	/* Transmit initial values of interesting variables */
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 	{
 		struct config_generic *conf = hentry->gucvar;
 
@@ -5142,16 +5127,13 @@ void
 MarkGUCPrefixReserved(const char *className)
 {
 	int			classLen = strlen(className);
-	HASH_SEQ_STATUS status;
-	GUCHashEntry *hentry;
 	MemoryContext oldcontext;
 
 	/*
 	 * Check for existing placeholders.  We must actually remove invalid
 	 * placeholders, else future parallel worker startups will fail.
 	 */
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 	{
 		struct config_generic *var = hentry->gucvar;
 
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 0a1082f4845..a31f5e0e6ca 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -602,14 +602,10 @@ PortalDrop(Portal portal, bool isTopCommit)
 void
 PortalHashTableDeleteAll(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
 	if (PortalHashTable == NULL)
 		return;
 
-	hash_seq_init(&status, PortalHashTable);
-	while ((hentry = hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -620,8 +616,7 @@ PortalHashTableDeleteAll(void)
 		PortalDrop(portal, false);
 
 		/* Restart the iteration in case that led to other drops */
-		hash_seq_term(&status);
-		hash_seq_init(&status, PortalHashTable);
+		foreach_hash_restart(hentry, PortalHashTable);
 	}
 }
 
@@ -673,12 +668,8 @@ bool
 PreCommit_Portals(bool isPrepare)
 {
 	bool		result = false;
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
 
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -760,8 +751,7 @@ PreCommit_Portals(bool isPrepare)
 		 * iteration, because we could have invoked user-defined code that
 		 * caused a drop of the next portal in the hash chain.
 		 */
-		hash_seq_term(&status);
-		hash_seq_init(&status, PortalHashTable);
+		foreach_hash_restart(hentry, PortalHashTable);
 	}
 
 	return result;
@@ -776,12 +766,7 @@ PreCommit_Portals(bool isPrepare)
 void
 AtAbort_Portals(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -854,12 +839,7 @@ AtAbort_Portals(void)
 void
 AtCleanup_Portals(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -913,12 +893,7 @@ AtCleanup_Portals(void)
 void
 PortalErrorCleanup(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -942,12 +917,7 @@ AtSubCommit_Portals(SubTransactionId mySubid,
 					int parentLevel,
 					ResourceOwner parentXactOwner)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -978,12 +948,7 @@ AtSubAbort_Portals(SubTransactionId mySubid,
 				   ResourceOwner myXactOwner,
 				   ResourceOwner parentXactOwner)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -1088,12 +1053,7 @@ AtSubAbort_Portals(SubTransactionId mySubid,
 void
 AtSubCleanup_Portals(SubTransactionId mySubid)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -1128,8 +1088,6 @@ Datum
 pg_cursor(PG_FUNCTION_ARGS)
 {
 	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	HASH_SEQ_STATUS hash_seq;
-	PortalHashEnt *hentry;
 
 	/*
 	 * We put all the tuples into a tuplestore in one scan of the hashtable.
@@ -1137,8 +1095,7 @@ pg_cursor(PG_FUNCTION_ARGS)
 	 */
 	InitMaterializedSRF(fcinfo, 0);
 
-	hash_seq_init(&hash_seq, PortalHashTable);
-	while ((hentry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 		Datum		values[6];
@@ -1167,12 +1124,7 @@ pg_cursor(PG_FUNCTION_ARGS)
 bool
 ThereAreNoReadyPortals(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -1203,12 +1155,7 @@ ThereAreNoReadyPortals(void)
 void
 HoldPinnedPortals(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -1252,15 +1199,11 @@ HoldPinnedPortals(void)
 void
 ForgetPortalSnapshots(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
 	int			numPortalSnaps = 0;
 	int			numActiveSnaps = 0;
 
 	/* First, scan PortalHashTable and clear portalSnapshot fields */
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index 49044bc808b..2cfdf239ee4 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -503,9 +503,6 @@ set_interp_require(bool trusted)
 static void
 plperl_fini(int code, Datum arg)
 {
-	HASH_SEQ_STATUS hash_seq;
-	plperl_interp_desc *interp_desc;
-
 	elog(DEBUG3, "plperl_fini");
 
 	/*
@@ -527,8 +524,7 @@ plperl_fini(int code, Datum arg)
 	plperl_destroy_interp(&plperl_held_interp);
 
 	/* Zap any fully-initialized interpreters */
-	hash_seq_init(&hash_seq, plperl_interp_hash);
-	while ((interp_desc = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(plperl_interp_desc, interp_desc, plperl_interp_hash)
 	{
 		if (interp_desc->interp)
 		{
-- 
2.52.0

