From b01d33106c75ae993657aae65eddb6c9600df452 Mon Sep 17 00:00:00 2001
From: Jelte Fennema-Nio <postgres@jeltef.nl>
Date: Thu, 4 Dec 2025 15:39:09 +0100
Subject: [PATCH v3 5/5] Use foreach_hash macro throughout the codebase

This starts using the new foreach_hash macro throughout the codebase.
This makes the code easier to read, but it obviously does introduce
backpatching problems. We could choose not to do this refactor to avoid
that. Alternatively, we could do the refactor and then backpatch the new
macros themselves, so they can also be used in backpatched code.

At the very least we should use the new macros in a few places, to make
sure they get test coverage.
---
 contrib/dblink/dblink.c                       |  5 +-
 .../pg_stat_statements/pg_stat_statements.c   | 39 ++++-------
 contrib/pg_trgm/trgm_regexp.c                 | 18 ++---
 contrib/postgres_fdw/connection.c             | 26 ++-----
 contrib/postgres_fdw/shippable.c              |  6 +-
 src/backend/access/heap/rewriteheap.c         | 18 +----
 src/backend/access/transam/xlogutils.c        | 20 +-----
 src/backend/catalog/pg_enum.c                 | 16 ++---
 src/backend/catalog/storage.c                 | 18 ++---
 src/backend/commands/prepare.c                | 12 +---
 src/backend/commands/tablecmds.c              |  7 +-
 src/backend/optimizer/util/predtest.c         |  7 +-
 src/backend/parser/parse_oper.c               |  7 +-
 src/backend/partitioning/partdesc.c           |  6 +-
 src/backend/postmaster/autovacuum.c           |  7 +-
 src/backend/replication/logical/relation.c    | 37 ++--------
 .../replication/logical/reorderbuffer.c       | 12 +---
 src/backend/replication/pgoutput/pgoutput.c   | 24 ++-----
 src/backend/storage/buffer/bufmgr.c           | 10 +--
 src/backend/storage/ipc/shmem.c               | 12 +---
 src/backend/storage/ipc/standby.c             | 12 +---
 src/backend/storage/lmgr/lock.c               | 67 ++++---------------
 src/backend/storage/lmgr/lwlock.c             |  7 +-
 src/backend/storage/lmgr/predicate.c          | 18 +----
 src/backend/storage/smgr/smgr.c               |  7 +-
 src/backend/storage/sync/sync.c               | 13 +---
 src/backend/tsearch/ts_typanalyze.c           | 11 +--
 src/backend/utils/activity/wait_event.c       |  6 +-
 src/backend/utils/adt/array_typanalyze.c      | 17 ++---
 src/backend/utils/cache/relcache.c            | 42 ++++--------
 src/backend/utils/cache/relfilenumbermap.c    |  6 +-
 src/backend/utils/cache/spccache.c            |  6 +-
 src/backend/utils/cache/ts_cache.c            |  5 +-
 src/backend/utils/misc/guc.c                  | 30 ++-------
 src/backend/utils/mmgr/portalmem.c            | 67 +++----------------
 35 files changed, 131 insertions(+), 490 deletions(-)

diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index 2951c39d69f..d621465bd3b 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -1275,14 +1275,11 @@ PG_FUNCTION_INFO_V1(dblink_get_connections);
 Datum
 dblink_get_connections(PG_FUNCTION_ARGS)
 {
-	HASH_SEQ_STATUS status;
-	remoteConnHashEnt *hentry;
 	ArrayBuildState *astate = NULL;
 
 	if (remoteConnHash)
 	{
-		hash_seq_init(&status, remoteConnHash);
-		while ((hentry = (remoteConnHashEnt *) hash_seq_search(&status)) != NULL)
+		foreach_hash(remoteConnHashEnt, hentry, remoteConnHash)
 		{
 			/* ignore it if it's not an open connection */
 			if (hentry->rconn.conn == NULL)
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 2c55b78e9ba..a2e13f0a5d8 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -741,9 +741,7 @@ pgss_shmem_shutdown(int code, Datum arg)
 	FILE	   *file;
 	char	   *qbuffer = NULL;
 	Size		qbuffer_size = 0;
-	HASH_SEQ_STATUS hash_seq;
 	int32		num_entries;
-	pgssEntry  *entry;
 
 	/* Don't try to dump during a crash. */
 	if (code)
@@ -777,8 +775,7 @@ pgss_shmem_shutdown(int code, Datum arg)
 	 * When serializing to disk, we store query texts immediately after their
 	 * entry data.  Any orphaned query texts are thereby excluded.
 	 */
-	hash_seq_init(&hash_seq, pgss_hash);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(pgssEntry, entry, pgss_hash)
 	{
 		int			len = entry->query_len;
 		char	   *qstr = qtext_fetch(entry->query_offset, len,
@@ -790,8 +787,8 @@ pgss_shmem_shutdown(int code, Datum arg)
 		if (fwrite(entry, sizeof(pgssEntry), 1, file) != 1 ||
 			fwrite(qstr, 1, len + 1, file) != len + 1)
 		{
-			/* note: we assume hash_seq_term won't change errno */
-			hash_seq_term(&hash_seq);
+			/* note: we assume foreach_hash_term won't change errno */
+			foreach_hash_term(entry);
 			goto error;
 		}
 	}
@@ -1695,8 +1692,6 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
 	Size		qbuffer_size = 0;
 	Size		extent = 0;
 	int			gc_count = 0;
-	HASH_SEQ_STATUS hash_seq;
-	pgssEntry  *entry;
 
 	/*
 	 * Superusers or roles with the privileges of pg_read_all_stats members
@@ -1825,8 +1820,7 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
 		}
 	}
 
-	hash_seq_init(&hash_seq, pgss_hash);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(pgssEntry, entry, pgss_hash)
 	{
 		Datum		values[PG_STAT_STATEMENTS_COLS];
 		bool		nulls[PG_STAT_STATEMENTS_COLS];
@@ -2170,9 +2164,7 @@ entry_cmp(const void *lhs, const void *rhs)
 static void
 entry_dealloc(void)
 {
-	HASH_SEQ_STATUS hash_seq;
 	pgssEntry **entries;
-	pgssEntry  *entry;
 	int			nvictims;
 	int			i;
 	Size		tottextlen;
@@ -2196,8 +2188,7 @@ entry_dealloc(void)
 	tottextlen = 0;
 	nvalidtexts = 0;
 
-	hash_seq_init(&hash_seq, pgss_hash);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(pgssEntry, entry, pgss_hash)
 	{
 		entries[i++] = entry;
 		/* "Sticky" entries get a different usage decay rate. */
@@ -2509,8 +2500,6 @@ gc_qtexts(void)
 	char	   *qbuffer;
 	Size		qbuffer_size;
 	FILE	   *qfile = NULL;
-	HASH_SEQ_STATUS hash_seq;
-	pgssEntry  *entry;
 	Size		extent;
 	int			nentries;
 
@@ -2552,8 +2541,7 @@ gc_qtexts(void)
 	extent = 0;
 	nentries = 0;
 
-	hash_seq_init(&hash_seq, pgss_hash);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(pgssEntry, entry, pgss_hash)
 	{
 		int			query_len = entry->query_len;
 		char	   *qry = qtext_fetch(entry->query_offset,
@@ -2576,7 +2564,7 @@ gc_qtexts(void)
 					(errcode_for_file_access(),
 					 errmsg("could not write file \"%s\": %m",
 							PGSS_TEXT_FILE)));
-			hash_seq_term(&hash_seq);
+			foreach_hash_term(entry);
 			goto gc_fail;
 		}
 
@@ -2643,8 +2631,7 @@ gc_fail:
 	 * Since the contents of the external file are now uncertain, mark all
 	 * hashtable entries as having invalid texts.
 	 */
-	hash_seq_init(&hash_seq, pgss_hash);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(pgssEntry, entry, pgss_hash)
 	{
 		entry->query_offset = 0;
 		entry->query_len = -1;
@@ -2708,8 +2695,6 @@ if (e) { \
 static TimestampTz
 entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only)
 {
-	HASH_SEQ_STATUS hash_seq;
-	pgssEntry  *entry;
 	FILE	   *qfile;
 	int64		num_entries;
 	int64		num_remove = 0;
@@ -2729,6 +2714,8 @@ entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only)
 	if (userid != 0 && dbid != 0 && queryid != INT64CONST(0))
 	{
 		/* If all the parameters are available, use the fast path. */
+		pgssEntry  *entry;
+
 		memset(&key, 0, sizeof(pgssHashKey));
 		key.userid = userid;
 		key.dbid = dbid;
@@ -2752,8 +2739,7 @@ entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only)
 	else if (userid != 0 || dbid != 0 || queryid != INT64CONST(0))
 	{
 		/* Reset entries corresponding to valid parameters. */
-		hash_seq_init(&hash_seq, pgss_hash);
-		while ((entry = hash_seq_search(&hash_seq)) != NULL)
+		foreach_hash(pgssEntry, entry, pgss_hash)
 		{
 			if ((!userid || entry->key.userid == userid) &&
 				(!dbid || entry->key.dbid == dbid) &&
@@ -2766,8 +2752,7 @@ entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only)
 	else
 	{
 		/* Reset all entries. */
-		hash_seq_init(&hash_seq, pgss_hash);
-		while ((entry = hash_seq_search(&hash_seq)) != NULL)
+		foreach_hash(pgssEntry, entry, pgss_hash)
 		{
 			SINGLE_ENTRY_RESET(entry);
 		}
diff --git a/contrib/pg_trgm/trgm_regexp.c b/contrib/pg_trgm/trgm_regexp.c
index df7d3b70d04..53a9f885ec2 100644
--- a/contrib/pg_trgm/trgm_regexp.c
+++ b/contrib/pg_trgm/trgm_regexp.c
@@ -1449,10 +1449,8 @@ prefixContains(TrgmPrefix *prefix1, TrgmPrefix *prefix2)
 static bool
 selectColorTrigrams(TrgmNFA *trgmNFA)
 {
-	HASH_SEQ_STATUS scan_status;
 	int			arcsCount = trgmNFA->arcsCount,
 				i;
-	TrgmState  *state;
 	ColorTrgmInfo *colorTrgms;
 	int64		totalTrgmCount;
 	float4		totalTrgmPenalty;
@@ -1463,8 +1461,7 @@ selectColorTrigrams(TrgmNFA *trgmNFA)
 	trgmNFA->colorTrgms = colorTrgms;
 
 	i = 0;
-	hash_seq_init(&scan_status, trgmNFA->states);
-	while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrgmState, state, trgmNFA->states)
 	{
 		ListCell   *cell;
 
@@ -1926,8 +1923,6 @@ packGraph(TrgmNFA *trgmNFA, MemoryContext rcontext)
 	int			snumber = 2,
 				arcIndex,
 				arcsCount;
-	HASH_SEQ_STATUS scan_status;
-	TrgmState  *state;
 	TrgmPackArcInfo *arcs;
 	TrgmPackedArc *packedArcs;
 	TrgmPackedGraph *result;
@@ -1935,8 +1930,7 @@ packGraph(TrgmNFA *trgmNFA, MemoryContext rcontext)
 				j;
 
 	/* Enumerate surviving states, giving init and fin reserved numbers */
-	hash_seq_init(&scan_status, trgmNFA->states);
-	while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrgmState, state, trgmNFA->states)
 	{
 		while (state->parent)
 			state = state->parent;
@@ -1958,8 +1952,7 @@ packGraph(TrgmNFA *trgmNFA, MemoryContext rcontext)
 	/* Collect array of all arcs */
 	arcs = palloc_array(TrgmPackArcInfo, trgmNFA->arcsCount);
 	arcIndex = 0;
-	hash_seq_init(&scan_status, trgmNFA->states);
-	while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrgmState, state, trgmNFA->states)
 	{
 		TrgmState  *source = state;
 		ListCell   *cell;
@@ -2202,16 +2195,13 @@ static void
 printTrgmNFA(TrgmNFA *trgmNFA)
 {
 	StringInfoData buf;
-	HASH_SEQ_STATUS scan_status;
-	TrgmState  *state;
 	TrgmState  *initstate = NULL;
 
 	initStringInfo(&buf);
 
 	appendStringInfoString(&buf, "\ndigraph transformedNFA {\n");
 
-	hash_seq_init(&scan_status, trgmNFA->states);
-	while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrgmState, state, trgmNFA->states)
 	{
 		ListCell   *cell;
 
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index 042a7fb3ac1..42863b02909 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -1044,8 +1044,6 @@ pgfdw_report_internal(int elevel, PGresult *res, PGconn *conn,
 static void
 pgfdw_xact_callback(XactEvent event, void *arg)
 {
-	HASH_SEQ_STATUS scan;
-	ConnCacheEntry *entry;
 	List	   *pending_entries = NIL;
 	List	   *cancel_requested = NIL;
 
@@ -1057,8 +1055,7 @@ pgfdw_xact_callback(XactEvent event, void *arg)
 	 * Scan all connection cache entries to find open remote transactions, and
 	 * close them.
 	 */
-	hash_seq_init(&scan, ConnectionHash);
-	while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+	foreach_hash(ConnCacheEntry, entry, ConnectionHash)
 	{
 		PGresult   *res;
 
@@ -1195,8 +1192,6 @@ static void
 pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
 					   SubTransactionId parentSubid, void *arg)
 {
-	HASH_SEQ_STATUS scan;
-	ConnCacheEntry *entry;
 	int			curlevel;
 	List	   *pending_entries = NIL;
 	List	   *cancel_requested = NIL;
@@ -1215,8 +1210,7 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
 	 * of the current level, and close them.
 	 */
 	curlevel = GetCurrentTransactionNestLevel();
-	hash_seq_init(&scan, ConnectionHash);
-	while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+	foreach_hash(ConnCacheEntry, entry, ConnectionHash)
 	{
 		char		sql[100];
 
@@ -1307,14 +1301,10 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
 static void
 pgfdw_inval_callback(Datum arg, int cacheid, uint32 hashvalue)
 {
-	HASH_SEQ_STATUS scan;
-	ConnCacheEntry *entry;
-
 	Assert(cacheid == FOREIGNSERVEROID || cacheid == USERMAPPINGOID);
 
 	/* ConnectionHash must exist already, if we're registered */
-	hash_seq_init(&scan, ConnectionHash);
-	while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+	foreach_hash(ConnCacheEntry, entry, ConnectionHash)
 	{
 		/* Ignore invalid entries */
 		if (entry->conn == NULL)
@@ -2165,8 +2155,6 @@ postgres_fdw_get_connections_internal(FunctionCallInfo fcinfo,
 									  enum pgfdwVersion api_version)
 {
 	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	HASH_SEQ_STATUS scan;
-	ConnCacheEntry *entry;
 
 	InitMaterializedSRF(fcinfo, 0);
 
@@ -2189,8 +2177,7 @@ postgres_fdw_get_connections_internal(FunctionCallInfo fcinfo,
 			elog(ERROR, "incorrect number of output arguments");
 	}
 
-	hash_seq_init(&scan, ConnectionHash);
-	while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+	foreach_hash(ConnCacheEntry, entry, ConnectionHash)
 	{
 		ForeignServer *server;
 		Datum		values[POSTGRES_FDW_GET_CONNECTIONS_COLS] = {0};
@@ -2392,8 +2379,6 @@ postgres_fdw_disconnect_all(PG_FUNCTION_ARGS)
 static bool
 disconnect_cached_connections(Oid serverid)
 {
-	HASH_SEQ_STATUS scan;
-	ConnCacheEntry *entry;
 	bool		all = !OidIsValid(serverid);
 	bool		result = false;
 
@@ -2404,8 +2389,7 @@ disconnect_cached_connections(Oid serverid)
 	if (!ConnectionHash)
 		return false;
 
-	hash_seq_init(&scan, ConnectionHash);
-	while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+	foreach_hash(ConnCacheEntry, entry, ConnectionHash)
 	{
 		/* Ignore cache entry if no open connection right now. */
 		if (!entry->conn)
diff --git a/contrib/postgres_fdw/shippable.c b/contrib/postgres_fdw/shippable.c
index 66d2d0b9ff4..bc5dc90e541 100644
--- a/contrib/postgres_fdw/shippable.c
+++ b/contrib/postgres_fdw/shippable.c
@@ -65,17 +65,13 @@ typedef struct
 static void
 InvalidateShippableCacheCallback(Datum arg, int cacheid, uint32 hashvalue)
 {
-	HASH_SEQ_STATUS status;
-	ShippableCacheEntry *entry;
-
 	/*
 	 * In principle we could flush only cache entries relating to the
 	 * pg_foreign_server entry being outdated; but that would be more
 	 * complicated, and it's probably not worth the trouble.  So for now, just
 	 * flush all entries.
 	 */
-	hash_seq_init(&status, ShippableCacheHash);
-	while ((entry = (ShippableCacheEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(ShippableCacheEntry, entry, ShippableCacheHash)
 	{
 		if (hash_search(ShippableCacheHash,
 						&entry->key,
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index dc2c46742fd..32433fce4a4 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -290,16 +290,11 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm
 void
 end_heap_rewrite(RewriteState state)
 {
-	HASH_SEQ_STATUS seq_status;
-	UnresolvedTup unresolved;
-
 	/*
 	 * Write any remaining tuples in the UnresolvedTups table. If we have any
 	 * left, they should in fact be dead, but let's err on the safe side.
 	 */
-	hash_seq_init(&seq_status, state->rs_unresolved_tups);
-
-	while ((unresolved = hash_seq_search(&seq_status)) != NULL)
+	foreach_hash(UnresolvedTupData, unresolved, state->rs_unresolved_tups)
 	{
 		ItemPointerSetInvalid(&unresolved->tuple->t_data->t_ctid);
 		raw_heap_insert(state, unresolved->tuple);
@@ -794,8 +789,6 @@ logical_begin_heap_rewrite(RewriteState state)
 static void
 logical_heap_rewrite_flush_mappings(RewriteState state)
 {
-	HASH_SEQ_STATUS seq_status;
-	RewriteMappingFile *src;
 	dlist_mutable_iter iter;
 
 	Assert(state->rs_logical_rewrite);
@@ -807,8 +800,7 @@ logical_heap_rewrite_flush_mappings(RewriteState state)
 	elog(DEBUG1, "flushing %u logical rewrite mapping entries",
 		 state->rs_num_rewrite_mappings);
 
-	hash_seq_init(&seq_status, state->rs_logical_mappings);
-	while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
+	foreach_hash(RewriteMappingFile, src, state->rs_logical_mappings)
 	{
 		char	   *waldata;
 		char	   *waldata_start;
@@ -892,9 +884,6 @@ logical_heap_rewrite_flush_mappings(RewriteState state)
 static void
 logical_end_heap_rewrite(RewriteState state)
 {
-	HASH_SEQ_STATUS seq_status;
-	RewriteMappingFile *src;
-
 	/* done, no logical rewrite in progress */
 	if (!state->rs_logical_rewrite)
 		return;
@@ -904,8 +893,7 @@ logical_end_heap_rewrite(RewriteState state)
 		logical_heap_rewrite_flush_mappings(state);
 
 	/* Iterate over all mappings we have written and fsync the files. */
-	hash_seq_init(&seq_status, state->rs_logical_mappings);
-	while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
+	foreach_hash(RewriteMappingFile, src, state->rs_logical_mappings)
 	{
 		if (FileSync(src->vfd, WAIT_EVENT_LOGICAL_REWRITE_SYNC) != 0)
 			ereport(data_sync_elevel(ERROR),
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index 9e24759f5cc..ef13f04e08d 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -160,15 +160,10 @@ static void
 forget_invalid_pages(RelFileLocator locator, ForkNumber forkno,
 					 BlockNumber minblkno)
 {
-	HASH_SEQ_STATUS status;
-	xl_invalid_page *hentry;
-
 	if (invalid_page_tab == NULL)
 		return;					/* nothing to do */
 
-	hash_seq_init(&status, invalid_page_tab);
-
-	while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
+	foreach_hash(xl_invalid_page, hentry, invalid_page_tab)
 	{
 		if (RelFileLocatorEquals(hentry->key.locator, locator) &&
 			hentry->key.forkno == forkno &&
@@ -190,15 +185,10 @@ forget_invalid_pages(RelFileLocator locator, ForkNumber forkno,
 static void
 forget_invalid_pages_db(Oid dbid)
 {
-	HASH_SEQ_STATUS status;
-	xl_invalid_page *hentry;
-
 	if (invalid_page_tab == NULL)
 		return;					/* nothing to do */
 
-	hash_seq_init(&status, invalid_page_tab);
-
-	while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
+	foreach_hash(xl_invalid_page, hentry, invalid_page_tab)
 	{
 		if (hentry->key.locator.dbOid == dbid)
 		{
@@ -228,20 +218,16 @@ XLogHaveInvalidPages(void)
 void
 XLogCheckInvalidPages(void)
 {
-	HASH_SEQ_STATUS status;
-	xl_invalid_page *hentry;
 	bool		foundone = false;
 
 	if (invalid_page_tab == NULL)
 		return;					/* nothing to do */
 
-	hash_seq_init(&status, invalid_page_tab);
-
 	/*
 	 * Our strategy is to emit WARNING messages for all remaining entries and
 	 * only PANIC after we've dumped all the available info.
 	 */
-	while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
+	foreach_hash(xl_invalid_page, hentry, invalid_page_tab)
 	{
 		report_invalid_page(WARNING, hentry->key.locator, hentry->key.forkno,
 							hentry->key.blkno, hentry->present);
diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c
index 4787a61c7d3..fca136b68ad 100644
--- a/src/backend/catalog/pg_enum.c
+++ b/src/backend/catalog/pg_enum.c
@@ -838,12 +838,10 @@ SerializeUncommittedEnums(void *space, Size size)
 	/* Write out all the OIDs from the types hash table, if there is one. */
 	if (uncommitted_enum_types)
 	{
-		HASH_SEQ_STATUS status;
-		Oid		   *value;
-
-		hash_seq_init(&status, uncommitted_enum_types);
-		while ((value = (Oid *) hash_seq_search(&status)))
+		foreach_hash(Oid, value, uncommitted_enum_types)
+		{
 			*serialized++ = *value;
+		}
 	}
 
 	/* Write out the terminator. */
@@ -852,12 +850,8 @@ SerializeUncommittedEnums(void *space, Size size)
 	/* Write out all the OIDs from the values hash table, if there is one. */
 	if (uncommitted_enum_values)
 	{
-		HASH_SEQ_STATUS status;
-		Oid		   *value;
-
-		hash_seq_init(&status, uncommitted_enum_values);
-		while ((value = (Oid *) hash_seq_search(&status)))
-			*serialized++ = *value;
+		foreach_hash(Oid, value, uncommitted_enum_values)
+			*serialized++ = *value;
 	}
 
 	/* Write out the terminator. */
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index 1d13b4a1f39..dc44cae9e3e 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -594,10 +594,7 @@ void
 SerializePendingSyncs(Size maxSize, char *startAddress)
 {
 	HTAB	   *tmphash;
-	HASH_SEQ_STATUS scan;
-	PendingRelSync *sync;
 	PendingRelDelete *delete;
-	RelFileLocator *src;
 	RelFileLocator *dest = (RelFileLocator *) startAddress;
 
 	if (!pendingSyncHash)
@@ -608,9 +605,10 @@ SerializePendingSyncs(Size maxSize, char *startAddress)
 						   hash_get_num_entries(pendingSyncHash));
 
 	/* collect all rlocator from pending syncs */
-	hash_seq_init(&scan, pendingSyncHash);
-	while ((sync = (PendingRelSync *) hash_seq_search(&scan)))
+	foreach_hash(PendingRelSync, sync, pendingSyncHash)
+	{
 		(void) hash_search(tmphash, &sync->rlocator, HASH_ENTER, NULL);
+	}
 
 	/* remove deleted rnodes */
 	for (delete = pendingDeletes; delete != NULL; delete = delete->next)
@@ -618,9 +616,10 @@ SerializePendingSyncs(Size maxSize, char *startAddress)
 			(void) hash_search(tmphash, &delete->rlocator,
 							   HASH_REMOVE, NULL);
 
-	hash_seq_init(&scan, tmphash);
-	while ((src = (RelFileLocator *) hash_seq_search(&scan)))
+	foreach_hash(RelFileLocator, src, tmphash)
+	{
 		*dest++ = *src;
+	}
 
 	hash_destroy(tmphash);
 
@@ -733,8 +732,6 @@ smgrDoPendingSyncs(bool isCommit, bool isParallelWorker)
 	int			nrels = 0,
 				maxrels = 0;
 	SMgrRelation *srels = NULL;
-	HASH_SEQ_STATUS scan;
-	PendingRelSync *pendingsync;
 
 	Assert(GetCurrentTransactionNestLevel() == 1);
 
@@ -763,8 +760,7 @@ smgrDoPendingSyncs(bool isCommit, bool isParallelWorker)
 			(void) hash_search(pendingSyncHash, &pending->rlocator,
 							   HASH_REMOVE, NULL);
 
-	hash_seq_init(&scan, pendingSyncHash);
-	while ((pendingsync = (PendingRelSync *) hash_seq_search(&scan)))
+	foreach_hash(PendingRelSync, pendingsync, pendingSyncHash)
 	{
 		ForkNumber	fork;
 		BlockNumber nblocks[MAX_FORKNUM + 1];
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index cc950ce2887..7d4e4bd862c 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -522,16 +522,12 @@ DropPreparedStatement(const char *stmt_name, bool showError)
 void
 DropAllPreparedStatements(void)
 {
-	HASH_SEQ_STATUS seq;
-	PreparedStatement *entry;
-
 	/* nothing cached */
 	if (!prepared_queries)
 		return;
 
 	/* walk over cache */
-	hash_seq_init(&seq, prepared_queries);
-	while ((entry = hash_seq_search(&seq)) != NULL)
+	foreach_hash(PreparedStatement, entry, prepared_queries)
 	{
 		/* Release the plancache entry */
 		DropCachedPlan(entry->plansource);
@@ -678,11 +674,7 @@ pg_prepared_statement(PG_FUNCTION_ARGS)
 	/* hash table might be uninitialized */
 	if (prepared_queries)
 	{
-		HASH_SEQ_STATUS hash_seq;
-		PreparedStatement *prep_stmt;
-
-		hash_seq_init(&hash_seq, prepared_queries);
-		while ((prep_stmt = hash_seq_search(&hash_seq)) != NULL)
+		foreach_hash(PreparedStatement, prep_stmt, prepared_queries)
 		{
 			TupleDesc	result_desc;
 			Datum		values[8];
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 7f0fb263772..9a55662af55 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -2236,14 +2236,9 @@ ExecuteTruncateGuts(List *explicit_rels,
 	/* Now go through the hash table, and truncate foreign tables */
 	if (ft_htab)
 	{
-		ForeignTruncateInfo *ft_info;
-		HASH_SEQ_STATUS seq;
-
-		hash_seq_init(&seq, ft_htab);
-
 		PG_TRY();
 		{
-			while ((ft_info = hash_seq_search(&seq)) != NULL)
+			foreach_hash(ForeignTruncateInfo, ft_info, ft_htab)
 			{
 				FdwRoutine *routine = GetFdwRoutineByServerId(ft_info->serverid);
 
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index 43291f128fd..fa52f7a776d 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -2342,15 +2342,10 @@ get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it)
 static void
 InvalidateOprProofCacheCallBack(Datum arg, int cacheid, uint32 hashvalue)
 {
-	HASH_SEQ_STATUS status;
-	OprProofCacheEntry *hentry;
-
 	Assert(OprProofCacheHash != NULL);
 
 	/* Currently we just reset all entries; hard to be smarter ... */
-	hash_seq_init(&status, OprProofCacheHash);
-
-	while ((hentry = (OprProofCacheEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(OprProofCacheEntry, hentry, OprProofCacheHash)
 	{
 		hentry->have_implic = false;
 		hentry->have_refute = false;
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index f9588865cfc..91382962d94 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -1076,15 +1076,10 @@ make_oper_cache_entry(OprCacheKey *key, Oid opr_oid)
 static void
 InvalidateOprCacheCallBack(Datum arg, int cacheid, uint32 hashvalue)
 {
-	HASH_SEQ_STATUS status;
-	OprCacheEntry *hentry;
-
 	Assert(OprCacheHash != NULL);
 
 	/* Currently we just flush all entries; hard to be smarter ... */
-	hash_seq_init(&status, OprCacheHash);
-
-	while ((hentry = (OprCacheEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(OprCacheEntry, hentry, OprCacheHash)
 	{
 		if (hash_search(OprCacheHash,
 						&hentry->key,
diff --git a/src/backend/partitioning/partdesc.c b/src/backend/partitioning/partdesc.c
index b3fbd781494..908b0e39b8c 100644
--- a/src/backend/partitioning/partdesc.c
+++ b/src/backend/partitioning/partdesc.c
@@ -478,11 +478,7 @@ PartitionDirectoryLookup(PartitionDirectory pdir, Relation rel)
 void
 DestroyPartitionDirectory(PartitionDirectory pdir)
 {
-	HASH_SEQ_STATUS status;
-	PartitionDirectoryEntry *pde;
-
-	hash_seq_init(&status, pdir->pdir_hash);
-	while ((pde = hash_seq_search(&status)) != NULL)
+	foreach_hash(PartitionDirectoryEntry, pde, pdir->pdir_hash)
 		RelationDecrementReferenceCount(pde->rel);
 }
 
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index f1b6b1a24a9..b0df7fba690 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -1021,8 +1021,6 @@ rebuild_database_list(Oid newdb)
 		TimestampTz current_time;
 		int			millis_increment;
 		avl_dbase  *dbary;
-		avl_dbase  *db;
-		HASH_SEQ_STATUS seq;
 		int			i;
 
 		/* put all the hash elements into an array */
@@ -1033,8 +1031,7 @@ rebuild_database_list(Oid newdb)
 #endif
 
 		i = 0;
-		hash_seq_init(&seq, dbhash);
-		while ((db = hash_seq_search(&seq)) != NULL)
+		foreach_hash(avl_dbase, db, dbhash)
 			memcpy(&(dbary[i++]), db, sizeof(avl_dbase));
 
 		/* sort the array */
@@ -1059,7 +1056,7 @@ rebuild_database_list(Oid newdb)
 		 */
 		for (i = 0; i < nelems; i++)
 		{
-			db = &(dbary[i]);
+			avl_dbase  *db = &(dbary[i]);
 
 			current_time = TimestampTzPlusMilliseconds(current_time,
 													   millis_increment);
diff --git a/src/backend/replication/logical/relation.c b/src/backend/replication/logical/relation.c
index c4af85d74f1..57f5872984b 100644
--- a/src/backend/replication/logical/relation.c
+++ b/src/backend/replication/logical/relation.c
@@ -63,25 +63,19 @@ static Oid	FindLogicalRepLocalIndex(Relation localrel, LogicalRepRelation *remot
 static void
 logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid)
 {
-	LogicalRepRelMapEntry *entry;
-
 	/* Just to be sure. */
 	if (LogicalRepRelMap == NULL)
 		return;
 
 	if (reloid != InvalidOid)
 	{
-		HASH_SEQ_STATUS status;
-
-		hash_seq_init(&status, LogicalRepRelMap);
-
 		/* TODO, use inverse lookup hashtable? */
-		while ((entry = (LogicalRepRelMapEntry *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LogicalRepRelMapEntry, entry, LogicalRepRelMap)
 		{
 			if (entry->localreloid == reloid)
 			{
 				entry->localrelvalid = false;
-				hash_seq_term(&status);
+				foreach_hash_term(entry);
 				break;
 			}
 		}
@@ -89,11 +83,7 @@ logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid)
 	else
 	{
 		/* invalidate all cache entries */
-		HASH_SEQ_STATUS status;
-
-		hash_seq_init(&status, LogicalRepRelMap);
-
-		while ((entry = (LogicalRepRelMapEntry *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LogicalRepRelMapEntry, entry, LogicalRepRelMap)
 			entry->localrelvalid = false;
 	}
 }
@@ -530,25 +520,19 @@ logicalrep_rel_close(LogicalRepRelMapEntry *rel, LOCKMODE lockmode)
 static void
 logicalrep_partmap_invalidate_cb(Datum arg, Oid reloid)
 {
-	LogicalRepPartMapEntry *entry;
-
 	/* Just to be sure. */
 	if (LogicalRepPartMap == NULL)
 		return;
 
 	if (reloid != InvalidOid)
 	{
-		HASH_SEQ_STATUS status;
-
-		hash_seq_init(&status, LogicalRepPartMap);
-
 		/* TODO, use inverse lookup hashtable? */
-		while ((entry = (LogicalRepPartMapEntry *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LogicalRepPartMapEntry, entry, LogicalRepPartMap)
 		{
 			if (entry->relmapentry.localreloid == reloid)
 			{
 				entry->relmapentry.localrelvalid = false;
-				hash_seq_term(&status);
+				foreach_hash_term(entry);
 				break;
 			}
 		}
@@ -556,11 +540,7 @@ logicalrep_partmap_invalidate_cb(Datum arg, Oid reloid)
 	else
 	{
 		/* invalidate all cache entries */
-		HASH_SEQ_STATUS status;
-
-		hash_seq_init(&status, LogicalRepPartMap);
-
-		while ((entry = (LogicalRepPartMapEntry *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LogicalRepPartMapEntry, entry, LogicalRepPartMap)
 			entry->relmapentry.localrelvalid = false;
 	}
 }
@@ -578,15 +558,12 @@ logicalrep_partmap_invalidate_cb(Datum arg, Oid reloid)
 void
 logicalrep_partmap_reset_relmap(LogicalRepRelation *remoterel)
 {
-	HASH_SEQ_STATUS status;
-	LogicalRepPartMapEntry *part_entry;
 	LogicalRepRelMapEntry *entry;
 
 	if (LogicalRepPartMap == NULL)
 		return;
 
-	hash_seq_init(&status, LogicalRepPartMap);
-	while ((part_entry = (LogicalRepPartMapEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LogicalRepPartMapEntry, part_entry, LogicalRepPartMap)
 	{
 		entry = &part_entry->relmapentry;
 
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index e09516d3d8b..562fd743d94 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -5250,15 +5250,11 @@ ReorderBufferToastReplace(ReorderBuffer *rb, ReorderBufferTXN *txn,
 static void
 ReorderBufferToastReset(ReorderBuffer *rb, ReorderBufferTXN *txn)
 {
-	HASH_SEQ_STATUS hstat;
-	ReorderBufferToastEnt *ent;
-
 	if (txn->toast_hash == NULL)
 		return;
 
 	/* sequentially walk over the hash and free everything */
-	hash_seq_init(&hstat, txn->toast_hash);
-	while ((ent = (ReorderBufferToastEnt *) hash_seq_search(&hstat)) != NULL)
+	foreach_hash(ReorderBufferToastEnt, ent, txn->toast_hash)
 	{
 		dlist_mutable_iter it;
 
@@ -5321,11 +5317,7 @@ typedef struct RewriteMappingFile
 static void
 DisplayMapping(HTAB *tuplecid_data)
 {
-	HASH_SEQ_STATUS hstat;
-	ReorderBufferTupleCidEnt *ent;
-
-	hash_seq_init(&hstat, tuplecid_data);
-	while ((ent = (ReorderBufferTupleCidEnt *) hash_seq_search(&hstat)) != NULL)
+	foreach_hash(ReorderBufferTupleCidEnt, ent, tuplecid_data)
 	{
 		elog(DEBUG3, "mapping: node: %u/%u/%u tid: %u/%u cmin: %u, cmax: %u",
 			 ent->key.rlocator.dbOid,
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
index 6dea24ff0a6..3750a880aed 100644
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -2339,13 +2339,9 @@ get_rel_sync_entry(PGOutputData *data, Relation relation)
 static void
 cleanup_rel_sync_cache(TransactionId xid, bool is_commit)
 {
-	HASH_SEQ_STATUS hash_seq;
-	RelationSyncEntry *entry;
-
 	Assert(RelationSyncCache != NULL);
 
-	hash_seq_init(&hash_seq, RelationSyncCache);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(RelationSyncEntry, entry, RelationSyncCache)
 	{
 		/*
 		 * We can set the schema_sent flag for an entry that has committed xid
@@ -2374,8 +2370,6 @@ cleanup_rel_sync_cache(TransactionId xid, bool is_commit)
 static void
 rel_sync_cache_relation_cb(Datum arg, Oid relid)
 {
-	RelationSyncEntry *entry;
-
 	/*
 	 * We can get here if the plugin was used in SQL interface as the
 	 * RelationSyncCache is destroyed when the decoding finishes, but there is
@@ -2398,18 +2392,16 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid)
 		 * Getting invalidations for relations that aren't in the table is
 		 * entirely normal.  So we don't care if it's found or not.
 		 */
-		entry = (RelationSyncEntry *) hash_search(RelationSyncCache, &relid,
-												  HASH_FIND, NULL);
+		RelationSyncEntry *entry = hash_search(RelationSyncCache, &relid,
+											   HASH_FIND, NULL);
+
 		if (entry != NULL)
 			entry->replicate_valid = false;
 	}
 	else
 	{
 		/* Whole cache must be flushed. */
-		HASH_SEQ_STATUS status;
-
-		hash_seq_init(&status, RelationSyncCache);
-		while ((entry = (RelationSyncEntry *) hash_seq_search(&status)) != NULL)
+		foreach_hash(RelationSyncEntry, entry, RelationSyncCache)
 		{
 			entry->replicate_valid = false;
 		}
@@ -2424,9 +2416,6 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid)
 static void
 rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue)
 {
-	HASH_SEQ_STATUS status;
-	RelationSyncEntry *entry;
-
 	/*
 	 * We can get here if the plugin was used in SQL interface as the
 	 * RelationSyncCache is destroyed when the decoding finishes, but there is
@@ -2439,8 +2428,7 @@ rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue)
 	 * We have no easy way to identify which cache entries this invalidation
 	 * event might have affected, so just mark them all invalid.
 	 */
-	hash_seq_init(&status, RelationSyncCache);
-	while ((entry = (RelationSyncEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(RelationSyncEntry, entry, RelationSyncCache)
 	{
 		entry->replicate_valid = false;
 	}
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 781738956af..edcf917fb7d 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -4057,14 +4057,13 @@ CheckForBufferLeaks(void)
 {
 #ifdef USE_ASSERT_CHECKING
 	int			RefCountErrors = 0;
-	PrivateRefCountEntry *res;
 	int			i;
 	char	   *s;
 
 	/* check the array */
 	for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
 	{
-		res = &PrivateRefCountArray[i];
+		PrivateRefCountEntry *res = &PrivateRefCountArray[i];
 
 		if (res->buffer != InvalidBuffer)
 		{
@@ -4079,12 +4078,9 @@ CheckForBufferLeaks(void)
 	/* if necessary search the hash */
 	if (PrivateRefCountOverflowed)
 	{
-		HASH_SEQ_STATUS hstat;
-
-		hash_seq_init(&hstat, PrivateRefCountHash);
-		while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
+		foreach_hash(PrivateRefCountEntry, ent, PrivateRefCountHash)
 		{
-			s = DebugPrintBufferRefcount(res->buffer);
+			s = DebugPrintBufferRefcount(ent->buffer);
 			elog(WARNING, "buffer refcount leak: %s", s);
 			pfree(s);
 			RefCountErrors++;
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 2d4c518dec2..8164a8c9174 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -539,8 +539,6 @@ pg_get_shmem_allocations(PG_FUNCTION_ARGS)
 {
 #define PG_GET_SHMEM_SIZES_COLS 4
 	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	HASH_SEQ_STATUS hstat;
-	ShmemIndexEnt *ent;
 	Size		named_allocated = 0;
 	Datum		values[PG_GET_SHMEM_SIZES_COLS];
 	bool		nulls[PG_GET_SHMEM_SIZES_COLS];
@@ -549,11 +547,9 @@ pg_get_shmem_allocations(PG_FUNCTION_ARGS)
 
 	LWLockAcquire(ShmemIndexLock, LW_SHARED);
 
-	hash_seq_init(&hstat, ShmemIndex);
-
 	/* output all allocated entries */
 	memset(nulls, 0, sizeof(nulls));
-	while ((ent = (ShmemIndexEnt *) hash_seq_search(&hstat)) != NULL)
+	foreach_hash(ShmemIndexEnt, ent, ShmemIndex)
 	{
 		values[0] = CStringGetTextDatum(ent->key);
 		values[1] = Int64GetDatum((char *) ent->location - (char *) ShmemSegHdr);
@@ -596,8 +592,6 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS)
 {
 #define PG_GET_SHMEM_NUMA_SIZES_COLS 3
 	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	HASH_SEQ_STATUS hstat;
-	ShmemIndexEnt *ent;
 	Datum		values[PG_GET_SHMEM_NUMA_SIZES_COLS];
 	bool		nulls[PG_GET_SHMEM_NUMA_SIZES_COLS];
 	Size		os_page_size;
@@ -647,11 +641,9 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS)
 
 	LWLockAcquire(ShmemIndexLock, LW_SHARED);
 
-	hash_seq_init(&hstat, ShmemIndex);
-
 	/* output all allocated entries */
 	memset(nulls, 0, sizeof(nulls));
-	while ((ent = (ShmemIndexEnt *) hash_seq_search(&hstat)) != NULL)
+	foreach_hash(ShmemIndexEnt, ent, ShmemIndex)
 	{
 		int			i;
 		char	   *startptr,
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 7b9f7e218aa..c63feaa9dd7 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -1099,13 +1099,9 @@ StandbyReleaseLockTree(TransactionId xid, int nsubxids, TransactionId *subxids)
 void
 StandbyReleaseAllLocks(void)
 {
-	HASH_SEQ_STATUS status;
-	RecoveryLockXidEntry *entry;
-
 	elog(DEBUG2, "release all standby locks");
 
-	hash_seq_init(&status, RecoveryLockXidHash);
-	while ((entry = hash_seq_search(&status)))
+	foreach_hash(RecoveryLockXidEntry, entry, RecoveryLockXidHash)
 	{
 		StandbyReleaseXidEntryLocks(entry);
 		hash_search(RecoveryLockXidHash, entry, HASH_REMOVE, NULL);
@@ -1123,11 +1119,7 @@ StandbyReleaseAllLocks(void)
 void
 StandbyReleaseOldLocks(TransactionId oldxid)
 {
-	HASH_SEQ_STATUS status;
-	RecoveryLockXidEntry *entry;
-
-	hash_seq_init(&status, RecoveryLockXidHash);
-	while ((entry = hash_seq_search(&status)))
+	foreach_hash(RecoveryLockXidEntry, entry, RecoveryLockXidHash)
 	{
 		Assert(TransactionIdIsValid(entry->xid));
 
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index e5ea54e7824..02852ee1a4c 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -2292,11 +2292,9 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 void
 LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 {
-	HASH_SEQ_STATUS status;
 	LockMethod	lockMethodTable;
 	int			i,
 				numLockModes;
-	LOCALLOCK  *locallock;
 	LOCK	   *lock;
 	int			partition;
 	bool		have_fast_path_lwlock = false;
@@ -2329,9 +2327,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 	 * pointers.  Fast-path locks are cleaned up during the locallock table
 	 * scan, though.
 	 */
-	hash_seq_init(&status, LockMethodLocalHash);
-
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 	{
 		/*
 		 * If the LOCALLOCK entry is unused, something must've gone wrong
@@ -2566,15 +2562,10 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 void
 LockReleaseSession(LOCKMETHODID lockmethodid)
 {
-	HASH_SEQ_STATUS status;
-	LOCALLOCK  *locallock;
-
 	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
 		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
 
-	hash_seq_init(&status, LockMethodLocalHash);
-
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 	{
 		/* Ignore items that are not of the specified lock method */
 		if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
@@ -2598,12 +2589,7 @@ LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
 {
 	if (locallocks == NULL)
 	{
-		HASH_SEQ_STATUS status;
-		LOCALLOCK  *locallock;
-
-		hash_seq_init(&status, LockMethodLocalHash);
-
-		while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 			ReleaseLockIfHeld(locallock, false);
 	}
 	else
@@ -2697,12 +2683,7 @@ LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
 
 	if (locallocks == NULL)
 	{
-		HASH_SEQ_STATUS status;
-		LOCALLOCK  *locallock;
-
-		hash_seq_init(&status, LockMethodLocalHash);
-
-		while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 			LockReassignOwner(locallock, parent);
 	}
 	else
@@ -3383,17 +3364,13 @@ CheckForSessionAndXactLocks(void)
 	} PerLockTagEntry;
 
 	HTAB	   *lockhtab;
-	HASH_SEQ_STATUS status;
-	LOCALLOCK  *locallock;
 
 	/* Create a local hash table keyed by LOCKTAG only */
 	lockhtab = hash_make(PerLockTagEntry, lock,
 						 "CheckForSessionAndXactLocks table", 256);
 
 	/* Scan local lock table to find entries for each LOCKTAG */
-	hash_seq_init(&status, LockMethodLocalHash);
-
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 	{
 		LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
 		PerLockTagEntry *hentry;
@@ -3456,16 +3433,11 @@ CheckForSessionAndXactLocks(void)
 void
 AtPrepare_Locks(void)
 {
-	HASH_SEQ_STATUS status;
-	LOCALLOCK  *locallock;
-
 	/* First, verify there aren't locks of both xact and session level */
 	CheckForSessionAndXactLocks();
 
 	/* Now do the per-locallock cleanup work */
-	hash_seq_init(&status, LockMethodLocalHash);
-
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 	{
 		TwoPhaseLockRecord record;
 		LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
@@ -3553,8 +3525,6 @@ void
 PostPrepare_Locks(FullTransactionId fxid)
 {
 	PGPROC	   *newproc = TwoPhaseGetDummyProc(fxid, false);
-	HASH_SEQ_STATUS status;
-	LOCALLOCK  *locallock;
 	LOCK	   *lock;
 	PROCLOCK   *proclock;
 	PROCLOCKTAG proclocktag;
@@ -3576,9 +3546,7 @@ PostPrepare_Locks(FullTransactionId fxid)
 	 * pointing to the same proclock, and we daren't end up with any dangling
 	 * pointers.
 	 */
-	hash_seq_init(&status, LockMethodLocalHash);
-
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 	{
 		LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
 		bool		haveSessionLock;
@@ -3774,8 +3742,6 @@ LockData *
 GetLockStatusData(void)
 {
 	LockData   *data;
-	PROCLOCK   *proclock;
-	HASH_SEQ_STATUS seqstat;
 	int			els;
 	int			el;
 	int			i;
@@ -3911,9 +3877,7 @@ GetLockStatusData(void)
 	}
 
 	/* Now scan the tables to copy the data */
-	hash_seq_init(&seqstat, LockMethodProcLockHash);
-
-	while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
+	foreach_hash(PROCLOCK, proclock, LockMethodProcLockHash)
 	{
 		PGPROC	   *proc = proclock->tag.myProc;
 		LOCK	   *lock = proclock->tag.myLock;
@@ -4152,8 +4116,6 @@ xl_standby_lock *
 GetRunningTransactionLocks(int *nlocks)
 {
 	xl_standby_lock *accessExclusiveLocks;
-	PROCLOCK   *proclock;
-	HASH_SEQ_STATUS seqstat;
 	int			i;
 	int			index;
 	int			els;
@@ -4175,10 +4137,9 @@ GetRunningTransactionLocks(int *nlocks)
 	 */
 	accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
 
-	/* Now scan the tables to copy the data */
-	hash_seq_init(&seqstat, LockMethodProcLockHash);
-
 	/*
+	 * Now scan the tables to copy the data.
+	 *
 	 * If lock is a currently granted AccessExclusiveLock then it will have
 	 * just one proclock holder, so locks are never accessed twice in this
 	 * particular case. Don't copy this code for use elsewhere because in the
@@ -4186,7 +4147,7 @@ GetRunningTransactionLocks(int *nlocks)
 	 * non-exclusive lock types.
 	 */
 	index = 0;
-	while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
+	foreach_hash(PROCLOCK, proclock, LockMethodProcLockHash)
 	{
 		/* make sure this definition matches the one used in LockAcquire */
 		if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
@@ -4281,18 +4242,14 @@ void
 DumpAllLocks(void)
 {
 	PGPROC	   *proc;
-	PROCLOCK   *proclock;
 	LOCK	   *lock;
-	HASH_SEQ_STATUS status;
 
 	proc = MyProc;
 
 	if (proc && proc->waitLock)
 		LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
 
-	hash_seq_init(&status, LockMethodProcLockHash);
-
-	while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PROCLOCK, proclock, LockMethodProcLockHash)
 	{
 		PROCLOCK_PRINT("DumpAllLocks", proclock);
 
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 1f749beadce..b7466d3ca18 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -316,15 +316,10 @@ init_lwlock_stats(void)
 static void
 print_lwlock_stats(int code, Datum arg)
 {
-	HASH_SEQ_STATUS scan;
-	lwlock_stats *lwstats;
-
-	hash_seq_init(&scan, lwlock_stats_htab);
-
 	/* Grab an LWLock to keep different backends from mixing reports */
 	LWLockAcquire(&MainLWLockArray[0].lock, LW_EXCLUSIVE);
 
-	while ((lwstats = (lwlock_stats *) hash_seq_search(&scan)) != NULL)
+	foreach_hash(lwlock_stats, lwstats, lwlock_stats_htab)
 	{
 		fprintf(stderr,
 				"PID %d lwlock %s %p: shacq %u exacq %u blk %u spindelay %u dequeue self %u\n",
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index c2d85032716..6b602ba89ea 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -1440,8 +1440,6 @@ GetPredicateLockStatusData(void)
 	int			i;
 	int			els,
 				el;
-	HASH_SEQ_STATUS seqstat;
-	PREDICATELOCK *predlock;
 
 	data = (PredicateLockData *) palloc(sizeof(PredicateLockData));
 
@@ -1463,11 +1461,9 @@ GetPredicateLockStatusData(void)
 
 
 	/* Scan through PredicateLockHash and copy contents */
-	hash_seq_init(&seqstat, PredicateLockHash);
-
 	el = 0;
 
-	while ((predlock = (PREDICATELOCK *) hash_seq_search(&seqstat)))
+	foreach_hash(PREDICATELOCK, predlock, PredicateLockHash)
 	{
 		data->locktags[el] = predlock->tag.myTarget->tag;
 		data->xacts[el] = *predlock->tag.myXact;
@@ -2924,8 +2920,6 @@ exit:
 static void
 DropAllPredicateLocksFromTable(Relation relation, bool transfer)
 {
-	HASH_SEQ_STATUS seqstat;
-	PREDICATELOCKTARGET *oldtarget;
 	PREDICATELOCKTARGET *heaptarget;
 	Oid			dbId;
 	Oid			relId;
@@ -2981,9 +2975,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer)
 		RemoveScratchTarget(true);
 
 	/* Scan through target map */
-	hash_seq_init(&seqstat, PredicateLockTargetHash);
-
-	while ((oldtarget = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
+	foreach_hash(PREDICATELOCKTARGET, oldtarget, PredicateLockTargetHash)
 	{
 		dlist_mutable_iter iter;
 
@@ -4406,8 +4398,6 @@ CheckForSerializableConflictIn(Relation relation, const ItemPointerData *tid, Bl
 void
 CheckTableForSerializableConflictIn(Relation relation)
 {
-	HASH_SEQ_STATUS seqstat;
-	PREDICATELOCKTARGET *target;
 	Oid			dbId;
 	Oid			heapId;
 	int			i;
@@ -4441,9 +4431,7 @@ CheckTableForSerializableConflictIn(Relation relation)
 	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
 
 	/* Scan through target list */
-	hash_seq_init(&seqstat, PredicateLockTargetHash);
-
-	while ((target = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
+	foreach_hash(PREDICATELOCKTARGET, target, PredicateLockTargetHash)
 	{
 		dlist_mutable_iter iter;
 
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index d4fa6a144f1..b2ae58662f4 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -409,9 +409,6 @@ smgrdestroyall(void)
 void
 smgrreleaseall(void)
 {
-	HASH_SEQ_STATUS status;
-	SMgrRelation reln;
-
 	/* Nothing to do if hashtable not set up */
 	if (SMgrRelationHash == NULL)
 		return;
@@ -419,9 +416,7 @@ smgrreleaseall(void)
 	/* seems unsafe to accept interrupts while iterating */
 	HOLD_INTERRUPTS();
 
-	hash_seq_init(&status, SMgrRelationHash);
-
-	while ((reln = (SMgrRelation) hash_seq_search(&status)) != NULL)
+	foreach_hash(SMgrRelationData, reln, SMgrRelationHash)
 	{
 		smgrrelease(reln);
 	}
diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c
index 42b7fc11288..f65c52eb9a2 100644
--- a/src/backend/storage/sync/sync.c
+++ b/src/backend/storage/sync/sync.c
@@ -280,8 +280,6 @@ ProcessSyncRequests(void)
 {
 	static bool sync_in_progress = false;
 
-	HASH_SEQ_STATUS hstat;
-	PendingFsyncEntry *entry;
 	int			absorb_counter;
 
 	/* Statistics on sync times */
@@ -338,8 +336,7 @@ ProcessSyncRequests(void)
 	if (sync_in_progress)
 	{
 		/* prior try failed, so update any stale cycle_ctr values */
-		hash_seq_init(&hstat, pendingOps);
-		while ((entry = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
+		foreach_hash(PendingFsyncEntry, entry, pendingOps)
 		{
 			entry->cycle_ctr = sync_cycle_ctr;
 		}
@@ -353,8 +350,7 @@ ProcessSyncRequests(void)
 
 	/* Now scan the hashtable for fsync requests to process */
 	absorb_counter = FSYNCS_PER_ABSORB;
-	hash_seq_init(&hstat, pendingOps);
-	while ((entry = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
+	foreach_hash(PendingFsyncEntry, entry, pendingOps)
 	{
 		int			failures;
 
@@ -495,13 +491,10 @@ RememberSyncRequest(const FileTag *ftag, SyncRequestType type)
 	}
 	else if (type == SYNC_FILTER_REQUEST)
 	{
-		HASH_SEQ_STATUS hstat;
-		PendingFsyncEntry *pfe;
 		ListCell   *cell;
 
 		/* Cancel matching fsync requests */
-		hash_seq_init(&hstat, pendingOps);
-		while ((pfe = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
+		foreach_hash(PendingFsyncEntry, pfe, pendingOps)
 		{
 			if (pfe->tag.handler == ftag->handler &&
 				syncsw[ftag->handler].sync_filetagmatches(ftag, &pfe->tag))
diff --git a/src/backend/tsearch/ts_typanalyze.c b/src/backend/tsearch/ts_typanalyze.c
index a9bc6901a12..439f8788003 100644
--- a/src/backend/tsearch/ts_typanalyze.c
+++ b/src/backend/tsearch/ts_typanalyze.c
@@ -149,7 +149,6 @@ compute_tsvector_stats(VacAttrStats *stats,
 
 	/* This is D from the LC algorithm. */
 	HTAB	   *lexemes_tab;
-	HASH_SEQ_STATUS scan_status;
 
 	/* This is the current bucket number from the LC algorithm */
 	int			b_current;
@@ -288,7 +287,6 @@ compute_tsvector_stats(VacAttrStats *stats,
 		int			nonnull_cnt = samplerows - null_cnt;
 		int			i;
 		TrackItem **sort_table;
-		TrackItem  *item;
 		int			track_len;
 		int			cutoff_freq;
 		int			minfreq,
@@ -315,10 +313,9 @@ compute_tsvector_stats(VacAttrStats *stats,
 		i = hash_get_num_entries(lexemes_tab);	/* surely enough space */
 		sort_table = (TrackItem **) palloc(sizeof(TrackItem *) * i);
 
-		hash_seq_init(&scan_status, lexemes_tab);
 		track_len = 0;
 		maxfreq = 0;
-		while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
+		foreach_hash(TrackItem, item, lexemes_tab)
 		{
 			if (item->frequency > cutoff_freq)
 			{
@@ -462,11 +459,7 @@ compute_tsvector_stats(VacAttrStats *stats,
 static void
 prune_lexemes_hashtable(HTAB *lexemes_tab, int b_current)
 {
-	HASH_SEQ_STATUS scan_status;
-	TrackItem  *item;
-
-	hash_seq_init(&scan_status, lexemes_tab);
-	while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrackItem, item, lexemes_tab)
 	{
 		if (item->frequency + item->delta <= b_current)
 		{
diff --git a/src/backend/utils/activity/wait_event.c b/src/backend/utils/activity/wait_event.c
index 35aa0208fc9..d0b90f40d32 100644
--- a/src/backend/utils/activity/wait_event.c
+++ b/src/backend/utils/activity/wait_event.c
@@ -299,8 +299,6 @@ char	  **
 GetWaitEventCustomNames(uint32 classId, int *nwaitevents)
 {
 	char	  **waiteventnames;
-	WaitEventCustomEntryByName *hentry;
-	HASH_SEQ_STATUS hash_seq;
 	int			index;
 	int			els;
 
@@ -313,10 +311,8 @@ GetWaitEventCustomNames(uint32 classId, int *nwaitevents)
 	waiteventnames = palloc(els * sizeof(char *));
 
 	/* Now scan the hash table to copy the data */
-	hash_seq_init(&hash_seq, WaitEventCustomHashByName);
-
 	index = 0;
-	while ((hentry = (WaitEventCustomEntryByName *) hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(WaitEventCustomEntryByName, hentry, WaitEventCustomHashByName)
 	{
 		if ((hentry->wait_event_info & WAIT_EVENT_CLASS_MASK) != classId)
 			continue;
diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c
index cc310c044b3..a5be7e4c1c3 100644
--- a/src/backend/utils/adt/array_typanalyze.c
+++ b/src/backend/utils/adt/array_typanalyze.c
@@ -223,7 +223,6 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 
 	/* This is D from the LC algorithm. */
 	HTAB	   *elements_tab;
-	HASH_SEQ_STATUS scan_status;
 
 	/* This is the current bucket number from the LC algorithm */
 	int			b_current;
@@ -232,10 +231,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 	int			bucket_width;
 	int			array_no;
 	int64		element_no;
-	TrackItem  *item;
 	int			slot_idx;
 	HTAB	   *count_tab;
-	DECountItem *count_item;
 
 	extra_data = (ArrayAnalyzeExtraData *) stats->extra_data;
 
@@ -300,6 +297,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 		int64		prev_element_no = element_no;
 		int			distinct_count;
 		bool		count_item_found;
+		DECountItem *count_item;
 
 		vacuum_delay_point(true);
 
@@ -338,6 +336,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 		{
 			Datum		elem_value;
 			bool		found;
+			TrackItem  *item;
 
 			/* No null element processing other than flag setting here */
 			if (elem_nulls[j])
@@ -458,10 +457,9 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 		i = hash_get_num_entries(elements_tab); /* surely enough space */
 		sort_table = (TrackItem **) palloc(sizeof(TrackItem *) * i);
 
-		hash_seq_init(&scan_status, elements_tab);
 		track_len = 0;
 		maxfreq = 0;
-		while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
+		foreach_hash(TrackItem, item, elements_tab)
 		{
 			if (item->frequency > cutoff_freq)
 			{
@@ -595,9 +593,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 			 */
 			sorted_count_items = (DECountItem **)
 				palloc(sizeof(DECountItem *) * count_items_count);
-			hash_seq_init(&scan_status, count_tab);
 			j = 0;
-			while ((count_item = (DECountItem *) hash_seq_search(&scan_status)) != NULL)
+			foreach_hash(DECountItem, count_item, count_tab)
 			{
 				sorted_count_items[j++] = count_item;
 			}
@@ -684,11 +681,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 static void
 prune_element_hashtable(HTAB *elements_tab, int b_current)
 {
-	HASH_SEQ_STATUS scan_status;
-	TrackItem  *item;
-
-	hash_seq_init(&scan_status, elements_tab);
-	while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrackItem, item, elements_tab)
 	{
 		if (item->frequency + item->delta <= b_current)
 		{
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index b76d6bbd5c9..eb441a81552 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -2990,8 +2990,6 @@ RelationCacheInvalidateEntry(Oid relationId)
 void
 RelationCacheInvalidate(bool debug_discard)
 {
-	HASH_SEQ_STATUS status;
-	RelIdCacheEnt *idhentry;
 	Relation	relation;
 	List	   *rebuildFirstList = NIL;
 	List	   *rebuildList = NIL;
@@ -3004,9 +3002,7 @@ RelationCacheInvalidate(bool debug_discard)
 	RelationMapInvalidateAll();
 
 	/* Phase 1 */
-	hash_seq_init(&status, RelationIdCache);
-
-	while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache)
 	{
 		relation = idhentry->reldesc;
 
@@ -3151,12 +3147,9 @@ AssertPendingSyncConsistency(Relation relation)
 void
 AssertPendingSyncs_RelationCache(void)
 {
-	HASH_SEQ_STATUS status;
-	LOCALLOCK  *locallock;
 	Relation   *rels;
 	int			maxrels;
 	int			nrels;
-	RelIdCacheEnt *idhentry;
 	int			i;
 
 	/*
@@ -3170,8 +3163,7 @@ AssertPendingSyncs_RelationCache(void)
 	maxrels = 1;
 	rels = palloc(maxrels * sizeof(*rels));
 	nrels = 0;
-	hash_seq_init(&status, GetLockMethodLocalHash());
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, GetLockMethodLocalHash())
 	{
 		Oid			relid;
 		Relation	r;
@@ -3193,8 +3185,7 @@ AssertPendingSyncs_RelationCache(void)
 		rels[nrels++] = r;
 	}
 
-	hash_seq_init(&status, RelationIdCache);
-	while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache)
 		AssertPendingSyncConsistency(idhentry->reldesc);
 
 	for (i = 0; i < nrels; i++)
@@ -3222,8 +3213,6 @@ AssertPendingSyncs_RelationCache(void)
 void
 AtEOXact_RelationCache(bool isCommit)
 {
-	HASH_SEQ_STATUS status;
-	RelIdCacheEnt *idhentry;
 	int			i;
 
 	/*
@@ -3246,8 +3235,7 @@ AtEOXact_RelationCache(bool isCommit)
 	 */
 	if (eoxact_list_overflowed)
 	{
-		hash_seq_init(&status, RelationIdCache);
-		while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
+		foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache)
 		{
 			AtEOXact_cleanup(idhentry->reldesc, isCommit);
 		}
@@ -3256,10 +3244,11 @@ AtEOXact_RelationCache(bool isCommit)
 	{
 		for (i = 0; i < eoxact_list_len; i++)
 		{
-			idhentry = (RelIdCacheEnt *) hash_search(RelationIdCache,
-													 &eoxact_list[i],
-													 HASH_FIND,
-													 NULL);
+			RelIdCacheEnt *idhentry = hash_search(RelationIdCache,
+												  &eoxact_list[i],
+												  HASH_FIND,
+												  NULL);
+
 			if (idhentry != NULL)
 				AtEOXact_cleanup(idhentry->reldesc, isCommit);
 		}
@@ -3375,8 +3364,6 @@ void
 AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
 						  SubTransactionId parentSubid)
 {
-	HASH_SEQ_STATUS status;
-	RelIdCacheEnt *idhentry;
 	int			i;
 
 	/*
@@ -3394,8 +3381,7 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
 	 */
 	if (eoxact_list_overflowed)
 	{
-		hash_seq_init(&status, RelationIdCache);
-		while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
+		foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache)
 		{
 			AtEOSubXact_cleanup(idhentry->reldesc, isCommit,
 								mySubid, parentSubid);
@@ -3405,6 +3391,8 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
 	{
 		for (i = 0; i < eoxact_list_len; i++)
 		{
+			RelIdCacheEnt *idhentry;
+
 			idhentry = (RelIdCacheEnt *) hash_search(RelationIdCache,
 													 &eoxact_list[i],
 													 HASH_FIND,
@@ -6583,8 +6571,6 @@ write_relcache_init_file(bool shared)
 	char		tempfilename[MAXPGPATH];
 	char		finalfilename[MAXPGPATH];
 	int			magic;
-	HASH_SEQ_STATUS status;
-	RelIdCacheEnt *idhentry;
 	int			i;
 
 	/*
@@ -6644,9 +6630,7 @@ write_relcache_init_file(bool shared)
 	/*
 	 * Write all the appropriate reldescs (in no particular order).
 	 */
-	hash_seq_init(&status, RelationIdCache);
-
-	while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache)
 	{
 		Relation	rel = idhentry->reldesc;
 		Form_pg_class relform = rel->rd_rel;
diff --git a/src/backend/utils/cache/relfilenumbermap.c b/src/backend/utils/cache/relfilenumbermap.c
index 69aba463eae..2b9ca8b36a1 100644
--- a/src/backend/utils/cache/relfilenumbermap.c
+++ b/src/backend/utils/cache/relfilenumbermap.c
@@ -51,14 +51,10 @@ typedef struct
 static void
 RelfilenumberMapInvalidateCallback(Datum arg, Oid relid)
 {
-	HASH_SEQ_STATUS status;
-	RelfilenumberMapEntry *entry;
-
 	/* callback only gets registered after creating the hash */
 	Assert(RelfilenumberMapHash != NULL);
 
-	hash_seq_init(&status, RelfilenumberMapHash);
-	while ((entry = (RelfilenumberMapEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(RelfilenumberMapEntry, entry, RelfilenumberMapHash)
 	{
 		/*
 		 * If relid is InvalidOid, signaling a complete reset, we must remove
diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c
index bcfd6a9f0d2..bd6a161e7de 100644
--- a/src/backend/utils/cache/spccache.c
+++ b/src/backend/utils/cache/spccache.c
@@ -55,11 +55,7 @@ typedef struct
 static void
 InvalidateTableSpaceCacheCallback(Datum arg, int cacheid, uint32 hashvalue)
 {
-	HASH_SEQ_STATUS status;
-	TableSpaceCacheEntry *spc;
-
-	hash_seq_init(&status, TableSpaceCacheHash);
-	while ((spc = (TableSpaceCacheEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(TableSpaceCacheEntry, spc, TableSpaceCacheHash)
 	{
 		if (spc->opts)
 			pfree(spc->opts);
diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c
index cfa6d8b09a4..5446de5eb1a 100644
--- a/src/backend/utils/cache/ts_cache.c
+++ b/src/backend/utils/cache/ts_cache.c
@@ -94,11 +94,8 @@ static void
 InvalidateTSCacheCallBack(Datum arg, int cacheid, uint32 hashvalue)
 {
 	HTAB	   *hash = (HTAB *) DatumGetPointer(arg);
-	HASH_SEQ_STATUS status;
-	TSAnyCacheEntry *entry;
 
-	hash_seq_init(&status, hash);
-	while ((entry = (TSAnyCacheEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(TSAnyCacheEntry, entry, hash)
 		entry->isvalid = false;
 
 	/* Also invalidate the current-config cache if it's pg_ts_config */
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index f3897c953d3..f9ce7c5ddf3 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -288,8 +288,6 @@ ProcessConfigFileInternal(GucContext context, bool applySettings, int elevel)
 	const char *ConfFileWithError;
 	ConfigVariable *head,
 			   *tail;
-	HASH_SEQ_STATUS status;
-	GUCHashEntry *hentry;
 
 	/* Parse the main config file into a list of option names and values */
 	ConfFileWithError = ConfigFileName;
@@ -364,8 +362,7 @@ ProcessConfigFileInternal(GucContext context, bool applySettings, int elevel)
 	 * need this so that we can tell below which ones have been removed from
 	 * the file since we last processed it.
 	 */
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 	{
 		struct config_generic *gconf = hentry->gucvar;
 
@@ -449,8 +446,7 @@ ProcessConfigFileInternal(GucContext context, bool applySettings, int elevel)
 	 * boot-time defaults.  If such a variable can't be changed after startup,
 	 * report that and continue.
 	 */
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 	{
 		struct config_generic *gconf = hentry->gucvar;
 
@@ -839,8 +835,6 @@ struct config_generic **
 get_guc_variables(int *num_vars)
 {
 	struct config_generic **result;
-	HASH_SEQ_STATUS status;
-	GUCHashEntry *hentry;
 	int			i;
 
 	*num_vars = hash_get_num_entries(guc_hashtab);
@@ -848,8 +842,7 @@ get_guc_variables(int *num_vars)
 
 	/* Extract pointers from the hash table */
 	i = 0;
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 		result[i++] = hentry->gucvar;
 	Assert(i == *num_vars);
 
@@ -1400,9 +1393,6 @@ check_GUC_init(const struct config_generic *gconf)
 void
 InitializeGUCOptions(void)
 {
-	HASH_SEQ_STATUS status;
-	GUCHashEntry *hentry;
-
 	/*
 	 * Before log_line_prefix could possibly receive a nonempty setting, make
 	 * sure that timezone processing is minimally alive (see elog.c).
@@ -1418,8 +1408,7 @@ InitializeGUCOptions(void)
 	 * Load all variables with their compiled-in defaults, and initialize
 	 * status fields as needed.
 	 */
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 	{
 		/* Check mapping between initial and default value */
 		Assert(check_GUC_init(hentry->gucvar));
@@ -2414,9 +2403,6 @@ AtEOXact_GUC(bool isCommit, int nestLevel)
 void
 BeginReportingGUCOptions(void)
 {
-	HASH_SEQ_STATUS status;
-	GUCHashEntry *hentry;
-
 	/*
 	 * Don't do anything unless talking to an interactive frontend.
 	 */
@@ -2438,8 +2424,7 @@ BeginReportingGUCOptions(void)
 						PGC_INTERNAL, PGC_S_OVERRIDE);
 
 	/* Transmit initial values of interesting variables */
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 	{
 		struct config_generic *conf = hentry->gucvar;
 
@@ -5142,16 +5127,13 @@ void
 MarkGUCPrefixReserved(const char *className)
 {
 	int			classLen = strlen(className);
-	HASH_SEQ_STATUS status;
-	GUCHashEntry *hentry;
 	MemoryContext oldcontext;
 
 	/*
 	 * Check for existing placeholders.  We must actually remove invalid
 	 * placeholders, else future parallel worker startups will fail.
 	 */
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 	{
 		struct config_generic *var = hentry->gucvar;
 
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 8241d32f0c6..4a7b10d219b 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -776,12 +776,7 @@ PreCommit_Portals(bool isPrepare)
 void
 AtAbort_Portals(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -853,12 +848,7 @@ AtAbort_Portals(void)
 void
 AtCleanup_Portals(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -912,12 +902,7 @@ AtCleanup_Portals(void)
 void
 PortalErrorCleanup(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -941,12 +926,7 @@ AtSubCommit_Portals(SubTransactionId mySubid,
 					int parentLevel,
 					ResourceOwner parentXactOwner)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -977,12 +957,7 @@ AtSubAbort_Portals(SubTransactionId mySubid,
 				   ResourceOwner myXactOwner,
 				   ResourceOwner parentXactOwner)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -1087,12 +1062,7 @@ AtSubAbort_Portals(SubTransactionId mySubid,
 void
 AtSubCleanup_Portals(SubTransactionId mySubid)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -1127,8 +1097,6 @@ Datum
 pg_cursor(PG_FUNCTION_ARGS)
 {
 	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	HASH_SEQ_STATUS hash_seq;
-	PortalHashEnt *hentry;
 
 	/*
 	 * We put all the tuples into a tuplestore in one scan of the hashtable.
@@ -1136,8 +1104,7 @@ pg_cursor(PG_FUNCTION_ARGS)
 	 */
 	InitMaterializedSRF(fcinfo, 0);
 
-	hash_seq_init(&hash_seq, PortalHashTable);
-	while ((hentry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 		Datum		values[6];
@@ -1166,12 +1133,7 @@ pg_cursor(PG_FUNCTION_ARGS)
 bool
 ThereAreNoReadyPortals(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -1202,12 +1164,7 @@ ThereAreNoReadyPortals(void)
 void
 HoldPinnedPortals(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -1251,15 +1208,11 @@ HoldPinnedPortals(void)
 void
 ForgetPortalSnapshots(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
 	int			numPortalSnaps = 0;
 	int			numActiveSnaps = 0;
 
 	/* First, scan PortalHashTable and clear portalSnapshot fields */
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
-- 
2.52.0

