From 7e4b0778c4144cfaeecd0bd874f9bc5202649c75 Mon Sep 17 00:00:00 2001
From: Ashutosh Bapat <ashutosh.bapat.oss@gmail.com>
Date: Fri, 13 Feb 2026 14:41:58 +0530
Subject: [PATCH 3/3] WIP: Resizable shared memory structures

This commit adds support for resizable shared memory structures.

Suggested by: Heikki Linnakangas
Author: Ashutosh Bapat <ashutosh.bapat.oss@gmail.com>
---
 doc/src/sgml/system-views.sgml                |   9 +
 src/backend/catalog/system_views.sql          |   7 +
 src/backend/port/sysv_shmem.c                 | 601 ++++++++++++++----
 src/backend/port/win32_shmem.c                | 296 +++++----
 src/backend/postmaster/launch_backend.c       |  39 +-
 src/backend/storage/ipc/ipci.c                |  27 +-
 src/backend/storage/ipc/shmem.c               | 558 ++++++++++++++--
 src/backend/storage/lmgr/predicate.c          |   2 +-
 src/include/catalog/pg_proc.dat               |  12 +-
 src/include/storage/pg_shmem.h                |  49 +-
 src/include/storage/shmem.h                   |  14 +-
 src/test/modules/Makefile                     |   1 +
 src/test/modules/meson.build                  |   1 +
 src/test/modules/resizable_shmem/Makefile     |  23 +
 .../expected/resizable_shmem.out              |  89 +++
 src/test/modules/resizable_shmem/meson.build  |  37 ++
 .../resizable_shmem/resizable_shmem--1.0.sql  |  22 +
 .../modules/resizable_shmem/resizable_shmem.c | 201 ++++++
 .../resizable_shmem/resizable_shmem.conf      |   1 +
 .../resizable_shmem/resizable_shmem.control   |   5 +
 .../specs/resizable_shmem.spec                |  37 ++
 src/test/regress/expected/rules.out           |   9 +-
 src/tools/pgindent/typedefs.list              |   3 +
 23 files changed, 1657 insertions(+), 386 deletions(-)
 create mode 100644 src/test/modules/resizable_shmem/Makefile
 create mode 100644 src/test/modules/resizable_shmem/expected/resizable_shmem.out
 create mode 100644 src/test/modules/resizable_shmem/meson.build
 create mode 100644 src/test/modules/resizable_shmem/resizable_shmem--1.0.sql
 create mode 100644 src/test/modules/resizable_shmem/resizable_shmem.c
 create mode 100644 src/test/modules/resizable_shmem/resizable_shmem.conf
 create mode 100644 src/test/modules/resizable_shmem/resizable_shmem.control
 create mode 100644 src/test/modules/resizable_shmem/specs/resizable_shmem.spec

diff --git a/doc/src/sgml/system-views.sgml b/doc/src/sgml/system-views.sgml
index 8b4abef8c68..60be5a67859 100644
--- a/doc/src/sgml/system-views.sgml
+++ b/doc/src/sgml/system-views.sgml
@@ -4216,6 +4216,15 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
       </para></entry>
      </row>
 
+     <row>
+      <entry role="catalog_table_entry"><para role="column_definition">
+       <structfield>segment</structfield> <type>text</type>
+      </para>
+      <para>
+       The name of the shared memory segment containing the allocation.
+      </para></entry>
+     </row>
+
      <row>
       <entry role="catalog_table_entry"><para role="column_definition">
        <structfield>off</structfield> <type>int8</type>
diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql
index 7553f31fef0..bc11589aeab 100644
--- a/src/backend/catalog/system_views.sql
+++ b/src/backend/catalog/system_views.sql
@@ -668,6 +668,13 @@ GRANT SELECT ON pg_shmem_allocations TO pg_read_all_stats;
 REVOKE EXECUTE ON FUNCTION pg_get_shmem_allocations() FROM PUBLIC;
 GRANT EXECUTE ON FUNCTION pg_get_shmem_allocations() TO pg_read_all_stats;
 
+CREATE VIEW pg_shmem_segments AS
+    SELECT * FROM pg_get_shmem_segments();
+
+REVOKE ALL ON pg_shmem_segments FROM PUBLIC;
+GRANT SELECT ON pg_shmem_segments TO pg_read_all_stats;
+REVOKE EXECUTE ON FUNCTION pg_get_shmem_segments() FROM PUBLIC;
+GRANT EXECUTE ON FUNCTION pg_get_shmem_segments() TO pg_read_all_stats;
 CREATE VIEW pg_shmem_allocations_numa AS
     SELECT * FROM pg_get_shmem_allocations_numa();
 
diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c
index 2e3886cf9fe..d54b9dcc1d2 100644
--- a/src/backend/port/sysv_shmem.c
+++ b/src/backend/port/sysv_shmem.c
@@ -39,7 +39,17 @@
 #include "utils/guc_hooks.h"
 #include "utils/pidfile.h"
 
-
+/*
+ * TODO: The first two sentences in the first paragraph below make me feel like
+ * we should have only one SysV segment. Is that true? Needs investigation.
+ */
+/*
+ * TODO: third paragraph should mention that we use memfd_create to create
+ * shared memory segment, and possibly there's a way to share that segment
+ * between two processes using the file descriptor instead of going through SysV
+ * shared memory segment. So one day EXEC_BACKEND can also use anonymous shared
+ * memory.
+ */
 /*
  * As of PostgreSQL 9.3, we normally allocate only a very small amount of
  * System V shared memory, and only for the purposes of providing an
@@ -91,12 +101,46 @@ typedef enum
 	SHMSTATE_UNATTACHED,		/* pertinent to DataDir, no attached PIDs */
 } IpcMemoryState;
 
+/*
+ * Resizable anonymous shared memory segments.
+ *
+ * We need to place resizable anonymous shared memory mappings in such a way,
+ * that there will be gaps between them in the address space. Those gaps have to
+ * be large enough to resize the mapping up to given maximum size, without
+ * counting towards the total memory consumption.
+ *
+ * To achieve this, for each shared memory segment we first create an anonymous
+ * file using memfd_create, which will accommodate actual shared memory mapping
+ * content. Then we create a mapping for this file using mmap reserving maximum
+ * address space using MAP_NORESERVE (prevents the space from being counted
+ * against memory limits). The mapping serves as an address space reservation,
+ * into which shared memory segment can be resized.
+ */
+
+PGUsedShmemInfo UsedShmemInfo[NUM_MEMORY_MAPPINGS];
+
+ /*
+  * Structure to hold anonymous shared memory segment properties.
+  */
+typedef struct AnonShmemData
+{
+	int			fd;				/* fd for the backing anon file */
+	void	   *addr;			/* Pointer to the start of the mapped memory */
+	Size		size;			/* Size of the mapped memory */
 
-unsigned long UsedShmemSegID = 0;
-void	   *UsedShmemSegAddr = NULL;
+} AnonShmemData;
 
-static Size AnonymousShmemSize;
-static void *AnonymousShmem = NULL;
+AnonShmemData AnonShmemInfo[NUM_MEMORY_MAPPINGS];
+
+/*
+ * TODO: the whole huge pages logic needs a revision. It's fragile right now.
+ *
+ * Flag telling that we have decided to use huge pages.
+ *
+ * XXX: It's possible to use GetConfigOption("huge_pages_status", false, false)
+ * instead, but it feels like an overkill.
+ */
+static bool huge_pages_on = false;
 
 static void *InternalIpcMemoryCreate(IpcMemoryKey memKey, Size size);
 static void IpcMemoryDetach(int status, Datum shmaddr);
@@ -250,6 +294,7 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, Size size)
 	}
 
 	/* Register on-exit routine to delete the new segment */
+	/* TODO: This happens for every shared memory that gets created. Do it only once. */
 	on_shmem_exit(IpcMemoryDelete, Int32GetDatum(shmid));
 
 	/* OK, should be able to attach to the segment */
@@ -260,6 +305,7 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, Size size)
 			 shmid, requestedAddress, PG_SHMAT_FLAGS);
 
 	/* Register on-exit routine to detach new segment before deleting */
+	/* TODO: This happens for every shared memory that gets created. Do it only once. */
 	on_shmem_exit(IpcMemoryDetach, PointerGetDatum(memAddress));
 
 	/*
@@ -471,19 +517,20 @@ PGSharedMemoryAttach(IpcMemoryId shmId,
  * hugepage sizes, we might want to think about more invasive strategies,
  * such as increasing shared_buffers to absorb the extra space.
  *
- * Returns the (real, assumed or config provided) page size into
- * *hugepagesize, and the hugepage-related mmap flags to use into
- * *mmap_flags if requested by the caller.  If huge pages are not supported,
- * *hugepagesize and *mmap_flags are set to 0.
+ * Returns the (real, assumed or config provided) page size into *hugepagesize,
+ * the hugepage-related mmap and memfd flags to use into *mmap_flags and
+ * *memfd_flags if requested by the caller. If huge pages are not supported,
+ * *hugepagesize, *mmap_flags and *memfd_flags are set to 0.
  */
 void
-GetHugePageSize(Size *hugepagesize, int *mmap_flags)
+GetHugePageSize(Size *hugepagesize, int *mmap_flags, int *memfd_flags)
 {
 #ifdef MAP_HUGETLB
 
 	Size		default_hugepagesize = 0;
 	Size		hugepagesize_local = 0;
 	int			mmap_flags_local = 0;
+	int			memfd_flags_local = 0;
 
 	/*
 	 * System-dependent code to find out the default huge page size.
@@ -542,6 +589,7 @@ GetHugePageSize(Size *hugepagesize, int *mmap_flags)
 	}
 
 	mmap_flags_local = MAP_HUGETLB;
+	memfd_flags_local = MFD_HUGETLB;
 
 	/*
 	 * On recent enough Linux, also include the explicit page size, if
@@ -556,11 +604,22 @@ GetHugePageSize(Size *hugepagesize, int *mmap_flags)
 	}
 #endif
 
+#if defined(MFD_HUGE_MASK) && defined(MFD_HUGE_SHIFT)
+	if (hugepagesize_local != default_hugepagesize)
+	{
+		int			shift = pg_ceil_log2_64(hugepagesize_local);
+
+		memfd_flags_local |= (shift & MFD_HUGE_MASK) << MFD_HUGE_SHIFT;
+	}
+#endif
+
 	/* assign the results found */
 	if (mmap_flags)
 		*mmap_flags = mmap_flags_local;
 	if (hugepagesize)
 		*hugepagesize = hugepagesize_local;
+	if (memfd_flags)
+		*memfd_flags = memfd_flags_local;
 
 #else
 
@@ -568,6 +627,8 @@ GetHugePageSize(Size *hugepagesize, int *mmap_flags)
 		*hugepagesize = 0;
 	if (mmap_flags)
 		*mmap_flags = 0;
+	if (memfd_flags)
+		*memfd_flags = 0;
 
 #endif							/* MAP_HUGETLB */
 }
@@ -589,84 +650,231 @@ check_huge_page_size(int *newval, void **extra, GucSource source)
 	return true;
 }
 
+/*
+ * Wrapper around posix_fallocate() to allocate memory for a given shared memory
+ * segment.
+ *
+ * Performs retry on EINTR, and raises error upon failure.
+ */
+static void
+shmem_fallocate(int fd, const char *mapping_name, Size size, int elevel)
+{
+#if defined(HAVE_POSIX_FALLOCATE) && defined(__linux__)
+	int			ret;
+
+
+	/*
+	 * If there is not enough memory, trying to access a hole in address space
+	 * will cause SIGBUS. If supported, avoid that by allocating memory
+	 * upfront.
+	 *
+	 * We still use a traditional EINTR retry loop to handle SIGCONT.
+	 * posix_fallocate() doesn't restart automatically, and we don't want this
+	 * to fail if you attach a debugger.
+	 */
+	do
+	{
+		ret = posix_fallocate(fd, 0, size);
+	} while (ret == EINTR);
+
+	if (ret != 0)
+	{
+		ereport(elevel,
+				(errmsg("segment[%s]: could not allocate space for anonymous file: %s",
+						mapping_name, strerror(ret)),
+				 (ret == ENOMEM) ?
+				 errhint("This error usually means that PostgreSQL's request "
+						 "for a shared memory segment exceeded available memory, "
+						 "swap space, or huge pages. To reduce the request size "
+						 "(currently %zu bytes), reduce PostgreSQL's shared "
+						 "memory usage, perhaps by reducing \"shared_buffers\" or "
+						 "\"max_connections\".",
+						 size) : 0));
+	}
+#endif
+}
+
 /*
  * Creates an anonymous mmap()ed shared memory segment.
  *
- * Pass the requested size in *size.  This function will modify *size to the
- * actual size of the allocation, if it ends up allocating a segment that is
- * larger than requested.
+ * This function will modify mapping size to the actual size of the allocation,
+ * if it ends up allocating a segment that is larger than requested. If needed,
+ * it also rounds up the mapping reserved size to be a multiple of huge page
+ * size.
+ *
+ * Note that we do not fallback from huge pages to regular pages in this
+ * function, this decision was already made in ReserveAnonymousMemory and we
+ * stick to it.
+ *
+ * TODO: Update the prologue to be consistent with the code.
  */
-static void *
-CreateAnonymousSegment(Size *size)
+static void
+CreateAnonymousSegment(int segment_id, const char *segname, Size init_size, Size max_size)
 {
-	Size		allocsize = *size;
 	void	   *ptr = MAP_FAILED;
-	int			mmap_errno = 0;
-	int			mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_HASSEMAPHORE;
+	int			mmap_flags = MAP_SHARED | MAP_HASSEMAPHORE | MAP_NORESERVE;
+	AnonShmemData *anonshmem = &AnonShmemInfo[segment_id];
+	int			memfd_flags = 0;
 
 #ifndef MAP_HUGETLB
-	/* PGSharedMemoryCreate should have dealt with this case */
-	Assert(huge_pages != HUGE_PAGES_ON);
+	/* PrepareHugePages should have dealt with this case */
+	Assert(huge_pages != HUGE_PAGES_ON && !huge_pages_on);
 #else
-	if (huge_pages == HUGE_PAGES_ON || huge_pages == HUGE_PAGES_TRY)
+	if (huge_pages_on)
 	{
-		/*
-		 * Round up the request size to a suitable large value.
-		 */
 		Size		hugepagesize;
 		int			huge_mmap_flags;
+		int			huge_memfd_flags;
 
-		GetHugePageSize(&hugepagesize, &huge_mmap_flags);
+		/* Make sure nothing is messed up */
+		Assert(huge_pages == HUGE_PAGES_ON || huge_pages == HUGE_PAGES_TRY);
 
-		if (allocsize % hugepagesize != 0)
-			allocsize = add_size(allocsize, hugepagesize - (allocsize % hugepagesize));
+		/* Round up the request size to a suitable large value */
+		GetHugePageSize(&hugepagesize, &huge_mmap_flags, &huge_memfd_flags);
+		init_size = align_size(init_size, hugepagesize);
+		max_size = align_size(max_size, hugepagesize);
 
-		ptr = mmap(NULL, allocsize, PROT_READ | PROT_WRITE,
-				   mmap_flags | huge_mmap_flags, -1, 0);
-		mmap_errno = errno;
-		if (huge_pages == HUGE_PAGES_TRY && ptr == MAP_FAILED)
-			elog(DEBUG1, "mmap(%zu) with MAP_HUGETLB failed, huge pages disabled: %m",
-				 allocsize);
+		/* Verify that the new size is within the reserved boundaries */
+		Assert(max_size >= init_size);
+
+		mmap_flags = mmap_flags | huge_mmap_flags;
+		memfd_flags = memfd_flags | huge_memfd_flags;
 	}
 #endif
 
 	/*
-	 * Report whether huge pages are in use.  This needs to be tracked before
-	 * the second mmap() call if attempting to use huge pages failed
-	 * previously.
+	 * Prepare an anonymous file backing the segment. Its size will be
+	 * specified later via ftruncate.
+	 *
+	 * The file behaves like a regular file, but lives in memory. Once all
+	 * references to the file are dropped, it is automatically released.
+	 * Anonymous memory is used for all backing pages of the file, thus it has
+	 * the same semantics as anonymous memory allocations using mmap with the
+	 * MAP_ANONYMOUS flag.
+	 *
+	 * TODO: Need a configuration test for memfd_create.
+	 *
+	 * TODO: Earlier releases did not use file backed shared memory segments.
+	 * By setting bit 1 in /proc/<PID>/coredump_filter, those shared memory
+	 * segments could be dumped to the core file. But dumping file backed
+	 * shared memory segments requires bit 3 to be set. We need to document
+	 * this change in the release notes.
 	 */
-	SetConfigOption("huge_pages_status", (ptr == MAP_FAILED) ? "off" : "on",
-					PGC_INTERNAL, PGC_S_DYNAMIC_DEFAULT);
+	anonshmem->fd = memfd_create(segname, memfd_flags);
+	if (anonshmem->fd == -1)
+		ereport(FATAL,
+				(errmsg("segment[%s]: could not create anonymous shared memory file: %m",
+						segname)));
 
-	if (ptr == MAP_FAILED && huge_pages != HUGE_PAGES_ON)
-	{
-		/*
-		 * Use the original size, not the rounded-up value, when falling back
-		 * to non-huge pages.
-		 */
-		allocsize = *size;
-		ptr = mmap(NULL, allocsize, PROT_READ | PROT_WRITE,
-				   mmap_flags, -1, 0);
-		mmap_errno = errno;
-	}
+	elog(DEBUG1, "segment[%s]: mmap(%zu)", segname, init_size);
 
+	/*
+	 * Reserve maximum required address space for future expansion of this
+	 * memory segment. The whole address space will be setup for read/write
+	 * access, so that memory allocated to this address space can be read or
+	 * written to even if it is resized in the future using just ftruncate.
+	 * MAP_NORESERVE alone should ensure that no memory is allocated. But when
+	 * using huge pages, the memory is allocated at mmap time if PROT_WRITE |
+	 * PROT_READ is used. Hence we create the mapping with PROT_NONE first and
+	 * then use mprotect to set the required permissions.
+	 */
+	ptr = mmap(NULL, max_size, PROT_NONE, mmap_flags, anonshmem->fd, 0);
 	if (ptr == MAP_FAILED)
+		ereport(FATAL,
+				(errmsg("segment[%s]: could not map anonymous shared memory: %m",
+						segname)));
+
+	if (mprotect(ptr, max_size, PROT_READ | PROT_WRITE) == -1)
+		ereport(FATAL,
+				(errmsg("segment[%s]: could not update anonymous shared memory permissions: %m",
+						segname)));
+
+
+	/*
+	 * Resize the backing file to the required size. On platforms where it is
+	 * supported, we also allocate the required memory upfront. On other
+	 * platforms the memory up to the size of the file will be allocated on demand.
+	 */
+	if (ftruncate(anonshmem->fd, init_size) == -1)
 	{
-		errno = mmap_errno;
+		int			save_errno = errno;
+
+		close(anonshmem->fd);
+		anonshmem->fd = -1;
+
+		errno = save_errno;
 		ereport(FATAL,
-				(errmsg("could not map anonymous shared memory: %m"),
-				 (mmap_errno == ENOMEM) ?
+				(errmsg("segment[%s]: could not truncate anonymous file to size %zu: %m",
+						segname, init_size),
+				 (save_errno == ENOMEM) ?
 				 errhint("This error usually means that PostgreSQL's request "
 						 "for a shared memory segment exceeded available memory, "
 						 "swap space, or huge pages. To reduce the request size "
 						 "(currently %zu bytes), reduce PostgreSQL's shared "
 						 "memory usage, perhaps by reducing \"shared_buffers\" or "
 						 "\"max_connections\".",
-						 allocsize) : 0));
+						 init_size) : 0));
+	}
+	/* TODO: This might make the backend startup slower. */
+	shmem_fallocate(anonshmem->fd, segname, init_size, FATAL);
+
+	anonshmem->addr = ptr;
+	anonshmem->size = max_size;
+}
+
+/*
+ * PrepareHugePages
+ *
+ * Figure out if there are enough huge pages to allocate all shared memory
+ * segments, and report that information via huge_pages_status and
+ * huge_pages_on. It needs to be called before creating shared memory segments.
+ *
+ * It is necessary to maintain the same semantic (simple on/off) for
+ * huge_pages_status, even if there are multiple shared memory segments: all
+ * segments either use huge pages or not, there is no mix of segments with
+ * different page size. The latter might be actually beneficial, in particular
+ * because only some segments may require large amount of memory, but for now
+ * we go with a simple solution.
+ */
+void
+PrepareHugePages()
+{
+	void	   *ptr = MAP_FAILED;
+	Size	total_size;
+	int			mmap_flags = (MAP_SHARED | MAP_HASSEMAPHORE);
+
+	/* TODO: this doesn't count the memory in on-demand shared memory segments */
+	total_size = CalculateShmemSize();
+
+	/* Complain if hugepages demanded but we can't possibly support them */
+#if !defined(MAP_HUGETLB)
+	if (huge_pages == HUGE_PAGES_ON)
+		ereport(ERROR,
+				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+				 errmsg("huge pages not supported on this platform")));
+#else
+	if (huge_pages == HUGE_PAGES_ON || huge_pages == HUGE_PAGES_TRY)
+	{
+		Size		hugepagesize;
+		int			huge_mmap_flags;
+
+		GetHugePageSize(&hugepagesize, &huge_mmap_flags, NULL);
+
+		/* Map total amount of memory to test its availability. */
+		elog(DEBUG1, "reserving space: probe mmap(%zu) with MAP_HUGETLB",
+			 total_size);
+		ptr = mmap(NULL, total_size, PROT_NONE,
+				   mmap_flags | MAP_ANONYMOUS | huge_mmap_flags, -1, 0);
 	}
+#endif
 
-	*size = allocsize;
-	return ptr;
+	/*
+	 * Report whether huge pages are in use. This needs to be tracked before
+	 * creating shared memory segments.
+	 */
+	SetConfigOption("huge_pages_status", (ptr == MAP_FAILED) ? "off" : "on",
+					PGC_INTERNAL, PGC_S_DYNAMIC_DEFAULT);
+	huge_pages_on = ptr != MAP_FAILED;
 }
 
 /*
@@ -676,16 +884,86 @@ CreateAnonymousSegment(Size *size)
 static void
 AnonymousShmemDetach(int status, Datum arg)
 {
-	/* Release anonymous shared memory block, if any. */
-	if (AnonymousShmem != NULL)
+	for (int i = 0; i < NUM_MEMORY_MAPPINGS; i++)
 	{
-		if (munmap(AnonymousShmem, AnonymousShmemSize) < 0)
-			elog(LOG, "munmap(%p, %zu) failed: %m",
-				 AnonymousShmem, AnonymousShmemSize);
-		AnonymousShmem = NULL;
+		AnonShmemData *segment = &AnonShmemInfo[i];
+
+		/* Release anonymous shared memory block, if any. */
+		if (segment->addr != NULL)
+		{
+			Assert(segment->fd != -1);
+
+			if (munmap(segment->addr, segment->size) < 0)
+				elog(LOG, "munmap(%p, %zu) failed: %m",
+					 segment->addr, segment->size);
+			segment->addr = NULL;
+			close(segment->fd);
+			segment->fd = -1;
+		}
 	}
 }
 
+/*
+ * Resize all shared memory segments based on the new shared_buffers value (saved
+ * in ShmemCtrl area). The actual segment resizing is done via ftruncate, which
+ * will fail if there is not sufficient space to expand the anon file.
+ *
+ * TODO: Rename this to BufferShmemResize() or something. Only buffer manager's
+ * memory should be resized in this function.
+ *
+ * TODO: This function changes the amount of shared memory used. So it should
+ * also update the show only GUCs shared_memory_size and
+ * shared_memory_size_in_huge_pages in all backends. SetConfigOption() may be
+ * used for that. But it's not clear whether is_reload parameter is safe to use
+ * while resizing is going on; also at what stage it should be done.
+ */
+static bool
+AnonymousShmemResize(int segment_id, const char *segname, Size new_size, bool expanding)
+{
+	Size		hugepagesize;
+	AnonShmemData *anonshmem = &AnonShmemInfo[segment_id];
+
+	if (anonshmem->fd == -1)
+		ereport(ERROR,
+				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+				 errmsg("segment[%s]: only anonymous (mmaped) file backed segments can be resized",
+						segname)));
+
+#ifndef MAP_HUGETLB
+	/* PrepareHugePages should have dealt with this case */
+	Assert(huge_pages != HUGE_PAGES_ON && !huge_pages_on);
+#else
+	if (huge_pages_on)
+	{
+		Assert(huge_pages == HUGE_PAGES_ON || huge_pages == HUGE_PAGES_TRY);
+		GetHugePageSize(&hugepagesize, NULL, NULL);
+		new_size = align_size(new_size, hugepagesize);
+	}
+#endif
+	Assert(anonshmem->addr);
+
+	/*
+	 * Size of the reserved address space should not change, since it depends
+	 * upon MaxNBuffers, which can be changed only on restart.
+	 */
+	Assert(anonshmem->size >= new_size);
+
+	/*
+	 * Resize the backing file to resize the allocated memory, and allocate
+	 * more memory on supported platforms if required.
+	 */
+	if (ftruncate(anonshmem->fd, new_size) == -1)
+		ereport(ERROR,
+				(errcode(ERRCODE_SYSTEM_ERROR),
+				 errmsg("could not truncate anonymous file segment for \"%s\": %m",
+						segname)));
+	/* TODO: This might make the program slower. */
+	if (expanding)
+		shmem_fallocate(anonshmem->fd, segname, new_size, ERROR);
+
+	return true;
+}
+
 /*
  * PGSharedMemoryCreate
  *
@@ -699,7 +977,7 @@ AnonymousShmemDetach(int status, Datum arg)
  * postmaster or backend.
  */
 PGShmemHeader *
-PGSharedMemoryCreate(Size size,
+PGSharedMemoryCreate(int segment_id, const char *name, Size init_size, Size max_size,
 					 PGShmemHeader **shim)
 {
 	IpcMemoryKey NextShmemSegID;
@@ -707,6 +985,8 @@ PGSharedMemoryCreate(Size size,
 	PGShmemHeader *hdr;
 	struct stat statbuf;
 	Size		sysvsize;
+	AnonShmemData *anonshmem = &AnonShmemInfo[segment_id];
+	PGUsedShmemInfo *usedShmem = &UsedShmemInfo[segment_id];
 
 	/*
 	 * We use the data directory's ID info (inode and device numbers) to
@@ -719,14 +999,6 @@ PGSharedMemoryCreate(Size size,
 				 errmsg("could not stat data directory \"%s\": %m",
 						DataDir)));
 
-	/* Complain if hugepages demanded but we can't possibly support them */
-#if !defined(MAP_HUGETLB)
-	if (huge_pages == HUGE_PAGES_ON)
-		ereport(ERROR,
-				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-				 errmsg("huge pages not supported on this platform")));
-#endif
-
 	/* For now, we don't support huge pages in SysV memory */
 	if (huge_pages == HUGE_PAGES_ON && shared_memory_type != SHMEM_TYPE_MMAP)
 		ereport(ERROR,
@@ -734,14 +1006,15 @@ PGSharedMemoryCreate(Size size,
 				 errmsg("huge pages not supported with the current \"shared_memory_type\" setting")));
 
 	/* Room for a header? */
-	Assert(size > MAXALIGN(sizeof(PGShmemHeader)));
+	Assert(init_size > MAXALIGN(sizeof(PGShmemHeader)));
 
 	if (shared_memory_type == SHMEM_TYPE_MMAP)
 	{
-		AnonymousShmem = CreateAnonymousSegment(&size);
-		AnonymousShmemSize = size;
+		/* On success, mapping data will be modified. */
+		CreateAnonymousSegment(segment_id, name, init_size, max_size);
 
 		/* Register on-exit routine to unmap the anonymous segment */
+		/* TODO: This happens for every shared memory that gets created. Do it only once. */
 		on_shmem_exit(AnonymousShmemDetach, (Datum) 0);
 
 		/* Now we need only allocate a minimal-sized SysV shmem block. */
@@ -749,7 +1022,7 @@ PGSharedMemoryCreate(Size size,
 	}
 	else
 	{
-		sysvsize = size;
+		sysvsize = init_size;
 
 		/* huge pages are only available with mmap */
 		SetConfigOption("huge_pages_status", "off",
@@ -762,7 +1035,7 @@ PGSharedMemoryCreate(Size size,
 	 * loop simultaneously.  (CreateDataDirLockFile() does not entirely ensure
 	 * that, but prefer fixing it over coping here.)
 	 */
-	NextShmemSegID = statbuf.st_ino;
+	NextShmemSegID = statbuf.st_ino + usedShmem->UsedShmemSegID;
 
 	for (;;)
 	{
@@ -800,6 +1073,7 @@ PGSharedMemoryCreate(Size size,
 						 errmsg("pre-existing shared memory block (key %lu, ID %lu) is still in use",
 								(unsigned long) NextShmemSegID,
 								(unsigned long) shmid),
+						 errdetail("when trying to create shared memory block for segment \"%s\"", name),
 						 errhint("Terminate any old server processes associated with data directory \"%s\".",
 								 DataDir)));
 				break;
@@ -854,24 +1128,57 @@ PGSharedMemoryCreate(Size size,
 	/*
 	 * Initialize space allocation status for segment.
 	 */
-	hdr->totalsize = size;
+	hdr->totalsize = init_size;
+	hdr->reservedsize = max_size;
 	hdr->content_offset = MAXALIGN(sizeof(PGShmemHeader));
 	*shim = hdr;
 
 	/* Save info for possible future use */
-	UsedShmemSegAddr = memAddress;
-	UsedShmemSegID = (unsigned long) NextShmemSegID;
+	usedShmem->UsedShmemSegAddr = memAddress;
+	usedShmem->UsedShmemSegID = (unsigned long) NextShmemSegID;
 
 	/*
-	 * If AnonymousShmem is NULL here, then we're not using anonymous shared
-	 * memory, and should return a pointer to the System V shared memory
-	 * block. Otherwise, the System V shared memory block is only a shim, and
-	 * we must return a pointer to the real block.
+	 * If we're not using anonymous shared memory, return a pointer to the
+	 * System V shared memory block. Otherwise, the System V shared memory
+	 * block is only a shim, and we must return a pointer to the real block.
 	 */
-	if (AnonymousShmem == NULL)
+	if (anonshmem->addr == NULL)
 		return hdr;
-	memcpy(AnonymousShmem, hdr, sizeof(PGShmemHeader));
-	return (PGShmemHeader *) AnonymousShmem;
+	memcpy(anonshmem->addr, hdr, sizeof(PGShmemHeader));
+	return anonshmem->addr;
+}
+
+bool
+PGSharedMemoryResize(int segment_id, const char *name, Size new_size)
+{
+	AnonShmemData *anonshmem = &AnonShmemInfo[segment_id];
+	PGShmemHeader *hdr;
+
+	/* For now, we allow only mmapped memory to be resized. */
+	if (shared_memory_type != SHMEM_TYPE_MMAP || anonshmem->fd == -1)
+		elog(ERROR, "only anonymous (mmaped) file backed memory can be resized");
+
+	/* Anonymous memory has header as the first chunk. */
+	hdr = (PGShmemHeader *) anonshmem->addr;
+
+	/*
+	 * We should have reserved enough address space for resizing. PANIC if
+	 * that's not the case.
+	 */
+	if (hdr->reservedsize < new_size)
+		ereport(ERROR,
+				(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+				 errmsg("not enough address space is reserved")));
+
+	/* Nothing to do if size is unchanged */
+	if (hdr->totalsize == new_size)
+		return true;
+
+	AnonymousShmemResize(segment_id, name, new_size, new_size > hdr->totalsize);
+
+	/* Update the available size. */
+	hdr->totalsize = new_size;
+	return true;
 }
 
 #ifdef EXEC_BACKEND
@@ -884,9 +1191,9 @@ PGSharedMemoryCreate(Size size,
  * EXEC_BACKEND case; otherwise postmaster children inherit the shared memory
  * segment attachment via fork().
  *
- * UsedShmemSegID and UsedShmemSegAddr are implicit parameters to this
- * routine.  The caller must have already restored them to the postmaster's
- * values.
+ * UsedShmemInfo array is an implicit parameter to this
+ * routine.  The caller must have already restored it to the postmaster's
+ * state.
  */
 void
 PGSharedMemoryReAttach(void)
@@ -894,32 +1201,42 @@ PGSharedMemoryReAttach(void)
 	IpcMemoryId shmid;
 	PGShmemHeader *hdr;
 	IpcMemoryState state;
-	void	   *origUsedShmemSegAddr = UsedShmemSegAddr;
+	void	   *origUsedShmemSegAddr;
 
-	Assert(UsedShmemSegAddr != NULL);
-	Assert(IsUnderPostmaster);
+	for (int i = 0; i < NUM_MEMORY_MAPPINGS; i++)
+	{
+		PGUsedShmemInfo *usedShmem = &UsedShmemInfo[i];
+
+		origUsedShmemSegAddr = usedShmem->UsedShmemSegAddr;
+
+		Assert(usedShmem->UsedShmemSegAddr != NULL);
+		Assert(IsUnderPostmaster);
 
 #ifdef __CYGWIN__
-	/* cygipc (currently) appears to not detach on exec. */
-	PGSharedMemoryDetach();
-	UsedShmemSegAddr = origUsedShmemSegAddr;
+		/* cygipc (currently) appears to not detach on exec. */
+		PGSharedMemoryDetach();
+		usedShmem->UsedShmemSegAddr = origUsedShmemSegAddr;
 #endif
 
-	elog(DEBUG3, "attaching to %p", UsedShmemSegAddr);
-	shmid = shmget(UsedShmemSegID, sizeof(PGShmemHeader), 0);
-	if (shmid < 0)
-		state = SHMSTATE_FOREIGN;
-	else
-		state = PGSharedMemoryAttach(shmid, UsedShmemSegAddr, &hdr);
-	if (state != SHMSTATE_ATTACHED)
-		elog(FATAL, "could not reattach to shared memory (key=%d, addr=%p): %m",
-			 (int) UsedShmemSegID, UsedShmemSegAddr);
-	if (hdr != origUsedShmemSegAddr)
-		elog(FATAL, "reattaching to shared memory returned unexpected address (got %p, expected %p)",
-			 hdr, origUsedShmemSegAddr);
-	dsm_set_control_handle(hdr->dsm_control);
-
-	UsedShmemSegAddr = hdr;		/* probably redundant */
+		elog(DEBUG3, "attaching to %p", usedShmem->UsedShmemSegAddr);
+		shmid = shmget(usedShmem->UsedShmemSegID, sizeof(PGShmemHeader), 0);
+		if (shmid < 0)
+			state = SHMSTATE_FOREIGN;
+		else
+			state = PGSharedMemoryAttach(shmid, usedShmem->UsedShmemSegAddr, &hdr);
+		if (state != SHMSTATE_ATTACHED)
+			elog(FATAL, "could not reattach to shared memory (key=%d, addr=%p): %m",
+				 (int) usedShmem->UsedShmemSegID, usedShmem->UsedShmemSegAddr);
+		if (hdr != origUsedShmemSegAddr)
+			elog(FATAL, "reattaching to shared memory returned unexpected address (got %p, expected %p)",
+				 hdr, origUsedShmemSegAddr);
+
+		/* Re-establish dsm_control mapping, if any */
+		if (hdr->dsm_control != 0)
+			dsm_set_control_handle(hdr->dsm_control);
+
+		usedShmem->UsedShmemSegAddr = hdr;	/* probably redundant */
+	}
 }
 
 /*
@@ -933,14 +1250,13 @@ PGSharedMemoryReAttach(void)
  * The child process startup logic might or might not call PGSharedMemoryDetach
  * after this; make sure that it will be a no-op if called.
  *
- * UsedShmemSegID and UsedShmemSegAddr are implicit parameters to this
- * routine.  The caller must have already restored them to the postmaster's
- * values.
+ * UsedShmemInfo array is an implicit parameter to this
+ * routine.  The caller must have already restored it to the postmaster's
+ * state.
  */
 void
 PGSharedMemoryNoReAttach(void)
 {
-	Assert(UsedShmemSegAddr != NULL);
 	Assert(IsUnderPostmaster);
 
 #ifdef __CYGWIN__
@@ -948,10 +1264,16 @@ PGSharedMemoryNoReAttach(void)
 	PGSharedMemoryDetach();
 #endif
 
-	/* For cleanliness, reset UsedShmemSegAddr to show we're not attached. */
-	UsedShmemSegAddr = NULL;
-	/* And the same for UsedShmemSegID. */
-	UsedShmemSegID = 0;
+	for (int i = 0; i < NUM_MEMORY_MAPPINGS; i++)
+	{
+		PGUsedShmemInfo *usedShmem = &UsedShmemInfo[i];
+
+		Assert(usedShmem->UsedShmemSegAddr != NULL);
+		/* For cleanliness, reset UsedShmemSegAddr to show we're not attached. */
+		usedShmem->UsedShmemSegAddr = NULL;
+		/* And the same for UsedShmemSegID. */
+		usedShmem->UsedShmemSegID = 0;
+	}
 }
 
 #endif							/* EXEC_BACKEND */
@@ -959,35 +1281,44 @@ PGSharedMemoryNoReAttach(void)
 /*
  * PGSharedMemoryDetach
  *
- * Detach from the shared memory segment, if still attached.  This is not
+ * Detach from the shared memory segments, if still attached.  This is not
  * intended to be called explicitly by the process that originally created the
- * segment (it will have on_shmem_exit callback(s) registered to do that).
+ * segments (it will have on_shmem_exit callback(s) registered to do that).
  * Rather, this is for subprocesses that have inherited an attachment and want
  * to get rid of it.
  *
- * UsedShmemSegID and UsedShmemSegAddr are implicit parameters to this
- * routine, also AnonymousShmem and AnonymousShmemSize.
+ * PGUsedShmemInfo::UsedShmemSegID and PGUsedShmemInfo::UsedShmemSegAddr are
+ * implicit parameters to this routine obtained from entries in UsedShmemInfo
+ * array.
  */
 void
 PGSharedMemoryDetach(void)
 {
-	if (UsedShmemSegAddr != NULL)
+	for (int i = 0; i < NUM_MEMORY_MAPPINGS; i++)
 	{
-		if ((shmdt(UsedShmemSegAddr) < 0)
+		PGUsedShmemInfo *usedShmem = &UsedShmemInfo[i];
+		AnonShmemData *anonshmem = &AnonShmemInfo[i];
+
+		if (usedShmem->UsedShmemSegAddr != NULL)
+		{
+			if ((shmdt(usedShmem->UsedShmemSegAddr) < 0)
 #if defined(EXEC_BACKEND) && defined(__CYGWIN__)
-		/* Work-around for cygipc exec bug */
-			&& shmdt(NULL) < 0
+			/* Work-around for cygipc exec bug */
+				&& shmdt(NULL) < 0
 #endif
-			)
-			elog(LOG, "shmdt(%p) failed: %m", UsedShmemSegAddr);
-		UsedShmemSegAddr = NULL;
-	}
+				)
+				elog(LOG, "shmdt(%p) failed: %m", usedShmem->UsedShmemSegAddr);
+			usedShmem->UsedShmemSegAddr = NULL;
+		}
 
-	if (AnonymousShmem != NULL)
-	{
-		if (munmap(AnonymousShmem, AnonymousShmemSize) < 0)
-			elog(LOG, "munmap(%p, %zu) failed: %m",
-				 AnonymousShmem, AnonymousShmemSize);
-		AnonymousShmem = NULL;
+		if (anonshmem->addr != NULL)
+		{
+			if (munmap(anonshmem->addr, anonshmem->size) < 0)
+				elog(LOG, "munmap(%p, %zu) failed: %m",
+					 anonshmem->addr, anonshmem->size);
+			anonshmem->addr = NULL;
+			close(anonshmem->fd);
+			anonshmem->fd = -1;
+		}
 	}
 }
diff --git a/src/backend/port/win32_shmem.c b/src/backend/port/win32_shmem.c
index 794e4fcb2ad..76a8cc9ae52 100644
--- a/src/backend/port/win32_shmem.c
+++ b/src/backend/port/win32_shmem.c
@@ -39,15 +39,14 @@
  * address space and is negligible relative to the 64-bit address space.
  */
 #define PROTECTIVE_REGION_SIZE (10 * WIN32_STACK_RLIMIT)
-void	   *ShmemProtectiveRegion = NULL;
-
-HANDLE		UsedShmemSegID = INVALID_HANDLE_VALUE;
-void	   *UsedShmemSegAddr = NULL;
-static Size UsedShmemSegSize = 0;
 
 static bool EnableLockPagesPrivilege(int elevel);
 static void pgwin32_SharedMemoryDelete(int status, Datum shmId);
 
+PGUsedShmemInfo UsedShmemInfo[NUM_MEMORY_MAPPINGS];
+
+static Size UsedShmemSegSizes[NUM_MEMORY_MAPPINGS] = {0};
+
 /*
  * Generate shared memory segment name. Expand the data directory, to generate
  * an identifier unique for this data directory. Then replace all backslashes
@@ -202,9 +201,11 @@ EnableLockPagesPrivilege(int elevel)
  *
  * Create a shared memory segment of the given size and initialize its
  * standard header.
+ *
+ * TODO: Check that segment_id is valid before indexing the corresponding arrays.
  */
-PGShmemHeader *
-PGSharedMemoryCreate(Size size,
+void
+PGSharedMemoryCreate(int segment_id, const char *name, Size init_size, Size max_size,
 					 PGShmemHeader **shim)
 {
 	void	   *memAddress;
@@ -216,13 +217,14 @@ PGSharedMemoryCreate(Size size,
 	DWORD		size_high;
 	DWORD		size_low;
 	SIZE_T		largePageSize = 0;
-	Size		orig_size = size;
+	Size		size = init_size;
 	DWORD		flProtect = PAGE_READWRITE;
 	DWORD		desiredAccess;
+	PGUsedShmemInfo *usedShmem = &UsedShmemInfo[segment_id];
 
-	ShmemProtectiveRegion = VirtualAlloc(NULL, PROTECTIVE_REGION_SIZE,
-										 MEM_RESERVE, PAGE_NOACCESS);
-	if (ShmemProtectiveRegion == NULL)
+	usedShmem->ShmemProtectiveRegion = VirtualAlloc(NULL, PROTECTIVE_REGION_SIZE,
+													MEM_RESERVE, PAGE_NOACCESS);
+	if (usedShmem->ShmemProtectiveRegion == NULL)
 		elog(FATAL, "could not reserve memory region: error code %lu",
 			 GetLastError());
 
@@ -231,8 +233,12 @@ PGSharedMemoryCreate(Size size,
 
 	szShareMem = GetSharedMemName();
 
-	UsedShmemSegAddr = NULL;
+	usedShmem->UsedShmemSegAddr = NULL;
 
+	/*
+	 * TODO: We don't need to perform this as many times as the number of
+	 * segments. Instead do something similar to sysv_shmem.c
+	 */
 	if (huge_pages == HUGE_PAGES_ON || huge_pages == HUGE_PAGES_TRY)
 	{
 		/* Does the processor support large pages? */
@@ -304,7 +310,7 @@ retry:
 				 * Use the original size, not the rounded-up value, when
 				 * falling back to non-huge pages.
 				 */
-				size = orig_size;
+				size = init_size;
 				flProtect = PAGE_READWRITE;
 				goto retry;
 			}
@@ -337,6 +343,8 @@ retry:
 	if (!hmap)
 		ereport(FATAL,
 				(errmsg("pre-existing shared memory block is still in use"),
+				 errdetail("Failed while creating shared memory block for segment \"%s\".",
+						   name),
 				 errhint("Check if there are any old server processes still running, and terminate them.")));
 
 	free(szShareMem);
@@ -393,11 +401,12 @@ retry:
 	hdr->dsm_control = 0;
 
 	/* Save info for possible future use */
-	UsedShmemSegAddr = memAddress;
-	UsedShmemSegSize = size;
-	UsedShmemSegID = hmap2;
+	usedShmem->UsedShmemSegAddr = memAddress;
+	UsedShmemSegSizes[segment_id] = size;
+	usedShmem->UsedShmemSegID = (unsigned long) hmap2;
 
 	/* Register on-exit routine to delete the new segment */
+	/* TODO: This may happen for every shared memory that gets created. Do it only once. */
 	on_shmem_exit(pgwin32_SharedMemoryDelete, PointerGetDatum(hmap2));
 
 	*shim = hdr;
@@ -405,8 +414,12 @@ retry:
 	/* Report whether huge pages are in use */
 	SetConfigOption("huge_pages_status", (flProtect & SEC_LARGE_PAGES) ?
 					"on" : "off", PGC_INTERNAL, PGC_S_DYNAMIC_DEFAULT);
+}
 
-	return hdr;
+bool
+PGSharedMemoryResize(int segment_id, const char *name, Size new_size)
+{
+	elog(ERROR, "resizing shared memory segments is not supported on Windows");
 }
 
 /*
@@ -416,42 +429,52 @@ retry:
  * an already existing shared memory segment, using the handle inherited from
  * the postmaster.
  *
- * ShmemProtectiveRegion, UsedShmemSegID and UsedShmemSegAddr are implicit
- * parameters to this routine.  The caller must have already restored them to
- * the postmaster's values.
+ * The UsedShmemInfo array is an implicit parameter to this routine.  The caller
+ * must have already restored ShmemProtectiveRegion, UsedShmemSegID and
+ * UsedShmemSegAddr in each entry to the postmaster's values.
  */
 void
 PGSharedMemoryReAttach(void)
 {
 	PGShmemHeader *hdr;
-	void	   *origUsedShmemSegAddr = UsedShmemSegAddr;
+	void	   *origUsedShmemSegAddr;
 
-	Assert(ShmemProtectiveRegion != NULL);
-	Assert(UsedShmemSegAddr != NULL);
 	Assert(IsUnderPostmaster);
 
-	/*
-	 * Release memory region reservations made by the postmaster
-	 */
-	if (VirtualFree(ShmemProtectiveRegion, 0, MEM_RELEASE) == 0)
-		elog(FATAL, "failed to release reserved memory region (addr=%p): error code %lu",
-			 ShmemProtectiveRegion, GetLastError());
-	if (VirtualFree(UsedShmemSegAddr, 0, MEM_RELEASE) == 0)
-		elog(FATAL, "failed to release reserved memory region (addr=%p): error code %lu",
-			 UsedShmemSegAddr, GetLastError());
-
-	hdr = (PGShmemHeader *) MapViewOfFileEx(UsedShmemSegID, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, 0, UsedShmemSegAddr);
-	if (!hdr)
-		elog(FATAL, "could not reattach to shared memory (key=%p, addr=%p): error code %lu",
-			 UsedShmemSegID, UsedShmemSegAddr, GetLastError());
-	if (hdr != origUsedShmemSegAddr)
-		elog(FATAL, "reattaching to shared memory returned unexpected address (got %p, expected %p)",
-			 hdr, origUsedShmemSegAddr);
-	if (hdr->magic != PGShmemMagic)
-		elog(FATAL, "reattaching to shared memory returned non-PostgreSQL memory");
-	dsm_set_control_handle(hdr->dsm_control);
-
-	UsedShmemSegAddr = hdr;		/* probably redundant */
+	for (int i = 0; i < NUM_MEMORY_MAPPINGS; i++)
+	{
+		PGUsedShmemInfo *usedShmem = &UsedShmemInfo[i];
+
+		Assert(usedShmem->ShmemProtectiveRegion != NULL);
+		Assert(usedShmem->UsedShmemSegAddr != NULL);
+
+		origUsedShmemSegAddr = usedShmem->UsedShmemSegAddr;
+
+		/*
+		 * Release memory region reservations made by the postmaster
+		 */
+		if (VirtualFree(usedShmem->ShmemProtectiveRegion, 0, MEM_RELEASE) == 0)
+			elog(FATAL, "failed to release reserved memory region (addr=%p): error code %lu",
+				 usedShmem->ShmemProtectiveRegion, GetLastError());
+		if (VirtualFree(usedShmem->UsedShmemSegAddr, 0, MEM_RELEASE) == 0)
+			elog(FATAL, "failed to release reserved memory region (addr=%p): error code %lu",
+				 usedShmem->UsedShmemSegAddr, GetLastError());
+
+		hdr = (PGShmemHeader *) MapViewOfFileEx(usedShmem->UsedShmemSegID, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, 0, usedShmem->UsedShmemSegAddr);
+		if (!hdr)
+			elog(FATAL, "could not reattach to shared memory (key=%p, addr=%p): error code %lu",
+				 usedShmem->UsedShmemSegID, usedShmem->UsedShmemSegAddr, GetLastError());
+		if (hdr != origUsedShmemSegAddr)
+			elog(FATAL, "reattaching to shared memory returned unexpected address (got %p, expected %p)",
+				 hdr, origUsedShmemSegAddr);
+		if (hdr->magic != PGShmemMagic)
+			elog(FATAL, "reattaching to shared memory returned non-PostgreSQL memory");
+		/* Re-establish dsm_control mapping, if any */
+		if (hdr->dsm_control != 0)
+			dsm_set_control_handle(hdr->dsm_control);
+
+		usedShmem->UsedShmemSegAddr = hdr;	/* probably redundant */
+	}
 }
 
 /*
@@ -464,22 +487,28 @@ PGSharedMemoryReAttach(void)
  * The child process startup logic might or might not call PGSharedMemoryDetach
  * after this; make sure that it will be a no-op if called.
  *
- * ShmemProtectiveRegion, UsedShmemSegID and UsedShmemSegAddr are implicit
- * parameters to this routine.  The caller must have already restored them to
- * the postmaster's values.
+ * The UsedShmemInfo array is an implicit parameter to this routine.  The caller
+ * must have already restored ShmemProtectiveRegion and UsedShmemSegAddr in each
+ * entry to the postmaster's values.
  */
 void
 PGSharedMemoryNoReAttach(void)
 {
-	Assert(ShmemProtectiveRegion != NULL);
-	Assert(UsedShmemSegAddr != NULL);
 	Assert(IsUnderPostmaster);
+	for (int i = 0; i < NUM_MEMORY_MAPPINGS; i++)
+	{
+		PGUsedShmemInfo *usedShmem = &UsedShmemInfo[i];
 
-	/*
-	 * Under Windows we will not have mapped the segment, so we don't need to
-	 * un-map it.  Just reset UsedShmemSegAddr to show we're not attached.
-	 */
-	UsedShmemSegAddr = NULL;
+		Assert(usedShmem->ShmemProtectiveRegion != NULL);
+		Assert(usedShmem->UsedShmemSegAddr != NULL);
+
+		/*
+		 * Under Windows we will not have mapped the segment, so we don't need
+		 * to un-map it.  Just reset UsedShmemSegAddr to show we're not
+		 * attached.
+		 */
+		usedShmem->UsedShmemSegAddr = NULL;
+	}
 
 	/*
 	 * We *must* close the inherited shmem segment handle, else Windows will
@@ -492,49 +521,55 @@ PGSharedMemoryNoReAttach(void)
 /*
  * PGSharedMemoryDetach
  *
- * Detach from the shared memory segment, if still attached.  This is not
+ * Detach from the shared memory segments, if still attached.  This is not
  * intended to be called explicitly by the process that originally created the
- * segment (it will have an on_shmem_exit callback registered to do that).
- * Rather, this is for subprocesses that have inherited an attachment and want
- * to get rid of it.
+ * segments (it will have an on_shmem_exit callback registered to do that).
+ * Rather, this is for subprocesses that have inherited an attachment and want to
+ * get rid of it.
  *
- * ShmemProtectiveRegion, UsedShmemSegID and UsedShmemSegAddr are implicit
- * parameters to this routine.
+ * UsedShmemInfo is an implicit parameter to this routine.  The caller must have
+ * already restored ShmemProtectiveRegion, UsedShmemSegID and UsedShmemSegAddr in
+ * each entry to the postmaster's values.
  */
 void
 PGSharedMemoryDetach(void)
 {
-	/*
-	 * Releasing the protective region liberates an unimportant quantity of
-	 * address space, but be tidy.
-	 */
-	if (ShmemProtectiveRegion != NULL)
+	for (int i = 0; i < NUM_MEMORY_MAPPINGS; i++)
 	{
-		if (VirtualFree(ShmemProtectiveRegion, 0, MEM_RELEASE) == 0)
-			elog(LOG, "failed to release reserved memory region (addr=%p): error code %lu",
-				 ShmemProtectiveRegion, GetLastError());
+		PGUsedShmemInfo *segment = &UsedShmemInfo[i];
 
-		ShmemProtectiveRegion = NULL;
-	}
+		/*
+		 * Releasing the protective region liberates an unimportant quantity
+		 * of address space, but be tidy.
+		 */
+		if (segment->ShmemProtectiveRegion != NULL)
+		{
+			if (VirtualFree(segment->ShmemProtectiveRegion, 0, MEM_RELEASE) == 0)
+				elog(LOG, "failed to release reserved memory region (addr=%p): error code %lu",
+					 segment->ShmemProtectiveRegion, GetLastError());
 
-	/* Unmap the view, if it's mapped */
-	if (UsedShmemSegAddr != NULL)
-	{
-		if (!UnmapViewOfFile(UsedShmemSegAddr))
-			elog(LOG, "could not unmap view of shared memory: error code %lu",
-				 GetLastError());
+			segment->ShmemProtectiveRegion = NULL;
+		}
 
-		UsedShmemSegAddr = NULL;
-	}
+		/* Unmap the view, if it's mapped */
+		if (segment->UsedShmemSegAddr != NULL)
+		{
+			if (!UnmapViewOfFile(segment->UsedShmemSegAddr))
+				elog(LOG, "could not unmap view of shared memory: error code %lu",
+					 GetLastError());
 
-	/* And close the shmem handle, if we have one */
-	if (UsedShmemSegID != INVALID_HANDLE_VALUE)
-	{
-		if (!CloseHandle(UsedShmemSegID))
-			elog(LOG, "could not close handle to shared memory: error code %lu",
-				 GetLastError());
+			segment->UsedShmemSegAddr = NULL;
+		}
 
-		UsedShmemSegID = INVALID_HANDLE_VALUE;
+		/* And close the shmem handle, if we have one */
+		if (segment->UsedShmemSegID != INVALID_HANDLE_VALUE)
+		{
+			if (!CloseHandle(segment->UsedShmemSegID))
+				elog(LOG, "could not close handle to shared memory: error code %lu",
+					 GetLastError());
+
+			segment->UsedShmemSegID = INVALID_HANDLE_VALUE;
+		}
 	}
 }
 
@@ -574,50 +609,55 @@ pgwin32_ReserveSharedMemoryRegion(HANDLE hChild)
 {
 	void	   *address;
 
-	Assert(ShmemProtectiveRegion != NULL);
-	Assert(UsedShmemSegAddr != NULL);
-	Assert(UsedShmemSegSize != 0);
-
-	/* ShmemProtectiveRegion */
-	address = VirtualAllocEx(hChild, ShmemProtectiveRegion,
-							 PROTECTIVE_REGION_SIZE,
-							 MEM_RESERVE, PAGE_NOACCESS);
-	if (address == NULL)
+	for (int i = 0; i < NUM_MEMORY_MAPPINGS; i++)
 	{
-		/* Don't use FATAL since we're running in the postmaster */
-		elog(LOG, "could not reserve shared memory region (addr=%p) for child %p: error code %lu",
-			 ShmemProtectiveRegion, hChild, GetLastError());
-		return false;
-	}
-	if (address != ShmemProtectiveRegion)
-	{
-		/*
-		 * Should never happen - in theory if allocation granularity causes
-		 * strange effects it could, so check just in case.
-		 *
-		 * Don't use FATAL since we're running in the postmaster.
-		 */
-		elog(LOG, "reserved shared memory region got incorrect address %p, expected %p",
-			 address, ShmemProtectiveRegion);
-		return false;
-	}
+		PGUsedShmemInfo *segment = &UsedShmemInfo[i];
 
-	/* UsedShmemSegAddr */
-	address = VirtualAllocEx(hChild, UsedShmemSegAddr, UsedShmemSegSize,
-							 MEM_RESERVE, PAGE_READWRITE);
-	if (address == NULL)
-	{
-		elog(LOG, "could not reserve shared memory region (addr=%p) for child %p: error code %lu",
-			 UsedShmemSegAddr, hChild, GetLastError());
-		return false;
-	}
-	if (address != UsedShmemSegAddr)
-	{
-		elog(LOG, "reserved shared memory region got incorrect address %p, expected %p",
-			 address, UsedShmemSegAddr);
-		return false;
-	}
+		Assert(segment->ShmemProtectiveRegion != NULL);
+		Assert(segment->UsedShmemSegAddr != NULL);
+		Assert(UsedShmemSegSizes[i] != 0);
+
+		/* ShmemProtectiveRegion */
+		address = VirtualAllocEx(hChild, segment->ShmemProtectiveRegion,
+								 PROTECTIVE_REGION_SIZE,
+								 MEM_RESERVE, PAGE_NOACCESS);
+		if (address == NULL)
+		{
+			/* Don't use FATAL since we're running in the postmaster */
+			elog(LOG, "could not reserve shared memory region (addr=%p) for child %p: error code %lu",
+				 segment->ShmemProtectiveRegion, hChild, GetLastError());
+			return false;
+		}
+		if (address != segment->ShmemProtectiveRegion)
+		{
+			/*
+			 * Should never happen - in theory if allocation granularity
+			 * causes strange effects it could, so check just in case.
+			 *
+			 * Don't use FATAL since we're running in the postmaster.
+			 */
+			elog(LOG, "reserved shared memory region got incorrect address %p, expected %p",
+				 address, segment->ShmemProtectiveRegion);
+			return false;
+		}
+
+		/* UsedShmemSegAddr */
+		address = VirtualAllocEx(hChild, segment->UsedShmemSegAddr, UsedShmemSegSizes[i],
+								 MEM_RESERVE, PAGE_READWRITE);
+		if (address == NULL)
+		{
+			elog(LOG, "could not reserve shared memory region (addr=%p) for child %p: error code %lu",
+				 segment->UsedShmemSegAddr, hChild, GetLastError());
+			return false;
+		}
+		if (address != segment->UsedShmemSegAddr)
+		{
+			elog(LOG, "reserved shared memory region got incorrect address %p, expected %p",
+				 address, segment->UsedShmemSegAddr);
+			return false;
+		}
 
+	}
 	return true;
 }
 
@@ -627,7 +667,7 @@ pgwin32_ReserveSharedMemoryRegion(HANDLE hChild)
  * use GetLargePageMinimum() instead.
  */
 void
-GetHugePageSize(Size *hugepagesize, int *mmap_flags)
+GetHugePageSize(Size *hugepagesize, int *mmap_flags, int *memfd_flags)
 {
 	if (hugepagesize)
 		*hugepagesize = 0;
diff --git a/src/backend/postmaster/launch_backend.c b/src/backend/postmaster/launch_backend.c
index 8f638118cdf..7c5585398ad 100644
--- a/src/backend/postmaster/launch_backend.c
+++ b/src/backend/postmaster/launch_backend.c
@@ -90,13 +90,7 @@ typedef int InheritableSocket;
 typedef struct
 {
 	char		DataDir[MAXPGPATH];
-#ifndef WIN32
-	unsigned long UsedShmemSegID;
-#else
-	void	   *ShmemProtectiveRegion;
-	HANDLE		UsedShmemSegID;
-#endif
-	void	   *UsedShmemSegAddr;
+	PGUsedShmemInfo UsedShmemInfo[NUM_MEMORY_MAPPINGS];
 #ifdef USE_INJECTION_POINTS
 	struct InjectionPointsCtl *ActiveInjectionPoints;
 #endif
@@ -580,6 +574,7 @@ SubPostmasterMain(int argc, char *argv[])
 	char	   *child_kind;
 	BackendType child_type;
 	TimestampTz fork_end;
+	bool		register_shmem_structs = false;
 
 	/* In EXEC_BACKEND case we will not have inherited these settings */
 	IsPostmasterEnvironment = true;
@@ -676,12 +671,22 @@ SubPostmasterMain(int argc, char *argv[])
 	process_shared_preload_libraries();
 
 	/* Restore basic shared memory pointers */
-	if (UsedShmemSegAddr != NULL)
+	/*
+	 * TODO: Need to find a way to pass num_seg_registrations here and also the
+	 * properties of the segments like name.
+	 */
+	for (int i = 0; i < NUM_MEMORY_MAPPINGS; i++)
 	{
-		InitShmemAllocator(UsedShmemSegAddr);
+		PGUsedShmemInfo *usedShmem = &UsedShmemInfo[i];
 
-		RegisterShmemStructs();
+		if (usedShmem->UsedShmemSegAddr != NULL)
+		{
+			InitShmemAllocator(i, "main", usedShmem->UsedShmemSegAddr);
+			register_shmem_structs = true;
+		}
 	}
+	if (register_shmem_structs)
+		RegisterShmemStructs();
 
 	/*
 	 * Run the appropriate Main function
@@ -722,12 +727,7 @@ save_backend_variables(BackendParameters *param,
 	strlcpy(param->DataDir, DataDir, MAXPGPATH);
 
 	param->MyPMChildSlot = child_slot;
-
-#ifdef WIN32
-	param->ShmemProtectiveRegion = ShmemProtectiveRegion;
-#endif
-	param->UsedShmemSegID = UsedShmemSegID;
-	param->UsedShmemSegAddr = UsedShmemSegAddr;
+	memcpy(param->UsedShmemInfo, UsedShmemInfo, sizeof(UsedShmemInfo));
 
 #ifdef USE_INJECTION_POINTS
 	param->ActiveInjectionPoints = ActiveInjectionPoints;
@@ -980,12 +980,7 @@ restore_backend_variables(BackendParameters *param)
 	SetDataDir(param->DataDir);
 
 	MyPMChildSlot = param->MyPMChildSlot;
-
-#ifdef WIN32
-	ShmemProtectiveRegion = param->ShmemProtectiveRegion;
-#endif
-	UsedShmemSegID = param->UsedShmemSegID;
-	UsedShmemSegAddr = param->UsedShmemSegAddr;
+	memcpy(UsedShmemInfo, param->UsedShmemInfo, sizeof(UsedShmemInfo));
 
 #ifdef USE_INJECTION_POINTS
 	ActiveInjectionPoints = param->ActiveInjectionPoints;
diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c
index 952988645d0..3c294721786 100644
--- a/src/backend/storage/ipc/ipci.c
+++ b/src/backend/storage/ipc/ipci.c
@@ -142,8 +142,7 @@ CalculateShmemSize(void)
 	/* include additional requested shmem from preload libraries */
 	size = add_size(size, total_addin_request);
 
-	/* might as well round it off to a multiple of a typical page size */
-	size = add_size(size, 8192 - (size % 8192));
+	ShmemRegisterSegment("main", size, size);
 
 	return size;
 }
@@ -182,25 +181,20 @@ AttachSharedMemoryStructs(void)
 
 /*
  * CreateSharedMemoryAndSemaphores
- *		Creates and initializes shared memory and semaphores.
+ *  	Creates shared memory segments and initializes shared memory structures
+ *  	and semaphores.
  */
 void
 CreateSharedMemoryAndSemaphores(void)
 {
-	PGShmemHeader *shim;
-	PGShmemHeader *seghdr;
-	Size		size;
+	PGShmemHeader *main_seg_shim = NULL;
 
 	Assert(!IsUnderPostmaster);
 
-	/* Compute the size of the shared-memory block */
-	size = CalculateShmemSize();
-	elog(DEBUG3, "invoking IpcMemoryCreate(size=%zu)", size);
+	CalculateShmemSize();
 
-	/*
-	 * Create the shmem segment
-	 */
-	seghdr = PGSharedMemoryCreate(size, &shim);
+	/* Decide if we use huge pages or regular size pages */
+	PrepareHugePages();
 
 	/*
 	 * Make sure that huge pages are never reported as "unknown" while the
@@ -212,7 +206,7 @@ CreateSharedMemoryAndSemaphores(void)
 	/*
 	 * Set up shared memory allocation mechanism
 	 */
-	InitShmemAllocator(seghdr);
+	main_seg_shim = ShmemCreateRegisteredSegments();
 
 	/* Reserve space for semaphores. */
 	if (!IsUnderPostmaster)
@@ -222,7 +216,7 @@ CreateSharedMemoryAndSemaphores(void)
 	CreateOrAttachShmemStructs();
 
 	/* Initialize dynamic shared memory facilities. */
-	dsm_postmaster_startup(shim);
+	dsm_postmaster_startup(main_seg_shim);
 
 	/*
 	 * Now give loadable modules a chance to set up their shmem allocations
@@ -360,6 +354,7 @@ InitializeShmemGUCs(void)
 	/*
 	 * Calculate the shared memory size and round up to the nearest megabyte.
 	 */
+	/* TODO: this doesn't account for the on-demand shared memory segments. */
 	size_b = CalculateShmemSize();
 	size_mb = add_size(size_b, (1024 * 1024) - 1) / (1024 * 1024);
 	sprintf(buf, "%zu", size_mb);
@@ -369,7 +364,7 @@ InitializeShmemGUCs(void)
 	/*
 	 * Calculate the number of huge pages required.
 	 */
-	GetHugePageSize(&hp_size, NULL);
+	GetHugePageSize(&hp_size, NULL, NULL);
 	if (hp_size != 0)
 	{
 		Size		hp_required;
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index e73ac489b2b..7836962edf4 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -88,6 +88,21 @@
 static ShmemStructDesc *registry[SHMEM_INDEX_SIZE];
 static int num_registrations = 0;
 
+typedef struct ShmemSegDesc
+{
+	const char *name;
+	Size		init_size;
+	/*
+	 * TODO: At least right now resizable shared memory is only available in
+	 * non-EXEC_BACKEND builds. We should move this and all the relevant code
+	 * under #ifndef EXEC_BACKEND.
+	 */
+	Size		max_size;
+} ShmemSegDesc;
+
+static ShmemSegDesc seg_registry[NUM_MEMORY_MAPPINGS];
+static int num_seg_registrations = 0;
+
 /*
  * This is the first data structure stored in the shared memory segment, at
  * the offset that PGShmemHeader->content_offset points to.  Allocations by
@@ -105,21 +120,41 @@ typedef struct ShmemAllocatorData
 	slock_t		shmem_lock;
 } ShmemAllocatorData;
 
-static void *ShmemAllocRaw(Size size, Size *allocated_size);
+/* Structure managing one shared memory segment. */
+typedef struct ShmemSegment
+{
+	PGShmemHeader *ShmemSegHdr; /* shared mem segment header */
+	ShmemAllocatorData *ShmemAllocator;
+	void	   *ShmemBase;		/* start address of shared memory */
+	const char *ShmemSegmentName;	/* name of the segment for logging */
+} ShmemSegment;
+
+ShmemSegment Segments[NUM_MEMORY_MAPPINGS];
+
+static void *ShmemAllocRaw(ShmemSegment *segment, Size size, Size *allocated_size);
+static bool ShmemAddrIsValid(int segment_id, const void *addr);
+static void *ShmemInitStructInSegment(const char *name, Size size,
+									  bool *foundPtr, int segment_id);
 
 static void shmem_hash_init(void *arg);
 static void shmem_hash_attach(void *arg);
 
-/* shared memory global variables */
+/* Expose ShmemLock from the main segment for allocating LWLock tranches. */
+slock_t    *ShmemLock;
 
-static PGShmemHeader *ShmemSegHdr;	/* shared mem segment header */
-static void *ShmemBase;			/* start address of shared memory */
-static void *ShmemEnd;			/* end+1 address of shared memory */
+/*
+ * Id of main or the default segment which contains all the fixed shared memory
+ * structures.
+ */
+int main_seg_id = -1;
 
-static ShmemAllocatorData *ShmemAllocator;
-slock_t    *ShmemLock;			/* points to ShmemAllocator->shmem_lock */
- /* primary index hashtable for shmem */
-HTAB *ShmemIndex = NULL;
+/*
+ * Primary index hashtable for shmem.  For simplicity we use a single one for
+ * all shared memory structures, independent of the segments they belong to.
+ * There can be performance consequences of that; an alternative would be to
+ * have one index per shared memory segment.
+ */
+static HTAB *ShmemIndex = NULL;
 
 
 
@@ -136,6 +171,29 @@ static bool firstNumaTouch = true;
 
 Datum		pg_numa_available(PG_FUNCTION_ARGS);
 
+void
+ShmemRegisterSegment(const char *name, Size init_size, Size max_size)
+{
+	ShmemSegDesc *desc;
+
+	if (num_seg_registrations >= NUM_MEMORY_MAPPINGS)
+		elog(ERROR, "cannot register more than %d shared memory segments",
+			 NUM_MEMORY_MAPPINGS);
+
+	/* We allocate PGShmemHeader and ShmemAllocator in the segment, account for that. */
+	init_size = add_size(init_size, sizeof(PGShmemHeader) + sizeof(ShmemAllocatorData));
+	max_size = add_size(max_size, sizeof(PGShmemHeader) + sizeof(ShmemAllocatorData));
+
+	desc = &seg_registry[num_seg_registrations++];
+
+	desc->name = name;
+	/* might as well round it off to a multiple of a typical page size */
+	desc->init_size = align_size(init_size, 8192);
+	desc->max_size = align_size(max_size, 8192);
+
+	if (strcmp(name, "main") == 0)
+		main_seg_id = num_seg_registrations - 1;
+}
 
 void
 ShmemRegisterStruct(ShmemStructDesc *desc)
@@ -143,16 +201,58 @@ ShmemRegisterStruct(ShmemStructDesc *desc)
 	elog(DEBUG2, "REGISTER: %s with size %zd", desc->name, desc->size);
 
 	registry[num_registrations++] = desc;
+
+	if (desc->max_size > 0)
+	{
+		elog(LOG, "RESIZABLE structure: %s has max_size %zd", desc->name, desc->max_size);
+		ShmemRegisterSegment(desc->name, desc->size + desc->extra_size, desc->max_size);
+		/*
+		 * TODO:
+		 * At this point, we know the segment id for this resizable structure,
+		 * should we save it in ShmemStructDesc itself and make use of it in
+		 * ShmemInitRegistered()? Right now that function relies on the order in
+		 * which resizable structures appear in the registry.
+		 *
+		 * Flip side is that the segment id is assigned by the shmem.c module but it
+		 * is part of the ShmemStructDesc which is filled by the module which
+		 * may think that they have to specify the segment id too. But that can
+		 * be fixed using a comment.
+		 */
+	}
 }
 
 size_t
 ShmemRegisteredSize(void)
 {
 	size_t		size;
+	int			seg_id = 0;
 
 	size = 0;
 	for (int i = 0; i < num_registrations; i++)
 	{
+		if (registry[i]->max_size > 0)
+		{
+			ShmemSegDesc *segDesc = &seg_registry[seg_id++];
+
+			/*
+			 * A resizable structure is placed in its own shared memory segment,
+			 * which is allocated on a FIFO basis. Just make sure that the
+			 * corresponding segment is the right one. We return only the sum of
+			 * the sizes in the main segment.
+			 *
+			 * TODO: Once we use ShmemRegisterStruct for every shared memory
+			 * structure, we should change this function to return the total size
+			 * of all the segments instead of just the main segment. And also
+			 * call ShmemRegisterSegment() for main segment from here instead of
+			 * CalculateShmemSize().
+			 */
+			Assert(strcmp(segDesc->name, registry[i]->name) == 0);
+			Assert(segDesc->init_size >= registry[i]->size + registry[i]->extra_size);
+			Assert(segDesc->max_size >= registry[i]->max_size);
+
+			elog(LOG, "RESIZABLE structure: %s has init_size = %zd max_size %zd", segDesc->name, segDesc->init_size, segDesc->max_size);
+
+		}
 		size = add_size(size, registry[i]->size);
 		size = add_size(size, registry[i]->extra_size);
 	}
@@ -162,9 +262,54 @@ ShmemRegisteredSize(void)
 	return size;
 }
 
+PGShmemHeader *
+ShmemCreateRegisteredSegments(void)
+{
+	PGShmemHeader *main_seg_shim = NULL;
+
+	/* Should be called only by the postmaster or a standalone backend. */
+	Assert(!IsUnderPostmaster);
+
+	for (int i = 0; i < num_seg_registrations; i++)
+	{
+		ShmemSegDesc *segDesc = &seg_registry[i];
+
+		PGUsedShmemInfo *usedShmem = &UsedShmemInfo[i];
+		PGShmemHeader *shim;
+		PGShmemHeader *seghdr;
+
+		/*
+		 * Set seed shmem identifier which will be changed to the final one
+		 * when creating the shared memory segment.
+		 */
+		usedShmem->UsedShmemSegID = i;
+
+		elog(DEBUG3, "invoking PGSharedMemoryCreate(segment %s, size=%zu, reserved address space=%zu)",
+			 segDesc->name, segDesc->init_size, segDesc->max_size);
+
+		/*
+		 * Create the shmem segment.
+		 */
+		seghdr = PGSharedMemoryCreate(i, segDesc->name, segDesc->init_size, segDesc->max_size, &shim);
+
+		/*
+		 * Set up shared memory allocation mechanism
+		 */
+		InitShmemAllocator(i, segDesc->name, seghdr);
+
+		/* Remember main segment shim for the caller to use. */
+		if (i == main_seg_id)
+			main_seg_shim = shim;
+	}
+
+	return main_seg_shim;
+}
+
 void
 ShmemInitRegistered(void)
 {
+	int seg_id = 0;
+
 	/* Should be called only by the postmaster or a standalone backend. */
 	Assert(!IsUnderPostmaster);
 
@@ -174,6 +319,7 @@ ShmemInitRegistered(void)
 		void	   *structPtr;
 		bool		found;
 		ShmemIndexEnt *result;
+		int struct_seg_id;
 
 		elog(DEBUG2, "INIT [%d/%d]: %s", i, num_registrations, registry[i]->name);
 
@@ -190,8 +336,27 @@ ShmemInitRegistered(void)
 		if (found)
 			elog(ERROR, "shmem struct \"%s\" is already initialized", registry[i]->name);
 
-		/* allocate and initialize it */
-		structPtr = ShmemAllocRaw(registry[i]->size, &allocated_size);
+		/*
+		 * allocate and initialize the structures. Resizable structures go into
+		 * their own segments and fixed structures go into the main segment.
+		 */
+		if (registry[i]->max_size > 0)
+		{
+#ifdef USE_ASSERT_CHECKING
+			ShmemSegDesc *segDesc = &seg_registry[seg_id];
+
+			Assert(seg_id < num_seg_registrations - 1);
+			Assert(strcmp(segDesc->name, registry[i]->name) == 0);
+			Assert(segDesc->init_size >= registry[i]->size + registry[i]->extra_size);
+			Assert(segDesc->max_size >= registry[i]->max_size);
+#endif
+			struct_seg_id = seg_id++;
+
+		}
+		else
+			struct_seg_id = main_seg_id;
+
+		structPtr = ShmemAllocRaw(&Segments[struct_seg_id], registry[i]->size, &allocated_size);
 		if (structPtr == NULL)
 		{
 			/* out of memory; remove the failed ShmemIndex entry */
@@ -205,6 +370,7 @@ ShmemInitRegistered(void)
 		result->size = registry[i]->size;
 		result->allocated_size = allocated_size;
 		result->location = structPtr;
+		result->segment_id = struct_seg_id;
 
 		*(registry[i]->ptr) = structPtr;
 		if (registry[i]->init_fn)
@@ -212,13 +378,101 @@ ShmemInitRegistered(void)
 	}
 }
 
+#ifndef EXEC_BACKEND
+void
+ShmemResizeRegistered(const char *name, Size new_size)
+{
+	ShmemIndexEnt *result;
+	bool found;
+	/* CACHELINEALIGN the new size */
+	Size allocated_size = CACHELINEALIGN(new_size);
+	ShmemSegment *segment;
+	ShmemAllocatorData *ShmemAllocator;
+	Size		newFree;
+	PGShmemHeader *shmhdr;
+	Size new_seg_size;
+
+	/* look it up in the shmem index */
+	LWLockAcquire(ShmemIndexLock, LW_EXCLUSIVE);
+	result = (ShmemIndexEnt *)
+		hash_search(ShmemIndex, name, HASH_FIND, &found);
+	if (!found)
+		elog(ERROR, "shmem struct \"%s\" is not initialized", name);
+
+	Assert(result);
+
+	/*
+	 * TODO:
+	 * In order to use num_seg_registrations here in EXEC_BACKEND case, it needs
+	 * to be passed through launch_backend() to the backend process which will
+	 * call this function.
+	 */
+	Assert(result->segment_id >= 0 && result->segment_id < num_seg_registrations);
+
+	/* Structures in the main segment are fixed-size. */
+	Assert(result->segment_id != main_seg_id);
+
+	segment = &Segments[result->segment_id];
+	ShmemAllocator = segment->ShmemAllocator;
+	shmhdr = segment->ShmemSegHdr;
+
+	/*
+	 * Each resizable structure is placed in its own segment, right after the
+	 * header and the spinlock. Hence the memory location where it ends is the
+	 * same as the start of free memory in that segment.
+	 */
+	Assert((char *) segment->ShmemBase + ShmemAllocator->free_offset == (char *) result->location + result->allocated_size);
+	newFree = ShmemAllocator->free_offset + (allocated_size - result->allocated_size);
+	new_seg_size = shmhdr->totalsize + (allocated_size - result->allocated_size);
+	new_seg_size = align_size(new_seg_size, 8192);
+	Assert(new_seg_size >= 0);
+	if (allocated_size == result->allocated_size)
+	{
+		result->size = new_size;
+		/* No need to resize if the existing allocated size is sufficient */
+		LWLockRelease(ShmemIndexLock);
+		return;
+	}
+	/* TODO: Can we call PGSharedMemoryResize while holding the LWLock? */
+	else if (allocated_size > result->allocated_size)
+	{
+		/* We need to expand the shared memory segment before expanding the structure. */
+		PGSharedMemoryResize(result->segment_id, segment->ShmemSegmentName, new_seg_size);
+		/* Update the available size in the shmem index entry. */
+		SpinLockAcquire(&ShmemAllocator->shmem_lock);
+		ShmemAllocator->free_offset = newFree;
+		SpinLockRelease(&ShmemAllocator->shmem_lock);
+		result->size = new_size;
+		result->allocated_size = allocated_size;
+	}
+	else
+	{
+		SpinLockAcquire(&ShmemAllocator->shmem_lock);
+		ShmemAllocator->free_offset = newFree;
+		SpinLockRelease(&ShmemAllocator->shmem_lock);
+		result->size = new_size;
+		result->allocated_size = allocated_size;
+		/* Shrink the shared structure before shrinking the shared memory segment. */
+		PGSharedMemoryResize(result->segment_id, segment->ShmemSegmentName, new_seg_size);
+	}
+
+	/*
+	 * End of the structure should still be same as the start of free memory
+	 * in the segment
+	 */
+	Assert((char *) segment->ShmemBase + ShmemAllocator->free_offset == (char *) result->location + result->allocated_size);
+
+	LWLockRelease(ShmemIndexLock);
+}
+#endif
+
 #ifdef EXEC_BACKEND
 void
 ShmemAttachRegistered(void)
 {
 	/* Must be initializing a (non-standalone) backend */
 	Assert(IsUnderPostmaster);
-	Assert(ShmemAllocator->index != NULL);
+	Assert(ShmemIndex != NULL);
 
 	LWLockAcquire(ShmemIndexLock, LW_EXCLUSIVE);
 
@@ -251,7 +505,7 @@ ShmemAttachRegistered(void)
 #endif
 
 /*
- *	InitShmemAllocator() --- set up basic pointers to shared memory.
+ *	InitShmemAllocator() --- set up basic pointers to shared memory in the given segment.
  *
  * Called at postmaster or stand-alone backend startup, to initialize the
  * allocator's data structure in the shared memory segment.  In EXEC_BACKEND,
@@ -259,9 +513,13 @@ ShmemAttachRegistered(void)
  * memory areas.
  */
 void
-InitShmemAllocator(PGShmemHeader *seghdr)
+InitShmemAllocator(int segment_id, const char *segname, PGShmemHeader *seghdr)
 {
+	ShmemSegment *segment;
+
 	Assert(seghdr != NULL);
+	Assert(segment_id >= 0 && segment_id < NUM_MEMORY_MAPPINGS);
+	segment = &Segments[segment_id];
 
 	/*
 	 * We assume the pointer and offset are MAXALIGN.  Not a hard requirement,
@@ -270,23 +528,24 @@ InitShmemAllocator(PGShmemHeader *seghdr)
 	Assert(seghdr == (void *) MAXALIGN(seghdr));
 	Assert(seghdr->content_offset == MAXALIGN(seghdr->content_offset));
 
-	ShmemSegHdr = seghdr;
-	ShmemBase = seghdr;
-	ShmemEnd = (char *) ShmemBase + seghdr->totalsize;
+	segment->ShmemSegHdr = seghdr;
+	segment->ShmemBase = seghdr;
+	segment->ShmemSegmentName = segname;
 
 #ifndef EXEC_BACKEND
 	Assert(!IsUnderPostmaster);
 #endif
 	if (IsUnderPostmaster)
 	{
-		PGShmemHeader *shmhdr = ShmemSegHdr;
+		PGShmemHeader *shmhdr = segment->ShmemSegHdr;
+
+		segment->ShmemAllocator = (ShmemAllocatorData *) ((char *) shmhdr + shmhdr->content_offset);
 
-		ShmemAllocator = (ShmemAllocatorData *) ((char *) shmhdr + shmhdr->content_offset);
-		ShmemLock = &ShmemAllocator->shmem_lock;
 	}
 	else
 	{
 		Size		offset;
+		ShmemAllocatorData *ShmemAllocator;
 
 		/*
 		 * Allocations after this point should go through ShmemAlloc, which
@@ -303,47 +562,65 @@ InitShmemAllocator(PGShmemHeader *seghdr)
 		ShmemAllocator = (ShmemAllocatorData *) ((char *) seghdr + seghdr->content_offset);
 
 		SpinLockInit(&ShmemAllocator->shmem_lock);
-		ShmemLock = &ShmemAllocator->shmem_lock;
 		ShmemAllocator->free_offset = offset;
 		/* ShmemIndex can't be set up yet (need LWLocks first) */
 		ShmemAllocator->index = NULL;
+
+		segment->ShmemAllocator = ShmemAllocator;
 		ShmemIndex = (HTAB *) NULL;
 	}
+
+	/* Expose ShmemLock from the main segment for allocating LWLock tranches. */
+	if (segment_id == main_seg_id)
+		ShmemLock = &segment->ShmemAllocator->shmem_lock;
 }
 
 /*
- * ShmemAlloc -- allocate max-aligned chunk from shared memory
+ * ShmemAlloc --
+ * 		allocate max-aligned chunk from given shared memory segment
  *
  * Throws error if request cannot be satisfied.
  *
- * Assumes ShmemLock and ShmemSegHdr are initialized.
+ * Assumes ShmemLock and ShmemSegHdr in the given segment are initialized.
  */
-void *
-ShmemAlloc(Size size)
+static void *
+ShmemAllocInternal(ShmemSegment *segment, Size size)
 {
 	void	   *newSpace;
 	Size		allocated_size;
 
-	newSpace = ShmemAllocRaw(size, &allocated_size);
+	newSpace = ShmemAllocRaw(segment, size, &allocated_size);
 	if (!newSpace)
 		ereport(ERROR,
 				(errcode(ERRCODE_OUT_OF_MEMORY),
-				 errmsg("out of shared memory (%zu bytes requested)",
-						size)));
+				 errmsg("out of shared memory in segment %s (%zu bytes requested)",
+						segment->ShmemSegmentName, size)));
 	return newSpace;
 }
 
+void *
+ShmemAlloc(Size size)
+{
+	return ShmemAllocInternal(&Segments[main_seg_id], size);
+}
+
 /*
  * ShmemAllocNoError -- allocate max-aligned chunk from shared memory
  *
  * As ShmemAlloc, but returns NULL if out of space, rather than erroring.
+ *
+ * This is used as a memory allocation callback for hash tables created using
+ * dynahash.c APIs. It would take some work to make the callback specify the
+ * segment in which to allocate the memory. For now, there is no need to create
+ * shared memory hash tables in segments other than the main memory segment,
+ * so we do not support a segment_id parameter here.
  */
 void *
 ShmemAllocNoError(Size size)
 {
 	Size		allocated_size;
 
-	return ShmemAllocRaw(size, &allocated_size);
+	return ShmemAllocRaw(&Segments[main_seg_id], size, &allocated_size);
 }
 
 /*
@@ -353,11 +630,13 @@ ShmemAllocNoError(Size size)
  * be equal to the number requested plus any padding we choose to add.
  */
 static void *
-ShmemAllocRaw(Size size, Size *allocated_size)
+ShmemAllocRaw(ShmemSegment *segment, Size size, Size *allocated_size)
 {
 	Size		newStart;
 	Size		newFree;
 	void	   *newSpace;
+	PGShmemHeader *shmhdr = segment->ShmemSegHdr;
+	ShmemAllocatorData *ShmemAllocator = segment->ShmemAllocator;
 
 	/*
 	 * Ensure all space is adequately aligned.  We used to only MAXALIGN this
@@ -373,22 +652,21 @@ ShmemAllocRaw(Size size, Size *allocated_size)
 	size = CACHELINEALIGN(size);
 	*allocated_size = size;
 
-	Assert(ShmemSegHdr != NULL);
+	Assert(shmhdr != NULL);
 
-	SpinLockAcquire(ShmemLock);
+	SpinLockAcquire(&ShmemAllocator->shmem_lock);
 
 	newStart = ShmemAllocator->free_offset;
-
 	newFree = newStart + size;
-	if (newFree <= ShmemSegHdr->totalsize)
+	if (newFree <= shmhdr->totalsize)
 	{
-		newSpace = (char *) ShmemBase + newStart;
+		newSpace = (char *) segment->ShmemBase + newStart;
 		ShmemAllocator->free_offset = newFree;
 	}
 	else
 		newSpace = NULL;
 
-	SpinLockRelease(ShmemLock);
+	SpinLockRelease(&ShmemAllocator->shmem_lock);
 
 	/* note this assert is okay with newSpace == NULL */
 	Assert(newSpace == (void *) CACHELINEALIGN(newSpace));
@@ -396,15 +674,49 @@ ShmemAllocRaw(Size size, Size *allocated_size)
 	return newSpace;
 }
 
+bool
+ShmemStructAddrIsValid(const char *name, const void *addr)
+{
+	ShmemIndexEnt *result;
+	bool found;
+	int segment_id;
+
+	LWLockAcquire(ShmemIndexLock, LW_SHARED);
+	result = (ShmemIndexEnt *)
+		hash_search(ShmemIndex, name, HASH_FIND, &found);
+	if (!found)
+	{
+		LWLockRelease(ShmemIndexLock);
+		return false;
+	}
+	segment_id = result->segment_id;
+	LWLockRelease(ShmemIndexLock);
+
+	/*
+	 * TODO: We could do a better job here: Make sure that the given address is
+	 * within the memory allocated for the given structure.
+	 */
+	return ShmemAddrIsValid(segment_id, addr);
+}
+
 /*
- * ShmemAddrIsValid -- test if an address refers to shared memory
+ * ShmemAddrIsValid
+ * 		test if an address refers to the given shared memory segment.
  *
  * Returns true if the pointer points within the shared memory segment.
  */
-bool
-ShmemAddrIsValid(const void *addr)
+static bool
+ShmemAddrIsValid(int segment_id, const void *addr)
 {
-	return (addr >= ShmemBase) && (addr < ShmemEnd);
+	ShmemSegment *segment;
+	void	   *shmemEnd;
+
+	Assert(segment_id >= 0 && segment_id < NUM_MEMORY_MAPPINGS);
+
+	segment = &Segments[segment_id];
+	shmemEnd = (char *) segment->ShmemBase + segment->ShmemSegHdr->totalsize;
+
+	return (addr >= segment->ShmemBase) && (addr < shmemEnd);
 }
 
 /*
@@ -556,6 +868,9 @@ shmem_hash_attach(void *arg)
  * Note: before Postgres 9.0, this function returned NULL for some failure
  * cases.  Now, it always throws error instead, so callers need not check
  * for NULL.
+ *
+ * See the prologue of ShmemAllocNoError for an explanation of why there is no
+ * segment_id parameter.
  */
 HTAB *
 ShmemInitHash(const char *name,		/* table string name for shmem index */
@@ -579,9 +894,9 @@ ShmemInitHash(const char *name,		/* table string name for shmem index */
 	hash_flags |= HASH_SHARED_MEM | HASH_ALLOC | HASH_DIRSIZE;
 
 	/* look it up in the shmem index */
-	location = ShmemInitStruct(name,
-							   hash_get_shared_size(infoP, hash_flags),
-							   &found);
+	location = ShmemInitStructInSegment(name,
+										hash_get_shared_size(infoP, hash_flags),
+										&found, main_seg_id);
 
 	/*
 	 * if it already exists, attach to it rather than allocate and initialize
@@ -611,18 +926,38 @@ ShmemInitHash(const char *name,		/* table string name for shmem index */
  *	Note: before Postgres 9.0, this function returned NULL for some failure
  *	cases.  Now, it always throws error instead, so callers need not check
  *	for NULL.
+ *
+ * TODO: Once we use ShmemRegisterStruct for all shared memory structures, we
+ * won't need this function anymore, at least not in this form. Possibly the next one as well.
  */
 void *
 ShmemInitStruct(const char *name, Size size, bool *foundPtr)
+{
+	return ShmemInitStructInSegment(name, size, foundPtr, main_seg_id);
+}
+
+void *
+ShmemInitStructInSegment(const char *name, Size size, bool *foundPtr, int segment_id)
 {
 	ShmemIndexEnt *result;
 	void	   *structPtr;
+	ShmemSegment *segment;
+
+	Assert(segment_id >= 0 && segment_id < num_registrations);
+
+	segment = &Segments[segment_id];
 
 	LWLockAcquire(ShmemIndexLock, LW_EXCLUSIVE);
 
 	if (!ShmemIndex)
 	{
-		/* Must be trying to create/attach to ShmemIndex itself */
+		ShmemAllocatorData *ShmemAllocator = segment->ShmemAllocator;
+
+		/*
+		 * Must be trying to create/attach to ShmemIndex itself in the main
+		 * shared memory segment.
+		 */
+		Assert(segment_id == main_seg_id);
 		Assert(strcmp(name, "ShmemIndex") == 0);
 
 		if (IsUnderPostmaster)
@@ -643,7 +978,7 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
 			 * process can be accessing shared memory yet.
 			 */
 			Assert(ShmemAllocator->index == NULL);
-			structPtr = ShmemAlloc(size);
+			structPtr = ShmemAllocInternal(segment, size);
 			ShmemAllocator->index = structPtr;
 			*foundPtr = false;
 		}
@@ -660,8 +995,8 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
 		LWLockRelease(ShmemIndexLock);
 		ereport(ERROR,
 				(errcode(ERRCODE_OUT_OF_MEMORY),
-				 errmsg("could not create ShmemIndex entry for data structure \"%s\"",
-						name)));
+				 errmsg("could not create ShmemIndex entry for data structure \"%s\" in segment %d",
+						name, segment_id)));
 	}
 
 	if (*foundPtr)
@@ -686,7 +1021,7 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
 		Size		allocated_size;
 
 		/* It isn't in the table yet. allocate and initialize it */
-		structPtr = ShmemAllocRaw(size, &allocated_size);
+		structPtr = ShmemAllocRaw(segment, size, &allocated_size);
 		if (structPtr == NULL)
 		{
 			/* out of memory; remove the failed ShmemIndex entry */
@@ -701,11 +1036,12 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
 		result->size = size;
 		result->allocated_size = allocated_size;
 		result->location = structPtr;
+		result->segment_id = segment_id;
 	}
 
 	LWLockRelease(ShmemIndexLock);
 
-	Assert(ShmemAddrIsValid(structPtr));
+	Assert(ShmemAddrIsValid(segment_id, structPtr));
 
 	Assert(structPtr == (void *) CACHELINEALIGN(structPtr));
 
@@ -743,18 +1079,35 @@ mul_size(Size s1, Size s2)
 	return result;
 }
 
+/*
+ * Round up the given size to the next multiple of the given alignment, checking
+ * for overflow.
+ */
+Size
+align_size(Size size, Size alignment)
+{
+
+	Assert(alignment != 0);
+
+	if (size % alignment == 0)
+		return size;
+	return add_size(size, alignment - (size % alignment));
+}
+
 /* SQL SRF showing allocated shared memory */
 Datum
 pg_get_shmem_allocations(PG_FUNCTION_ARGS)
 {
-#define PG_GET_SHMEM_SIZES_COLS 4
+#define PG_GET_SHMEM_SIZES_COLS 5
 	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
 	HASH_SEQ_STATUS hstat;
 	ShmemIndexEnt *ent;
-	Size		named_allocated = 0;
+	Size		*named_allocated;
 	Datum		values[PG_GET_SHMEM_SIZES_COLS];
 	bool		nulls[PG_GET_SHMEM_SIZES_COLS];
+	int			i;
 
+	named_allocated = palloc0(sizeof(Size) * num_seg_registrations);
 	InitMaterializedSRF(fcinfo, 0);
 
 	LWLockAcquire(ShmemIndexLock, LW_SHARED);
@@ -765,30 +1118,49 @@ pg_get_shmem_allocations(PG_FUNCTION_ARGS)
 	memset(nulls, 0, sizeof(nulls));
 	while ((ent = (ShmemIndexEnt *) hash_seq_search(&hstat)) != NULL)
 	{
+		ShmemSegment *segment = &Segments[ent->segment_id];
+		PGShmemHeader *shmhdr = segment->ShmemSegHdr;
+
 		values[0] = CStringGetTextDatum(ent->key);
-		values[1] = Int64GetDatum((char *) ent->location - (char *) ShmemSegHdr);
-		values[2] = Int64GetDatum(ent->size);
-		values[3] = Int64GetDatum(ent->allocated_size);
-		named_allocated += ent->allocated_size;
+		values[1] = CStringGetTextDatum(segment->ShmemSegmentName);
+		values[2] = Int64GetDatum((char *) ent->location - (char *) shmhdr);
+		values[3] = Int64GetDatum(ent->size);
+		values[4] = Int64GetDatum(ent->allocated_size);
+		named_allocated[ent->segment_id] += ent->allocated_size;
 
 		tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
 							 values, nulls);
 	}
 
 	/* output shared memory allocated but not counted via the shmem index */
-	values[0] = CStringGetTextDatum("<anonymous>");
-	nulls[1] = true;
-	values[2] = Int64GetDatum(ShmemAllocator->free_offset - named_allocated);
-	values[3] = values[2];
-	tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
+	for (i = 0; i < num_seg_registrations; i++)
+	{
+		ShmemSegment *segment = &Segments[i];
+		ShmemAllocatorData *ShmemAllocator = segment->ShmemAllocator;
+
+		values[0] = CStringGetTextDatum("<anonymous>");
+		values[1] = CStringGetTextDatum(segment->ShmemSegmentName);
+		nulls[2] = true;
+		values[3] = Int64GetDatum(ShmemAllocator->free_offset - named_allocated[i]);
+		values[4] = values[3];
+		tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
+	}
 
 	/* output as-of-yet unused shared memory */
-	nulls[0] = true;
-	values[1] = Int64GetDatum(ShmemAllocator->free_offset);
-	nulls[1] = false;
-	values[2] = Int64GetDatum(ShmemSegHdr->totalsize - ShmemAllocator->free_offset);
-	values[3] = values[2];
-	tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
+	memset(nulls, 0, sizeof(nulls));
+	for (i = 0; i < num_seg_registrations; i++)
+	{
+		ShmemSegment *segment = &Segments[i];
+		PGShmemHeader *shmhdr = segment->ShmemSegHdr;
+		ShmemAllocatorData *ShmemAllocator = segment->ShmemAllocator;
+
+		nulls[0] = true;
+		values[1] = CStringGetTextDatum(segment->ShmemSegmentName);
+		values[2] = Int64GetDatum(ShmemAllocator->free_offset);
+		values[3] = Int64GetDatum(shmhdr->totalsize - ShmemAllocator->free_offset);
+		values[4] = values[3];
+		tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
+	}
 
 	LWLockRelease(ShmemIndexLock);
 
@@ -813,7 +1185,7 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS)
 	Size		os_page_size;
 	void	  **page_ptrs;
 	int		   *pages_status;
-	uint64		shm_total_page_count,
+	uint64		shm_total_page_count = 0,
 				shm_ent_page_count,
 				max_nodes;
 	Size	   *nodes;
@@ -848,7 +1220,13 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS)
 	 * this is not very likely, and moreover we have more entries, each of
 	 * them using only fraction of the total pages.
 	 */
-	shm_total_page_count = (ShmemSegHdr->totalsize / os_page_size) + 1;
+	for (int segment = 0; segment < num_seg_registrations; segment++)
+	{
+		PGShmemHeader *shmhdr = Segments[segment].ShmemSegHdr;
+
+		shm_total_page_count += (shmhdr->totalsize / os_page_size) + 1;
+	}
+
 	page_ptrs = palloc0_array(void *, shm_total_page_count);
 	pages_status = palloc_array(int, shm_total_page_count);
 
@@ -989,7 +1367,7 @@ pg_get_shmem_pagesize(void)
 	Assert(huge_pages_status != HUGE_PAGES_UNKNOWN);
 
 	if (huge_pages_status == HUGE_PAGES_ON)
-		GetHugePageSize(&os_page_size, NULL);
+		GetHugePageSize(&os_page_size, NULL, NULL);
 
 	return os_page_size;
 }
@@ -999,3 +1377,45 @@ pg_numa_available(PG_FUNCTION_ARGS)
 {
 	PG_RETURN_BOOL(pg_numa_init() != -1);
 }
+
+/* SQL SRF showing shared memory segments */
+Datum
+pg_get_shmem_segments(PG_FUNCTION_ARGS)
+{
+#define PG_GET_SHMEM_SEGS_COLS 5
+	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+	Datum		values[PG_GET_SHMEM_SEGS_COLS];
+	bool		nulls[PG_GET_SHMEM_SEGS_COLS];
+	int			i;
+
+	InitMaterializedSRF(fcinfo, 0);
+
+	/* output all allocated entries */
+	for (i = 0; i < num_seg_registrations; i++)
+	{
+		ShmemSegment *segment = &Segments[i];
+		PGShmemHeader *shmhdr = segment->ShmemSegHdr;
+		ShmemAllocatorData *ShmemAllocator = segment->ShmemAllocator;
+		int			j;
+
+		if (shmhdr == NULL)
+		{
+			for (j = 0; j < PG_GET_SHMEM_SEGS_COLS; j++)
+				nulls[j] = true;
+		}
+		else
+		{
+			memset(nulls, 0, sizeof(nulls));
+			values[0] = Int32GetDatum(i);
+			values[1] = CStringGetTextDatum(segment->ShmemSegmentName);
+			values[2] = Int64GetDatum(shmhdr->totalsize);
+			values[3] = Int64GetDatum(ShmemAllocator->free_offset);
+			values[4] = Int64GetDatum(shmhdr->reservedsize);
+		}
+
+		tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
+							 values, nulls);
+	}
+
+	return (Datum) 0;
+}
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index fe75ead3501..8d223bed327 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -595,7 +595,7 @@ CreatePredXact(void)
 static void
 ReleasePredXact(SERIALIZABLEXACT *sxact)
 {
-	Assert(ShmemAddrIsValid(sxact));
+	Assert(ShmemStructAddrIsValid("PredXactList", sxact));
 
 	dlist_delete(&sxact->xactLink);
 	dlist_push_tail(&PredXact->availableList, &sxact->xactLink);
diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat
index 83f6501df38..4b27f2a245e 100644
--- a/src/include/catalog/pg_proc.dat
+++ b/src/include/catalog/pg_proc.dat
@@ -8592,8 +8592,8 @@
 { oid => '5052', descr => 'allocations from the main shared memory segment',
   proname => 'pg_get_shmem_allocations', prorows => '50', proretset => 't',
   provolatile => 'v', prorettype => 'record', proargtypes => '',
-  proallargtypes => '{text,int8,int8,int8}', proargmodes => '{o,o,o,o}',
-  proargnames => '{name,off,size,allocated_size}',
+  proallargtypes => '{text,text,int8,int8,int8}', proargmodes => '{o,o,o,o,o}',
+  proargnames => '{name,segment,off,size,allocated_size}',
   prosrc => 'pg_get_shmem_allocations' },
 
 { oid => '4099', descr => 'Is NUMA support available?',
@@ -8616,6 +8616,14 @@
   proargmodes => '{o,o,o}', proargnames => '{name,type,size}',
   prosrc => 'pg_get_dsm_registry_allocations' },
 
+# shared memory segments
+{ oid => '5101', descr => 'shared memory segments',
+  proname => 'pg_get_shmem_segments', prorows => '6', proretset => 't',
+  provolatile => 'v', prorettype => 'record', proargtypes => '',
+  proallargtypes => '{int4,text,int8,int8,int8}', proargmodes => '{o,o,o,o,o}',
+  proargnames => '{id,name,size,freeoffset,reserved_size}',
+  prosrc => 'pg_get_shmem_segments' },
+
 # memory context of local backend
 { oid => '2282',
   descr => 'information about all memory contexts of local backend',
diff --git a/src/include/storage/pg_shmem.h b/src/include/storage/pg_shmem.h
index 10c7b065861..d0b8910e0ad 100644
--- a/src/include/storage/pg_shmem.h
+++ b/src/include/storage/pg_shmem.h
@@ -31,7 +31,14 @@ typedef struct PGShmemHeader	/* standard header for all Postgres shmem */
 	int32		magic;			/* magic # to identify Postgres segments */
 #define PGShmemMagic  679834894
 	pid_t		creatorPID;		/* PID of creating process (set but unread) */
+
+	/*
+	 * TODO: We might have to rename these fields to allocSize (for amount of
+	 * memory allocated currently in this segment), maxSize (for maximum size
+	 * the segment can grow to.)
+	 */
 	Size		totalsize;		/* total size of segment */
+	Size		reservedsize;	/* Size of the reserved mapping */
 	Size		content_offset; /* offset to the data, i.e. size of this
 								 * header */
 	dsm_handle	dsm_control;	/* ID of dynamic shared memory control seg */
@@ -41,6 +48,33 @@ typedef struct PGShmemHeader	/* standard header for all Postgres shmem */
 #endif
 } PGShmemHeader;
 
+/*
+ * Information about the shared memory segment that is required to be passed
+ * from the Postmaster to each backend.
+ */
+typedef struct PGUsedShmemInfo
+{
+	void	   *UsedShmemSegAddr;	/* SysV shared memory for the header */
+#ifndef WIN32
+	unsigned long UsedShmemSegID;	/* IPC key */
+#else
+	void	   *ShmemProtectiveRegion;	/* Protective region for Windows
+										 * shared memory */
+	HANDLE		UsedShmemSegID;
+#endif
+} PGUsedShmemInfo;
+
+/*
+ * Resizable shared memory structures are allocated in their own shared memory
+ * segments. For now we allocate arrays to hold properties of each segment. This
+ * macro is the maximum number of resizable shared memory segments we support.
+ *
+ * TODO: We should find a way to change this on-demand.
+ */
+#define NUM_MEMORY_MAPPINGS 4
+
+extern PGDLLIMPORT PGUsedShmemInfo UsedShmemInfo[NUM_MEMORY_MAPPINGS];
+
 /* GUC variables */
 extern PGDLLIMPORT int shared_memory_type;
 extern PGDLLIMPORT int huge_pages;
@@ -64,14 +98,6 @@ typedef enum
 	SHMEM_TYPE_MMAP,
 }			PGShmemType;
 
-#ifndef WIN32
-extern PGDLLIMPORT unsigned long UsedShmemSegID;
-#else
-extern PGDLLIMPORT HANDLE UsedShmemSegID;
-extern PGDLLIMPORT void *ShmemProtectiveRegion;
-#endif
-extern PGDLLIMPORT void *UsedShmemSegAddr;
-
 #if !defined(WIN32) && !defined(EXEC_BACKEND)
 #define DEFAULT_SHARED_MEMORY_TYPE SHMEM_TYPE_MMAP
 #elif !defined(WIN32)
@@ -85,10 +111,13 @@ extern void PGSharedMemoryReAttach(void);
 extern void PGSharedMemoryNoReAttach(void);
 #endif
 
-extern PGShmemHeader *PGSharedMemoryCreate(Size size,
+extern PGShmemHeader *PGSharedMemoryCreate(int segment_id, const char *name,  Size init_size, Size max_size,
 										   PGShmemHeader **shim);
 extern bool PGSharedMemoryIsInUse(unsigned long id1, unsigned long id2);
 extern void PGSharedMemoryDetach(void);
-extern void GetHugePageSize(Size *hugepagesize, int *mmap_flags);
+extern void GetHugePageSize(Size *hugepagesize, int *mmap_flags,
+							int *memfd_flags);
+extern void PrepareHugePages(void);
+extern bool PGSharedMemoryResize(int segment_id, const char *name, Size new_size);
 
 #endif							/* PG_SHMEM_H */
diff --git a/src/include/storage/shmem.h b/src/include/storage/shmem.h
index cbd4ef8d03f..d34553cfd7b 100644
--- a/src/include/storage/shmem.h
+++ b/src/include/storage/shmem.h
@@ -50,6 +50,9 @@ typedef struct ShmemStructDesc
 	 */
 	size_t		extra_size;
 
+	/* Maximum size this structure can grow up to in the future. The memory is not allocated right away, but the corresponding address space is reserved so that memory can be mapped into it when the structure grows. Typically used for structures like buffer blocks which need large contiguous memory. */
+	size_t		max_size;
+
 	/* Pointer to the variable to which pointer to this shared memory area is assigned after allocation. */
 	void	   **ptr;
 } ShmemStructDesc;
@@ -76,14 +79,19 @@ typedef struct ShmemHashDesc
 extern PGDLLIMPORT slock_t *ShmemLock;
 typedef struct PGShmemHeader PGShmemHeader; /* avoid including
 											 * storage/pg_shmem.h here */
-extern void InitShmemAllocator(PGShmemHeader *seghdr);
+extern void InitShmemAllocator(int segment_id, const char *segname, PGShmemHeader *seghdr);
 extern void *ShmemAlloc(Size size);
 extern void *ShmemAllocNoError(Size size);
-extern bool ShmemAddrIsValid(const void *addr);
+extern bool ShmemStructAddrIsValid(const char *name, const void *addr);
 extern void InitShmemIndex(void);
 
 extern void ShmemRegisterHash(ShmemHashDesc *desc, HASHCTL *infoP, int hash_flags);
 extern void ShmemRegisterStruct(ShmemStructDesc *desc);
+extern void ShmemRegisterSegment(const char *name, Size init_size, Size max_size);
+extern PGShmemHeader *ShmemCreateRegisteredSegments(void);
+#ifndef EXEC_BACKEND
+extern void ShmemResizeRegistered(const char *name, Size new_size);
+#endif
 
 /* Legacy functions */
 extern HTAB *ShmemInitHash(const char *name, int64 init_size, int64 max_size,
@@ -96,6 +104,7 @@ extern void ShmemAttachRegistered(void);
 
 extern Size add_size(Size s1, Size s2);
 extern Size mul_size(Size s1, Size s2);
+extern Size align_size(Size size, Size align);
 
 extern PGDLLIMPORT Size pg_get_shmem_pagesize(void);
 
@@ -115,6 +124,7 @@ typedef struct
 	void	   *location;		/* location in shared mem */
 	Size		size;			/* # bytes requested for the structure */
 	Size		allocated_size; /* # bytes actually allocated */
+	int			segment_id;		/* segment in which the structure is allocated */
 } ShmemIndexEnt;
 
 #endif							/* SHMEM_H */
diff --git a/src/test/modules/Makefile b/src/test/modules/Makefile
index 44c7163c1cd..a5df6edae18 100644
--- a/src/test/modules/Makefile
+++ b/src/test/modules/Makefile
@@ -14,6 +14,7 @@ SUBDIRS = \
 		  libpq_pipeline \
 		  oauth_validator \
 		  plsample \
+		  resizable_shmem \
 		  spgist_name_ops \
 		  test_aio \
 		  test_binaryheap \
diff --git a/src/test/modules/meson.build b/src/test/modules/meson.build
index 2634a519935..961bb62759d 100644
--- a/src/test/modules/meson.build
+++ b/src/test/modules/meson.build
@@ -13,6 +13,7 @@ subdir('libpq_pipeline')
 subdir('nbtree')
 subdir('oauth_validator')
 subdir('plsample')
+subdir('resizable_shmem')
 subdir('spgist_name_ops')
 subdir('ssl_passphrase_callback')
 subdir('test_aio')
diff --git a/src/test/modules/resizable_shmem/Makefile b/src/test/modules/resizable_shmem/Makefile
new file mode 100644
index 00000000000..ad2f040b2f0
--- /dev/null
+++ b/src/test/modules/resizable_shmem/Makefile
@@ -0,0 +1,23 @@
+# src/test/modules/resizable_shmem/Makefile
+
+MODULES = resizable_shmem
+ISOLATION = resizable_shmem
+
+EXTENSION = resizable_shmem
+DATA = resizable_shmem--1.0.sql
+PGFILEDESC = "resizable_shmem - test module for resizable shared memory"
+
+# This test requires library to be loaded at the server start, so disable
+# installcheck
+NO_INSTALLCHECK = 1
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/resizable_shmem
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/src/makefiles/pgxs.mk
+endif
diff --git a/src/test/modules/resizable_shmem/expected/resizable_shmem.out b/src/test/modules/resizable_shmem/expected/resizable_shmem.out
new file mode 100644
index 00000000000..7827e7ad450
--- /dev/null
+++ b/src/test/modules/resizable_shmem/expected/resizable_shmem.out
@@ -0,0 +1,89 @@
+Parsed test spec with 3 sessions
+
+starting permutation: zia zis s1w2048 s2r2048 zs8192 zia zis s2w8192 s1r8192 zs6144 zia zis s1w6144 s2r6144
+step zia: SELECT name, size, allocated_size FROM pg_shmem_allocations WHERE name = 'resizable_shmem';
+name           |size|allocated_size
+---------------+----+--------------
+resizable_shmem|8196|          8320
+(1 row)
+
+step zis: SELECT name, size, freeoffset, reserved_size FROM pg_shmem_segments WHERE name = 'resizable_shmem';
+name           | size|freeoffset|reserved_size
+---------------+-----+----------+-------------
+resizable_shmem|16384|      8448|        40960
+(1 row)
+
+step s1w2048: SELECT resizable_shmem_write(0, 100), resizable_shmem_write(2047, 200);
+resizable_shmem_write|resizable_shmem_write
+---------------------+---------------------
+                     |                     
+(1 row)
+
+step s2r2048: SELECT resizable_shmem_read(0), resizable_shmem_read(2047);
+resizable_shmem_read|resizable_shmem_read
+--------------------+--------------------
+                 100|                 200
+(1 row)
+
+resizer: NOTICE:  resized to 8192 entries (32772 bytes, was 2048 entries)
+step zs8192: SELECT resizable_shmem_resize(8192);
+resizable_shmem_resize
+----------------------
+                      
+(1 row)
+
+step zia: SELECT name, size, allocated_size FROM pg_shmem_allocations WHERE name = 'resizable_shmem';
+name           | size|allocated_size
+---------------+-----+--------------
+resizable_shmem|32772|         32896
+(1 row)
+
+step zis: SELECT name, size, freeoffset, reserved_size FROM pg_shmem_segments WHERE name = 'resizable_shmem';
+name           | size|freeoffset|reserved_size
+---------------+-----+----------+-------------
+resizable_shmem|40960|     33024|        40960
+(1 row)
+
+step s2w8192: SELECT resizable_shmem_write(8191, 500), resizable_shmem_write(2048, 300);
+resizable_shmem_write|resizable_shmem_write
+---------------------+---------------------
+                     |                     
+(1 row)
+
+step s1r8192: SELECT resizable_shmem_read(0), resizable_shmem_read(2047), resizable_shmem_read(2048), resizable_shmem_read(8191);
+resizable_shmem_read|resizable_shmem_read|resizable_shmem_read|resizable_shmem_read
+--------------------+--------------------+--------------------+--------------------
+                 100|                 200|                 300|                 500
+(1 row)
+
+resizer: NOTICE:  resized to 6144 entries (24580 bytes, was 8192 entries)
+step zs6144: SELECT resizable_shmem_resize(6144);
+resizable_shmem_resize
+----------------------
+                      
+(1 row)
+
+step zia: SELECT name, size, allocated_size FROM pg_shmem_allocations WHERE name = 'resizable_shmem';
+name           | size|allocated_size
+---------------+-----+--------------
+resizable_shmem|24580|         24704
+(1 row)
+
+step zis: SELECT name, size, freeoffset, reserved_size FROM pg_shmem_segments WHERE name = 'resizable_shmem';
+name           | size|freeoffset|reserved_size
+---------------+-----+----------+-------------
+resizable_shmem|32768|     24832|        40960
+(1 row)
+
+step s1w6144: SELECT resizable_shmem_write(6143, 999);
+resizable_shmem_write
+---------------------
+                     
+(1 row)
+
+step s2r6144: SELECT resizable_shmem_read(0), resizable_shmem_read(2047), resizable_shmem_read(2048), resizable_shmem_read(6143);
+resizable_shmem_read|resizable_shmem_read|resizable_shmem_read|resizable_shmem_read
+--------------------+--------------------+--------------------+--------------------
+                 100|                 200|                 300|                 999
+(1 row)
+
diff --git a/src/test/modules/resizable_shmem/meson.build b/src/test/modules/resizable_shmem/meson.build
new file mode 100644
index 00000000000..6e5f6d5caaf
--- /dev/null
+++ b/src/test/modules/resizable_shmem/meson.build
@@ -0,0 +1,37 @@
+# src/test/modules/resizable_shmem/meson.build
+
+resizable_shmem_sources = files(
+  'resizable_shmem.c',
+)
+
+if host_system == 'windows'
+  resizable_shmem_sources += rc_lib_gen.process(win32ver_rc, extra_args: [
+    '--NAME', 'resizable_shmem',
+    '--FILEDESC', 'resizable_shmem - test module for resizable shared memory',])
+endif
+
+resizable_shmem = shared_module('resizable_shmem',
+  resizable_shmem_sources,
+  kwargs: pg_test_mod_args,
+)
+test_install_libs += resizable_shmem
+
+test_install_data += files(
+  'resizable_shmem.control',
+  'resizable_shmem--1.0.sql',
+)
+
+tests += {
+  'name': 'resizable_shmem',
+  'sd': meson.current_source_dir(),
+  'bd': meson.current_build_dir(),
+  'isolation': {
+    'specs': [
+      'resizable_shmem',
+    ],
+    'regress_args': ['--temp-config', files('resizable_shmem.conf')],
+    # This test requires library to be loaded at the server start, so disable
+    # installcheck
+    'runningcheck': false,
+  },
+}
diff --git a/src/test/modules/resizable_shmem/resizable_shmem--1.0.sql b/src/test/modules/resizable_shmem/resizable_shmem--1.0.sql
new file mode 100644
index 00000000000..bfc2417abc9
--- /dev/null
+++ b/src/test/modules/resizable_shmem/resizable_shmem--1.0.sql
@@ -0,0 +1,22 @@
+/* src/test/modules/resizable_shmem/resizable_shmem--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION resizable_shmem" to load this file. \quit
+
+-- Resize the shared structure to hold new_entries int32 slots, preserving data
+CREATE FUNCTION resizable_shmem_resize(new_entries integer)
+RETURNS void
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+-- Store entry_value at 0-based index entry_key of the shared array
+CREATE FUNCTION resizable_shmem_write(entry_key integer, entry_value integer)
+RETURNS void
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+-- Return the value stored at 0-based index entry_key of the shared array
+CREATE FUNCTION resizable_shmem_read(entry_key integer)
+RETURNS integer
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
diff --git a/src/test/modules/resizable_shmem/resizable_shmem.c b/src/test/modules/resizable_shmem/resizable_shmem.c
new file mode 100644
index 00000000000..f28e2efebaf
--- /dev/null
+++ b/src/test/modules/resizable_shmem/resizable_shmem.c
@@ -0,0 +1,201 @@
+/* -------------------------------------------------------------------------
+ *
+ * resizable_shmem.c
+ *		Test module for PostgreSQL's resizable shared memory functionality
+ *
+ * This module demonstrates and tests the resizable shared memory API
+ * provided by shmem.c/shmem.h.
+ *
+ * -------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "fmgr.h"
+#include "funcapi.h"
+#include "miscadmin.h"
+#include "storage/shmem.h"
+#include "storage/spin.h"
+#include "utils/builtins.h"
+#include "utils/guc.h"
+#include "utils/memutils.h"
+#include "utils/timestamp.h"
+
+PG_MODULE_MAGIC;
+
+/* Entry constants */
+#define TEST_INITIAL_ENTRIES	2048			/* Initial number of entries */
+#define TEST_MAX_ENTRIES		8192			/* Maximum number of entries (4x initial) */
+#define TEST_ENTRY_SIZE			sizeof(int32)		/* Size of each entry */
+
+/*
+ * Resizable test data structure stored in shared memory.
+ *
+ * We do not use any locks: the test performs resizing, reads and writes,
+ * none of which are concurrent, which keeps the code and the test simple.
+ */
+typedef struct TestResizableShmemStruct
+{
+	/* Metadata: number of int32 slots currently available in data[] */
+	int32		num_entries;		/* Number of entries that can fit */
+
+	/* Data area - variable size, num_entries slots at any given moment */
+	int32		data[FLEXIBLE_ARRAY_MEMBER];
+} TestResizableShmemStruct;
+
+/* Global pointer to our shared memory structure */
+static TestResizableShmemStruct *resizable_shmem = NULL;
+
+static void resizable_shmem_shmem_init(void *arg);
+
+/*
+ * Descriptor registered via ShmemRegisterStruct(): initially sized for
+ * TEST_INITIAL_ENTRIES slots, resizable up to TEST_MAX_ENTRIES slots.
+ * NOTE(review): .ptr points at resizable_shmem, so shmem.c presumably
+ * keeps the global current across resizes — confirm against the
+ * ShmemRegisterStruct()/ShmemResizeRegistered() contract in shmem.c.
+ */
+static ShmemStructDesc testShmemDesc = {
+	.name = "resizable_shmem",
+	.size = offsetof(TestResizableShmemStruct, data) + (TEST_INITIAL_ENTRIES * TEST_ENTRY_SIZE),
+	.max_size = offsetof(TestResizableShmemStruct, data) + (TEST_MAX_ENTRIES * TEST_ENTRY_SIZE),
+	.alignment = MAXIMUM_ALIGNOF,
+	.init_fn = resizable_shmem_shmem_init,
+	.ptr = (void **) &resizable_shmem,
+};
+
+/* Hook previously installed by another module, if any; we chain to it */
+static shmem_request_hook_type prev_shmem_request_hook = NULL;
+
+static void resizable_shmem_request(void);
+
+/* SQL-callable functions */
+PG_FUNCTION_INFO_V1(resizable_shmem_resize);
+PG_FUNCTION_INFO_V1(resizable_shmem_write);
+PG_FUNCTION_INFO_V1(resizable_shmem_read);
+
+/*
+ * Module load callback
+ */
+void
+_PG_init(void)
+{
+	/*
+	 * Registering a shared memory structure is only possible while
+	 * shared_preload_libraries processing is underway.  When the library is
+	 * loaded any other way, quietly do nothing; the SQL functions themselves
+	 * detect the missing shared memory structure and complain.
+	 */
+	if (!process_shared_preload_libraries_in_progress)
+		return;
+
+#ifdef EXEC_BACKEND
+	ereport(ERROR,
+			errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+			errmsg("resizable_shmem is not supported in EXEC_BACKEND builds"));
+#endif
+
+	/* Chain our shmem_request_hook in front of any previously installed one. */
+	prev_shmem_request_hook = shmem_request_hook;
+	shmem_request_hook = resizable_shmem_request;
+}
+
+/*
+ * Module unload callback
+ *
+ * Undo what _PG_init() did.  _PG_init() installs the hook only when the
+ * library is loaded via shared_preload_libraries; if it bailed out early,
+ * prev_shmem_request_hook was never captured, and blindly writing it back
+ * would clobber a hook installed by some other module.  Hence, restore the
+ * previous hook only if ours is still the one installed.
+ */
+void
+_PG_fini(void)
+{
+	/* Restore hooks, but only if we actually installed ours */
+	if (shmem_request_hook == resizable_shmem_request)
+		shmem_request_hook = prev_shmem_request_hook;
+}
+
+/*
+ * shmem_request_hook callback: register the resizable test structure.
+ */
+static void
+resizable_shmem_request(void)
+{
+	/* Run any previously-installed request hook first. */
+	if (prev_shmem_request_hook != NULL)
+		(*prev_shmem_request_hook) ();
+
+	/* Now register our own resizable shared memory descriptor. */
+	ShmemRegisterStruct(&testShmemDesc);
+}
+
+/*
+ * init_fn callback: initialize freshly-allocated shared memory.
+ *
+ * By the time this runs, the structure has been allocated at the size we
+ * requested, so all that remains is to record the initial capacity.  The
+ * "arg" parameter is unused.
+ */
+static void
+resizable_shmem_shmem_init(void *arg)
+{
+	Assert(PointerIsValid(resizable_shmem));
+
+	resizable_shmem->num_entries = TEST_INITIAL_ENTRIES;
+}
+
+/*
+ * Resize the shared memory structure to accommodate the specified number of
+ * entries.
+ *
+ * Errors out if the module's shared memory was never set up (not loaded via
+ * shared_preload_libraries) or if new_entries is outside 1..TEST_MAX_ENTRIES,
+ * the range the registered descriptor can honor.
+ */
+Datum
+resizable_shmem_resize(PG_FUNCTION_ARGS)
+{
+	int32		new_entries = PG_GETARG_INT32(0);
+	Size		new_size;
+
+	if (!resizable_shmem)
+		ereport(ERROR,
+				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+				 errmsg("resizable_shmem is not initialized")));
+
+	/*
+	 * Validate the request before computing the byte size: a negative or
+	 * zero count would yield a nonsensical Size, and anything beyond
+	 * max_size in the descriptor cannot be honored anyway.
+	 */
+	if (new_entries <= 0 || new_entries > TEST_MAX_ENTRIES)
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+				 errmsg("new_entries %d is out of range (1..%d)",
+						new_entries, TEST_MAX_ENTRIES)));
+
+	/* Cast before multiplying so the arithmetic happens in Size width. */
+	new_size = offsetof(TestResizableShmemStruct, data) +
+		((Size) new_entries * TEST_ENTRY_SIZE);
+	ShmemResizeRegistered(testShmemDesc.name, new_size);
+
+	/* Publish the new capacity only after the resize succeeded. */
+	resizable_shmem->num_entries = new_entries;
+
+	PG_RETURN_VOID();
+}
+
+/*
+ * Store an integer value at the given index of the shared data array.
+ *
+ * Raises an error when the module's shared memory is absent or when the
+ * index lies outside the structure's current capacity.
+ */
+Datum
+resizable_shmem_write(PG_FUNCTION_ARGS)
+{
+	int32		idx = PG_GETARG_INT32(0);
+	int32		val = PG_GETARG_INT32(1);
+
+	if (resizable_shmem == NULL)
+		ereport(ERROR,
+				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+				 errmsg("resizable_shmem is not initialized")));
+
+	/* Bounds-check against the capacity published by the last resize. */
+	if (idx < 0 || idx >= resizable_shmem->num_entries)
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+				 errmsg("entry_key %d is out of range (0..%d)",
+						idx, resizable_shmem->num_entries - 1)));
+
+	resizable_shmem->data[idx] = val;
+
+	PG_RETURN_VOID();
+}
+
+/*
+ * Fetch the integer value stored at the given index of the shared data
+ * array.
+ *
+ * Raises an error when the module's shared memory is absent or when the
+ * index lies outside the structure's current capacity.
+ */
+Datum
+resizable_shmem_read(PG_FUNCTION_ARGS)
+{
+	int32		idx = PG_GETARG_INT32(0);
+
+	if (!resizable_shmem)
+		ereport(ERROR,
+				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+				 errmsg("resizable_shmem is not initialized")));
+
+	/* Bounds-check against the capacity published by the last resize. */
+	if (idx < 0 || idx >= resizable_shmem->num_entries)
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+				 errmsg("entry_key %d is out of range (0..%d)",
+						idx, resizable_shmem->num_entries - 1)));
+
+	PG_RETURN_INT32(resizable_shmem->data[idx]);
+}
diff --git a/src/test/modules/resizable_shmem/resizable_shmem.conf b/src/test/modules/resizable_shmem/resizable_shmem.conf
new file mode 100644
index 00000000000..94738e0ac70
--- /dev/null
+++ b/src/test/modules/resizable_shmem/resizable_shmem.conf
@@ -0,0 +1 @@
+shared_preload_libraries = 'resizable_shmem'
diff --git a/src/test/modules/resizable_shmem/resizable_shmem.control b/src/test/modules/resizable_shmem/resizable_shmem.control
new file mode 100644
index 00000000000..1ce2c5ea21a
--- /dev/null
+++ b/src/test/modules/resizable_shmem/resizable_shmem.control
@@ -0,0 +1,5 @@
+# resizable_shmem extension test module
+comment = 'test module for testing resizable shared memory structure functionality'
+default_version = '1.0'
+module_pathname = '$libdir/resizable_shmem'
+relocatable = true
diff --git a/src/test/modules/resizable_shmem/specs/resizable_shmem.spec b/src/test/modules/resizable_shmem/specs/resizable_shmem.spec
new file mode 100644
index 00000000000..f3e7164367c
--- /dev/null
+++ b/src/test/modules/resizable_shmem/specs/resizable_shmem.spec
@@ -0,0 +1,37 @@
+# Test resizable shared memory structure
+#
+# It tests that a resizable shared memory structure can be resized from any
+# backend and that the new sizes are visible to all the backends. It uses
+# isolation test infrastructure so that resizing, reading and writing can be
+# interleaved.
+
+setup
+{
+  CREATE EXTENSION resizable_shmem;
+}
+
+teardown
+{
+  DROP EXTENSION resizable_shmem;
+}
+
+session "session1"
+step s1w2048 { SELECT resizable_shmem_write(0, 100), resizable_shmem_write(2047, 200); }
+step s1r8192 { SELECT resizable_shmem_read(0), resizable_shmem_read(2047), resizable_shmem_read(2048), resizable_shmem_read(8191); }
+step s1w6144 { SELECT resizable_shmem_write(6143, 999); }
+
+session "session2"
+step s2r2048 { SELECT resizable_shmem_read(0), resizable_shmem_read(2047); }
+step s2w8192 { SELECT resizable_shmem_write(8191, 500), resizable_shmem_write(2048, 300); }
+step s2r6144 { SELECT resizable_shmem_read(0), resizable_shmem_read(2047), resizable_shmem_read(2048), resizable_shmem_read(6143); }
+
+session "resizer"
+step zs8192 { SELECT resizable_shmem_resize(8192); }
+step zs6144 { SELECT resizable_shmem_resize(6144); }
+step zia { SELECT name, size, allocated_size FROM pg_shmem_allocations WHERE name = 'resizable_shmem'; }
+step zis { SELECT name, size, freeoffset, reserved_size FROM pg_shmem_segments WHERE name = 'resizable_shmem'; }
+
+# Test shrinking and expanding the shared memory, while doing so the previously
+# written data in the portion of the memory that doesn't undergo change should
+# not change.
+permutation zia zis s1w2048 s2r2048 zs8192 zia zis s2w8192 s1r8192 zs6144 zia zis s1w6144 s2r6144
diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out
index f4ee2bd7459..1e1bd1eb8b4 100644
--- a/src/test/regress/expected/rules.out
+++ b/src/test/regress/expected/rules.out
@@ -1768,14 +1768,21 @@ pg_shadow| SELECT pg_authid.rolname AS usename,
      LEFT JOIN pg_db_role_setting s ON (((pg_authid.oid = s.setrole) AND (s.setdatabase = (0)::oid))))
   WHERE pg_authid.rolcanlogin;
 pg_shmem_allocations| SELECT name,
+    segment,
     off,
     size,
     allocated_size
-   FROM pg_get_shmem_allocations() pg_get_shmem_allocations(name, off, size, allocated_size);
+   FROM pg_get_shmem_allocations() pg_get_shmem_allocations(name, segment, off, size, allocated_size);
 pg_shmem_allocations_numa| SELECT name,
     numa_node,
     size
    FROM pg_get_shmem_allocations_numa() pg_get_shmem_allocations_numa(name, numa_node, size);
+pg_shmem_segments| SELECT id,
+    name,
+    size,
+    freeoffset,
+    reserved_size
+   FROM pg_get_shmem_segments() pg_get_shmem_segments(id, name, size, freeoffset, reserved_size);
 pg_stat_activity| SELECT s.datid,
     d.datname,
     s.pid,
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 77518489412..293ed99fdd0 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -120,6 +120,7 @@ AmcheckOptions
 AnalyzeAttrComputeStatsFunc
 AnalyzeAttrFetchFunc
 AnalyzeForeignTable_function
 AnlExprData
 AnlIndexData
+AnonShmemData
 AnyArrayType
@@ -1887,6 +1888,7 @@ PGFInfoFunction
 PGFileType
 PGFunction
 PGIOAlignedBlock
+PGUsedShmemInfo
 PGLZ_HistEntry
 PGLZ_Strategy
 PGLoadBalanceType
@@ -2807,6 +2809,7 @@ ShippableCacheEntry
 ShmemAllocatorData
 ShippableCacheKey
 ShmemIndexEnt
+ShmemSegment
 ShutdownForeignScan_function
 ShutdownInformation
 ShutdownMode
-- 
2.34.1

