From f451b44f81e57759a876fc9ca8360826da0537d9 Mon Sep 17 00:00:00 2001
From: Heikki Linnakangas <heikki.linnakangas@iki.fi>
Date: Thu, 26 Mar 2026 13:26:56 +0200
Subject: [PATCH v1 3/5] Use a separate spinlock to protect LWLockTranches

Previously we reused the shmem allocator's ShmemLock for this. For the
sake of modularity, it's clearer to have a dedicated lock. Now that
lwlock.c has its own shared memory struct, this is easy to do.
---
 src/backend/storage/ipc/shmem.c   |  8 +++-----
 src/backend/storage/lmgr/lwlock.c | 24 +++++++++++++-----------
 src/include/storage/shmem.h       |  1 -
 3 files changed, 16 insertions(+), 17 deletions(-)

diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 0fb3bc59929..f392faa534f 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -105,7 +105,6 @@ static void *ShmemBase;			/* start address of shared memory */
 static void *ShmemEnd;			/* end+1 address of shared memory */
 
 static ShmemAllocatorData *ShmemAllocator;
-slock_t    *ShmemLock;			/* points to ShmemAllocator->shmem_lock */
 static HTAB *ShmemIndex = NULL; /* primary index hashtable for shmem */
 
 /* To get reliable results for NUMA inquiry we need to "touch pages" once */
@@ -166,7 +165,6 @@ InitShmemAllocator(PGShmemHeader *seghdr)
 		ShmemAllocator->free_offset = offset;
 	}
 
-	ShmemLock = &ShmemAllocator->shmem_lock;
 	ShmemSegHdr = seghdr;
 	ShmemBase = seghdr;
 	ShmemEnd = (char *) ShmemBase + seghdr->totalsize;
@@ -200,7 +198,7 @@ InitShmemAllocator(PGShmemHeader *seghdr)
  *
  * Throws error if request cannot be satisfied.
  *
- * Assumes ShmemLock and ShmemSegHdr are initialized.
+ * Assumes ShmemSegHdr is initialized.
  */
 void *
 ShmemAlloc(Size size)
@@ -259,7 +257,7 @@ ShmemAllocRaw(Size size, Size *allocated_size)
 
 	Assert(ShmemSegHdr != NULL);
 
-	SpinLockAcquire(ShmemLock);
+	SpinLockAcquire(&ShmemAllocator->shmem_lock);
 
 	newStart = ShmemAllocator->free_offset;
 
@@ -272,7 +270,7 @@ ShmemAllocRaw(Size size, Size *allocated_size)
 	else
 		newSpace = NULL;
 
-	SpinLockRelease(ShmemLock);
+	SpinLockRelease(&ShmemAllocator->shmem_lock);
 
 	/* note this assert is okay with newSpace == NULL */
 	Assert(newSpace == (void *) CACHELINEALIGN(newSpace));
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 2e3f2f2a6ff..4c01afce2f7 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -191,6 +191,8 @@ typedef struct LWLockTrancheShmemData
 	}			user_defined[MAX_USER_DEFINED_TRANCHES];
 
 	int			num_user_defined;	/* 'user_defined' entries in use */
+
+	slock_t		lock;			/* protects the above */
 } LWLockTrancheShmemData;
 
 LWLockTrancheShmemData *LWLockTranches;
@@ -437,6 +439,7 @@ CreateLWLocks(void)
 			ShmemAlloc(sizeof(LWLockTrancheShmemData));
 
 		/* Initialize the dynamic-allocation counter for tranches */
+		SpinLockInit(&LWLockTranches->lock);
 		LWLockTranches->num_user_defined = 0;
 
 		/* Allocate and initialize the main array */
@@ -522,9 +525,9 @@ GetNamedLWLockTranche(const char *tranche_name)
 {
 	int			i;
 
-	SpinLockAcquire(ShmemLock);
+	SpinLockAcquire(&LWLockTranches->lock);
 	LocalNumUserDefinedTranches = LWLockTranches->num_user_defined;
-	SpinLockRelease(ShmemLock);
+	SpinLockRelease(&LWLockTranches->lock);
 
 	/*
 	 * Obtain the position of base address of LWLock belonging to requested
@@ -576,14 +579,13 @@ LWLockNewTrancheId(const char *name)
 						   NAMEDATALEN - 1)));
 
 	/*
-	 * We use the ShmemLock spinlock to protect the counter and the tranche
-	 * names.
+	 * We use the spinlock to protect the counter and the tranche names.
 	 */
-	SpinLockAcquire(ShmemLock);
+	SpinLockAcquire(&LWLockTranches->lock);
 
 	if (LWLockTranches->num_user_defined >= MAX_USER_DEFINED_TRANCHES)
 	{
-		SpinLockRelease(ShmemLock);
+		SpinLockRelease(&LWLockTranches->lock);
 		ereport(ERROR,
 				(errmsg("maximum number of tranches already registered"),
 				 errdetail("No more than %d tranches may be registered.",
@@ -602,7 +604,7 @@ LWLockNewTrancheId(const char *name)
 	/* the locks are not in the main array */
 	LWLockTranches->user_defined[idx].main_array_idx = -1;
 
-	SpinLockRelease(ShmemLock);
+	SpinLockRelease(&LWLockTranches->lock);
 
 	return LWTRANCHE_FIRST_USER_DEFINED + idx;
 }
@@ -712,14 +714,14 @@ GetLWTrancheName(uint16 trancheId)
 	 * lookups can avoid taking the spinlock as long as the backend-local
 	 * counter (LocalNumUserDefinedTranches) is greater than the requested
 	 * tranche ID.  Else, we need to first update the backend-local counter
-	 * with ShmemLock held before attempting the lookup again.  In practice,
-	 * the latter case is probably rare.
+	 * with the spinlock held before attempting the lookup again.  In
+	 * practice, the latter case is probably rare.
 	 */
 	if (idx >= LocalNumUserDefinedTranches)
 	{
-		SpinLockAcquire(ShmemLock);
+		SpinLockAcquire(&LWLockTranches->lock);
 		LocalNumUserDefinedTranches = LWLockTranches->num_user_defined;
-		SpinLockRelease(ShmemLock);
+		SpinLockRelease(&LWLockTranches->lock);
 
 		if (idx >= LocalNumUserDefinedTranches)
 			elog(ERROR, "tranche %d is not registered", trancheId);
diff --git a/src/include/storage/shmem.h b/src/include/storage/shmem.h
index 0de8a36429b..2a9e9becd26 100644
--- a/src/include/storage/shmem.h
+++ b/src/include/storage/shmem.h
@@ -26,7 +26,6 @@
 
 
 /* shmem.c */
-extern PGDLLIMPORT slock_t *ShmemLock;
 typedef struct PGShmemHeader PGShmemHeader; /* avoid including
 											 * storage/pg_shmem.h here */
 extern void InitShmemAllocator(PGShmemHeader *seghdr);
-- 
2.47.3

