From a87fba3b9a26d2fb5d56bcde709b3315631dd8f6 Mon Sep 17 00:00:00 2001
From: Yura Sokolov <y.sokolov@postgrespro.ru>
Date: Wed, 9 Jul 2025 10:49:38 +0300
Subject: [PATCH v1] Optimise LWLockAttemptLock by separating the SHARED and
 EXCLUSIVE lock acquisition paths.

Don't attempt any write to the lock state unless the lock looks free.
LWLockAcquire and other callers invoke LWLockAttemptLock at least twice for
correctness, so unconditionally performing a CAS ("always swap in the
value") does not pay off.

Use an atomic fetch-add for acquiring SHARED locks.
---
 src/backend/storage/lmgr/lwlock.c | 67 ++++++++++++++++---------------
 1 file changed, 34 insertions(+), 33 deletions(-)

diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 46f44bc4511..b591831961d 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -796,6 +796,8 @@ static bool
 LWLockAttemptLock(LWLock *lock, LWLockMode mode)
 {
 	uint32		old_state;
+	uint32		desired_state;
+	bool		lock_free;
 
 	Assert(mode == LW_EXCLUSIVE || mode == LW_SHARED);
 
@@ -805,51 +807,50 @@ LWLockAttemptLock(LWLock *lock, LWLockMode mode)
 	 */
 	old_state = pg_atomic_read_u32(&lock->state);
 
-	/* loop until we've determined whether we could acquire the lock or not */
-	while (true)
+	if (mode == LW_SHARED)
 	{
-		uint32		desired_state;
-		bool		lock_free;
-
-		desired_state = old_state;
-
-		if (mode == LW_EXCLUSIVE)
-		{
-			lock_free = (old_state & LW_LOCK_MASK) == 0;
-			if (lock_free)
-				desired_state += LW_VAL_EXCLUSIVE;
-		}
-		else
+		while (true)
 		{
 			lock_free = (old_state & LW_VAL_EXCLUSIVE) == 0;
 			if (lock_free)
-				desired_state += LW_VAL_SHARED;
-		}
+			{
+				old_state = pg_atomic_fetch_add_u32(&lock->state,
+													LW_VAL_SHARED);
+				if ((old_state & LW_VAL_EXCLUSIVE) == 0)
+					return false;
 
-		/*
-		 * Attempt to swap in the state we are expecting. If we didn't see
-		 * lock to be free, that's just the old value. If we saw it as free,
-		 * we'll attempt to mark it acquired. The reason that we always swap
-		 * in the value is that this doubles as a memory barrier. We could try
-		 * to be smarter and only swap in values if we saw the lock as free,
-		 * but benchmark haven't shown it as beneficial so far.
-		 *
-		 * Retry if the value changed since we last looked at it.
-		 */
-		if (pg_atomic_compare_exchange_u32(&lock->state,
-										   &old_state, desired_state))
+				old_state = pg_atomic_sub_fetch_u32(&lock->state,
+													LW_VAL_SHARED);
+			}
+			else
+			{
+				pg_read_barrier();
+				return true;
+			}
+		}
+	}
+
+	while (true)
+	{
+		lock_free = (old_state & LW_LOCK_MASK) == 0;
+		desired_state = old_state | LW_VAL_EXCLUSIVE;
+
+		if (lock_free)
 		{
-			if (lock_free)
+			if (pg_atomic_compare_exchange_u32(&lock->state, &old_state,
+											   desired_state))
 			{
 				/* Great! Got the lock. */
 #ifdef LOCK_DEBUG
-				if (mode == LW_EXCLUSIVE)
-					lock->owner = MyProc;
+				lock->owner = MyProc;
 #endif
 				return false;
 			}
-			else
-				return true;	/* somebody else has the lock */
+		}
+		else
+		{
+			pg_read_barrier();
+			return true;
 		}
 	}
 	pg_unreachable();
-- 
2.43.0

