From cc0167a03c35a5630983b5bed725913383b47ed4 Mon Sep 17 00:00:00 2001
From: Kyotaro Horiguchi <horiguchi.kyotaro@lab.ntt.co.jp>
Date: Tue, 1 Nov 2016 18:28:58 +0900
Subject: [PATCH] Alternative Type 2 atomic.

Use pg_atomic_uint64 for holding progressAt.
---
 src/backend/access/transam/xlog.c | 42 +++++++++++----------------------------
 1 file changed, 12 insertions(+), 30 deletions(-)

diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 1eff059..bc78e9e 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -466,7 +466,6 @@ typedef struct
 {
 	LWLock		lock;
 	XLogRecPtr	insertingAt;
-	XLogRecPtr	progressAt;
 } WALInsertLock;
 
 /*
@@ -672,9 +671,13 @@ typedef struct XLogCtlData
 	 */
 	XLogRecPtr	lastFpwDisableRecPtr;
 
+	pg_atomic_uint64	progressAt; /* Not protected by info_lck; kept
+									 * here just for convenience for now */
+
 	slock_t		info_lck;		/* locks shared variables shown above */
 } XLogCtlData;
 
+
 static XLogCtlData *XLogCtl = NULL;
 
 /* a private copy of XLogCtl->Insert.WALInsertLocks, for convenience */
@@ -1021,23 +1024,13 @@ XLogInsertRecord(XLogRecData *rdata,
 		inserted = true;
 	}
 
-	/*
-	 * Update the progress LSN positions. At least one WAL insertion lock
-	 * is already taken appropriately before doing that, and it is just more
-	 * simple to do that here where WAL record data and type is at hand.
-	 * The progress is set at the start position of the record tracked that
-	 * is being added, making easier checkpoint progress tracking as the
-	 * control file already saves the start LSN position of the last
-	 * checkpoint run. If an exclusive lock is taken for WAL insertion,
-	 * there is actually no need to update all the progression fields, so
-	 * just do it on the first one.
-	 */
-	if ((flags & XLOG_NO_PROGRESS) == 0)
 	{
-		if (holdingAllLocks)
-			WALInsertLocks[0].l.progressAt = StartPos;
-		else
-			WALInsertLocks[MyLockNo].l.progressAt = StartPos;
+		volatile XLogCtlData *xlogctl = XLogCtl;
+		XLogRecPtr tmpstartpos = pg_atomic_read_u64(&xlogctl->progressAt);
+
+		while (tmpstartpos < StartPos)
+			pg_atomic_compare_exchange_u64(&xlogctl->progressAt,
+										   &tmpstartpos, StartPos);
 	}
 
 	if (inserted)
@@ -4763,7 +4756,6 @@ XLOGShmemInit(void)
 	{
 		LWLockInitialize(&WALInsertLocks[i].l.lock, LWTRANCHE_WAL_INSERT);
 		WALInsertLocks[i].l.insertingAt = InvalidXLogRecPtr;
-		WALInsertLocks[i].l.progressAt = InvalidXLogRecPtr;
 	}
 
 	/*
@@ -8051,7 +8043,7 @@ XLogRecPtr
 GetProgressRecPtr(void)
 {
 	XLogRecPtr	res = InvalidXLogRecPtr;
-	int			i;
+	volatile XLogCtlData *xlogctl = XLogCtl;
 
 	/*
 	 * Look at the latest LSN position referring to the activity done by
@@ -8060,17 +8052,7 @@ GetProgressRecPtr(void)
 	 * Taking a lock is as well necessary to prevent potential torn reads
 	 * on some platforms.
 	 */
-	for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
-	{
-		XLogRecPtr	progress_lsn;
-
-		LWLockAcquire(&WALInsertLocks[i].l.lock, LW_EXCLUSIVE);
-		progress_lsn = WALInsertLocks[i].l.progressAt;
-		LWLockRelease(&WALInsertLocks[i].l.lock);
-
-		if (res < progress_lsn)
-			res = progress_lsn;
-	}
+	res = pg_atomic_read_u64(&xlogctl->progressAt);
 
 	return res;
 }
-- 
2.9.2

