GSoC 2017: weekly progress reports (week 6)

Started by Shubham Barai, over 8 years ago · 54 messages
#1Shubham Barai
shubhambaraiss@gmail.com

Project: Explicitly support predicate locks in index AMs besides b-tree

I have done the following tasks during this week.

1) worked on how to detect rw conflicts when fast update is enabled

2) created tests for different gin operators

3) went through some patches on commitfest to review

4) solved some issues that came up while testing

link to the code:
https://github.com/shubhambaraiss/postgres/commit/1365d75db36a4e398406dd266c3d4fe8e1ec30ff

<https://mailtrack.io/> Sent with Mailtrack
<https://mailtrack.io/install?source=signature&lang=en&referral=shubhambaraiss@gmail.com&idSignature=22>

#2Shubham Barai
shubhambaraiss@gmail.com
In reply to: Shubham Barai (#1)
1 attachment(s)
Re: GSoC 2017: weekly progress reports (week 6)

Hi,

I am attaching a patch for predicate locking in gin index.

Regards,
Shubham

<https://mailtrack.io/> Sent with Mailtrack
<https://mailtrack.io/install?source=signature&lang=en&referral=shubhambaraiss@gmail.com&idSignature=22>

On 11 July 2017 at 19:10, Shubham Barai <shubhambaraiss@gmail.com> wrote:

Show quoted text

Project: Explicitly support predicate locks in index AMs besides b-tree

I have done the following tasks during this week.

1) worked on how to detect rw conflicts when fast update is enabled

2) created tests for different gin operators

3) went through some patches on commitfest to review

4) solved some issues that came up while testing

link to the code: https://github.com/shubhambaraiss/postgres/commit/
1365d75db36a4e398406dd266c3d4fe8e1ec30ff

<https://mailtrack.io/> Sent with Mailtrack
<https://mailtrack.io/install?source=signature&lang=en&referral=shubhambaraiss@gmail.com&idSignature=22>

Attachments:

0001-Predicate-locking-in-gin-index.patch (application/octet-stream; name=0001-Predicate-locking-in-gin-index.patch) — Download
From db911b6d08bf00818eb67d4195aaab8a142ecd66 Mon Sep 17 00:00:00 2001
From: shubhambaraiss <you@example.com>
Date: Tue, 11 Jul 2017 05:26:52 +0530
Subject: [PATCH] Predicate locking in gin index

---
 src/backend/access/gin/ginbtree.c               |   5 +
 src/backend/access/gin/ginget.c                 |  38 +++
 src/backend/access/gin/gininsert.c              |   5 +
 src/backend/access/gin/ginutil.c                |   2 +-
 src/backend/access/gin/ginvacuum.c              |  11 +-
 src/backend/storage/lmgr/README-SSI             |   5 +
 src/test/isolation/expected/predicate-gin-2.out | 321 ++++++++++++++++++++++
 src/test/isolation/expected/predicate-gin.out   | 339 ++++++++++++++++++++++++
 src/test/isolation/isolation_schedule           |   2 +
 src/test/isolation/specs/predicate-gin-2.spec   |  41 +++
 src/test/isolation/specs/predicate-gin.spec     |  41 +++
 11 files changed, 807 insertions(+), 3 deletions(-)
 create mode 100644 src/test/isolation/expected/predicate-gin-2.out
 create mode 100644 src/test/isolation/expected/predicate-gin.out
 create mode 100644 src/test/isolation/specs/predicate-gin-2.spec
 create mode 100644 src/test/isolation/specs/predicate-gin.spec

diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index b02cb8a..0379b85 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -17,6 +17,7 @@
 #include "access/gin_private.h"
 #include "access/ginxlog.h"
 #include "access/xloginsert.h"
+#include "storage/predicate.h"
 #include "miscadmin.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
@@ -524,6 +525,10 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			GinPageGetOpaque(newrpage)->rightlink = savedRightLink;
 			GinPageGetOpaque(newlpage)->flags |= GIN_INCOMPLETE_SPLIT;
 			GinPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer);
+
+			PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
 		}
 
 		/*
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 56a5bf4..63691c5 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -16,9 +16,11 @@
 
 #include "access/gin_private.h"
 #include "access/relscan.h"
+#include "storage/predicate.h"
 #include "miscadmin.h"
 #include "utils/datum.h"
 #include "utils/memutils.h"
+#include "utils/rel.h"
 
 /* GUC parameter */
 int			GinFuzzySearchLimit = 0;
@@ -73,6 +75,10 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	/* Descend to the leftmost leaf page */
 	stack = ginScanBeginPostingTree(&btree, index, rootPostingTree, snapshot);
 	buffer = stack->buffer;
+
+	if (!GinGetUseFastUpdate(index))
+		PredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
+
 	IncrBufferRefCount(buffer); /* prevent unpin in freeGinBtreeStack */
 
 	freeGinBtreeStack(stack);
@@ -94,6 +100,9 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 			break;				/* no more pages */
 
 		buffer = ginStepRight(buffer, index, GIN_SHARE);
+
+		if (!GinGetUseFastUpdate(index))
+			PredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
 	}
 
 	UnlockReleaseBuffer(buffer);
@@ -323,6 +332,17 @@ restartScanEntry:
 						ginstate);
 	stackEntry = ginFindLeafPage(&btreeEntry, true, snapshot);
 	page = BufferGetPage(stackEntry->buffer);
+
+	/*
+	 * If fast update is enabled, we acquire a predicate lock on the entire
+	 * relation as fast update postpones the insertion of tuples into index
+	 * structure due to which we can't detect rw conflicts.
+	 */
+	if (GinGetUseFastUpdate(ginstate->index))
+		PredicateLockRelation(ginstate->index, snapshot);
+	else
+		PredicateLockPage(ginstate->index, BufferGetBlockNumber(stackEntry->buffer), snapshot);
+
 	/* ginFindLeafPage() will have already checked snapshot age. */
 	needUnlock = TRUE;
 
@@ -391,6 +411,9 @@ restartScanEntry:
 											rootPostingTree, snapshot);
 			entry->buffer = stack->buffer;
 
+			if (!GinGetUseFastUpdate(ginstate->index))
+				PredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
 			/*
 			 * We keep buffer pinned because we need to prevent deletion of
 			 * page during scan. See GIN's vacuum implementation. RefCount is
@@ -633,6 +656,9 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 		entry->btree.fullScan = false;
 		stack = ginFindLeafPage(&entry->btree, true, snapshot);
 
+		if (!GinGetUseFastUpdate(ginstate->index))
+			PredicateLockPage(ginstate->index, BufferGetBlockNumber(stack->buffer), snapshot);
+
 		/* we don't need the stack, just the buffer. */
 		entry->buffer = stack->buffer;
 		IncrBufferRefCount(entry->buffer);
@@ -677,6 +703,11 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 			entry->buffer = ginStepRight(entry->buffer,
 										 ginstate->index,
 										 GIN_SHARE);
+
+			if (!GinGetUseFastUpdate(ginstate->index))
+				PredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
+
 			page = BufferGetPage(entry->buffer);
 		}
 		stepright = true;
@@ -1733,6 +1764,13 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 		return;
 	}
 
+	/*
+	 * If fast update is disabled, but some items still exist in the pending
+	 * list, then a predicate lock on the entire relation is required.
+	 */
+	if (!GinGetUseFastUpdate(scan->indexRelation))
+		PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
+
 	pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
 	LockBuffer(pos.pendingBuffer, GIN_SHARE);
 	pos.firstOffset = FirstOffsetNumber;
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 5378011..f9b661f 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -17,6 +17,7 @@
 #include "access/gin_private.h"
 #include "access/ginxlog.h"
 #include "access/xloginsert.h"
+#include "storage/predicate.h"
 #include "catalog/index.h"
 #include "miscadmin.h"
 #include "storage/bufmgr.h"
@@ -196,6 +197,8 @@ ginEntryInsert(GinState *ginstate,
 	stack = ginFindLeafPage(&btree, false, NULL);
 	page = BufferGetPage(stack->buffer);
 
+	CheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
+
 	if (btree.findItem(&btree, stack))
 	{
 		/* found pre-existing entry */
@@ -513,6 +516,8 @@ gininsert(Relation index, Datum *values, bool *isnull,
 
 		memset(&collector, 0, sizeof(GinTupleCollector));
 
+		CheckForSerializableConflictIn(index, NULL, NULL);
+
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleFastCollect(ginstate, &collector,
 									(OffsetNumber) (i + 1),
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 91e4a8c..5cfb45d 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -49,7 +49,7 @@ ginhandler(PG_FUNCTION_ARGS)
 	amroutine->amsearchnulls = false;
 	amroutine->amstorage = true;
 	amroutine->amclusterable = false;
-	amroutine->ampredlocks = false;
+	amroutine->ampredlocks = true;
 	amroutine->amcanparallel = false;
 	amroutine->amkeytype = InvalidOid;
 
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 31425e9..9b97fd9 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -153,11 +153,18 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
 
 	LockBuffer(lBuffer, GIN_EXCLUSIVE);
 
+	page = BufferGetPage(dBuffer);
+	rightlink = GinPageGetOpaque(page)->rightlink;
+
+	/*
+	 * Any insert which would have gone on the leaf block will now go to its
+	 * right sibling.
+	 */
+	PredicateLockPageCombine(gvs->index, deleteBlkno, rightlink);
+
 	START_CRIT_SECTION();
 
 	/* Unlink the page by changing left sibling's rightlink */
-	page = BufferGetPage(dBuffer);
-	rightlink = GinPageGetOpaque(page)->rightlink;
 
 	page = BufferGetPage(lBuffer);
 	GinPageGetOpaque(page)->rightlink = rightlink;
diff --git a/src/backend/storage/lmgr/README-SSI b/src/backend/storage/lmgr/README-SSI
index a9dc01f..37c360c 100644
--- a/src/backend/storage/lmgr/README-SSI
+++ b/src/backend/storage/lmgr/README-SSI
@@ -379,6 +379,11 @@ level during a GiST search. An index insert at the leaf level can
 then be trusted to ripple up to all levels and locations where
 conflicting predicate locks may exist.
 
+    * Gin searches acquire predicate locks only on the leaf pages.
+If, however, fast update is enabled, a predicate lock on the index
+relation is required. During a page split, a predicate lock is copied
+from the original page to the new page.
+
     * The effects of page splits, overflows, consolidations, and
 removals must be carefully reviewed to ensure that predicate locks
 aren't "lost" during those operations, or kept with pages which could
diff --git a/src/test/isolation/expected/predicate-gin-2.out b/src/test/isolation/expected/predicate-gin-2.out
new file mode 100644
index 0000000..9cc5d3b
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin-2.out
@@ -0,0 +1,321 @@
+Parsed test spec with 2 sessions
+
+starting permutation: rxy1 wx1 c1 rxy2 wy2 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step c1: COMMIT;
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step c2: COMMIT;
+
+starting permutation: rxy1 wx1 rxy2 c1 wy2 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step c2: COMMIT;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c1 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c2 c1
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy1 rxy2 wx1 c1 wy2 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step c2: COMMIT;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c1 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c2 c1
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy1 rxy2 wy2 wx1 c1 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy1 rxy2 wy2 wx1 c2 c1
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy1 rxy2 wy2 c2 wx1 c1
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step c1: COMMIT;
+
+starting permutation: rxy2 rxy1 wx1 c1 wy2 c2
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step c2: COMMIT;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c1 c2
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c2 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy2 rxy1 wy2 wx1 c1 c2
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy2 rxy1 wy2 wx1 c2 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy2 rxy1 wy2 c2 wx1 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step c1: COMMIT;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c1 c2
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c2 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy2 wy2 rxy1 c2 wx1 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step c1: COMMIT;
+
+starting permutation: rxy2 wy2 c2 rxy1 wx1 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+10620          
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g;
+step c2: COMMIT;
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+10260          
+step wx1: insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;
+step c1: COMMIT;
diff --git a/src/test/isolation/expected/predicate-gin.out b/src/test/isolation/expected/predicate-gin.out
new file mode 100644
index 0000000..ab293f6
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin.out
@@ -0,0 +1,339 @@
+Parsed test spec with 2 sessions
+
+starting permutation: rxy1 wx1 c1 rxy2 wy2 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+1004           
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+
+starting permutation: rxy1 wx1 rxy2 c1 wy2 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c1 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 wx1 rxy2 wy2 c2 c1
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 c1 wy2 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c1 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 wy2 c2 c1
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c1 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c2 c1
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 c2 wx1 c1
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: COMMIT;
+
+starting permutation: rxy2 rxy1 wx1 c1 wy2 c2
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c1 c2
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wx1 wy2 c2 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c1 c2
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c2 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 c2 wx1 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: COMMIT;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c1 c2
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 wx1 c2 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 c2 wx1 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: COMMIT;
+
+starting permutation: rxy2 wy2 c2 rxy1 wx1 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+502            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index 32c965b..3099d8b 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -62,3 +62,5 @@ test: sequence-ddl
 test: async-notify
 test: vacuum-reltuples
 test: timeouts
+test: predicate-gin
+test: predicate-gin-2
diff --git a/src/test/isolation/specs/predicate-gin-2.spec b/src/test/isolation/specs/predicate-gin-2.spec
new file mode 100644
index 0000000..954d6f6
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin-2.spec
@@ -0,0 +1,41 @@
+# Test for page level predicate locking in gin
+#
+# Test to check false positives.
+#
+# Queries are written in such a way that an index scan(from one transaction) and an index insert(from another transaction) will try to access different parts(sub-tree) of the index.
+
+
+setup
+{
+ create table gin_tbl(id int4, p int4[]);
+ create index ginidx on gin_tbl using gin(p) with 
+ (fastupdate = off);
+ insert into gin_tbl select g, array[1, g*2, 2] from generate_series(11, 100) g;
+ insert into gin_tbl select g, array[3, g*2, 4] from generate_series(11, 100) g;
+}
+
+teardown
+{
+ DROP TABLE gin_tbl;
+}
+
+session "s1"
+setup		{ 
+		  BEGIN ISOLATION LEVEL SERIALIZABLE;
+		  set enable_seqscan=off;
+		}
+step "rxy1"	{ select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2]; }
+step "wx1"	{ insert into gin_tbl select g, array[g, g*2, g*2] 
+		  from generate_series(1000, 1100) g;}
+step "c1"	{ COMMIT; }
+
+session "s2"
+setup		{ 
+		  BEGIN ISOLATION LEVEL SERIALIZABLE;
+		  set enable_seqscan=off;
+		}
+
+step "rxy2"	{ select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4]; }
+step "wy2"	{ insert into gin_tbl select g, array[g, g*2, g*3] 
+		  from generate_series(1000, 1100) g; }
+step "c2"	{ COMMIT; }
diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec
new file mode 100644
index 0000000..7d9f446
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin.spec
@@ -0,0 +1,41 @@
+# Test for page level predicate locking in gin
+#
+# Test to verify serialization failures
+#
+# Queries are written in such a way that an index scan(from one transaction) and an index insert(from another transaction) will try to access the same part(sub-tree) of the index.
+
+
+setup
+{
+ create table gin_tbl(id int4, p int4[]);
+ create index ginidx on gin_tbl using gin(p) with 
+ (fastupdate = off);
+ insert into gin_tbl select g, array[1, 2, g*2] from generate_series(1, 200) g;
+ insert into gin_tbl select g, array[3, 4, g*3] from generate_series(1, 200) g;
+}
+
+teardown
+{
+ DROP TABLE gin_tbl;
+}
+
+session "s1"
+setup		{ 
+		  BEGIN ISOLATION LEVEL SERIALIZABLE;
+		  set enable_seqscan=off;
+		}
+step "rxy1"	{ select sum(p[2]) from gin_tbl where p @> array[1,2]; }
+step "wx1"	{ insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;}
+step "c1"	{ COMMIT; }
+
+session "s2"
+setup		{ 
+		  BEGIN ISOLATION LEVEL SERIALIZABLE;
+		  set enable_seqscan=off;
+		}
+
+step "rxy2"	{ select sum(p[2]) from gin_tbl where p @> array[3,4]; }
+step "wy2"	{ insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g; }
+step "c2"	{ COMMIT; }
-- 
1.9.1

#3Shubham Barai
shubhambaraiss@gmail.com
In reply to: Shubham Barai (#2)
1 attachment(s)
Re: GSoC 2017: weekly progress reports (week 6)

Hi,

Please find the updated patch for predicate locking in gin index here.

There was a small issue in the previous patch. I didn't consider the case
where only the root page exists in the tree, there is a predicate lock on
it, and it gets split.

If we treat the original page as a left page and create a new root and right
page, then we just need to copy the predicate lock from the left page to the
right page (this is the case in B-tree).

But if we treat the original page as a root and create a new left and right
page, then we need to copy the predicate lock to both new pages (this is the
case in rum and gin).

link to updated code and tests:
https://github.com/shubhambaraiss/postgres/commit/6172639a104785f051cb4aa0d511c58f2bae65a6

Regards,
Shubham

<https://mailtrack.io/&gt; Sent with Mailtrack
<https://mailtrack.io/install?source=signature&amp;lang=en&amp;referral=shubhambaraiss@gmail.com&amp;idSignature=22&gt;

On 17 July 2017 at 19:08, Shubham Barai <shubhambaraiss@gmail.com> wrote:

Show quoted text

Hi,

I am attaching a patch for predicate locking in gin index.

Regards,
Shubham

<https://mailtrack.io/&gt; Sent with Mailtrack
<https://mailtrack.io/install?source=signature&amp;lang=en&amp;referral=shubhambaraiss@gmail.com&amp;idSignature=22&gt;

On 11 July 2017 at 19:10, Shubham Barai <shubhambaraiss@gmail.com> wrote:

Project: Explicitly support predicate locks in index AMs besides b-tree

I have done following tasks during this week.

1) worked on how to detect rw conflicts when fast update is enabled

2) created tests for different gin operators

3) went through some patches on commitfest to review

4) solved some issues that came up while testing

link to the code: https://github.com/shubhambaraiss/postgres/commit/1365
d75db36a4e398406dd266c3d4fe8e1ec30ff

<https://mailtrack.io/&gt; Sent with Mailtrack
<https://mailtrack.io/install?source=signature&amp;lang=en&amp;referral=shubhambaraiss@gmail.com&amp;idSignature=22&gt;

Attachments:

Predicate-locking-in-gin-index.patchapplication/octet-stream; name=Predicate-locking-in-gin-index.patchDownload
From 6172639a104785f051cb4aa0d511c58f2bae65a6 Mon Sep 17 00:00:00 2001
From: shubhambaraiss <you@example.com>
Date: Tue, 11 Jul 2017 05:26:52 +0530
Subject: [PATCH] Predicate locking in gin index

---
 src/backend/access/gin/ginbtree.c               |  14 +
 src/backend/access/gin/ginget.c                 |  38 +++
 src/backend/access/gin/gininsert.c              |   5 +
 src/backend/access/gin/ginutil.c                |   2 +-
 src/backend/access/gin/ginvacuum.c              |  11 +-
 src/backend/storage/lmgr/README-SSI             |   5 +
 src/test/isolation/expected/predicate-gin-2.out | 321 ++++++++++++++++++++++
 src/test/isolation/expected/predicate-gin.out   | 339 ++++++++++++++++++++++++
 src/test/isolation/isolation_schedule           |   2 +
 src/test/isolation/specs/predicate-gin-2.spec   |  41 +++
 src/test/isolation/specs/predicate-gin.spec     |  41 +++
 11 files changed, 816 insertions(+), 3 deletions(-)
 create mode 100644 src/test/isolation/expected/predicate-gin-2.out
 create mode 100644 src/test/isolation/expected/predicate-gin.out
 create mode 100644 src/test/isolation/specs/predicate-gin-2.spec
 create mode 100644 src/test/isolation/specs/predicate-gin.spec

diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index b02cb8a..22ef7e0 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -17,6 +17,7 @@
 #include "access/gin_private.h"
 #include "access/ginxlog.h"
 #include "access/xloginsert.h"
+#include "storage/predicate.h"
 #include "miscadmin.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
@@ -515,6 +516,15 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			btree->fillRoot(btree, newrootpg,
 							BufferGetBlockNumber(lbuffer), newlpage,
 							BufferGetBlockNumber(rbuffer), newrpage);
+
+			PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(lbuffer));
+
+			PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+
 		}
 		else
 		{
@@ -524,6 +534,10 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			GinPageGetOpaque(newrpage)->rightlink = savedRightLink;
 			GinPageGetOpaque(newlpage)->flags |= GIN_INCOMPLETE_SPLIT;
 			GinPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer);
+
+			PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
 		}
 
 		/*
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 56a5bf4..63691c5 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -16,9 +16,11 @@
 
 #include "access/gin_private.h"
 #include "access/relscan.h"
+#include "storage/predicate.h"
 #include "miscadmin.h"
 #include "utils/datum.h"
 #include "utils/memutils.h"
+#include "utils/rel.h"
 
 /* GUC parameter */
 int			GinFuzzySearchLimit = 0;
@@ -73,6 +75,10 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	/* Descend to the leftmost leaf page */
 	stack = ginScanBeginPostingTree(&btree, index, rootPostingTree, snapshot);
 	buffer = stack->buffer;
+
+	if (!GinGetUseFastUpdate(index))
+		PredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
+
 	IncrBufferRefCount(buffer); /* prevent unpin in freeGinBtreeStack */
 
 	freeGinBtreeStack(stack);
@@ -94,6 +100,9 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 			break;				/* no more pages */
 
 		buffer = ginStepRight(buffer, index, GIN_SHARE);
+
+		if (!GinGetUseFastUpdate(index))
+			PredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
 	}
 
 	UnlockReleaseBuffer(buffer);
@@ -323,6 +332,17 @@ restartScanEntry:
 						ginstate);
 	stackEntry = ginFindLeafPage(&btreeEntry, true, snapshot);
 	page = BufferGetPage(stackEntry->buffer);
+
+	/*
+	 * If fast update is enabled, we acquire a predicate lock on the entire
+	 * relation as fast update postpones the insertion of tuples into index
+	 * structure due to which we can't detect rw conflicts.
+	 */
+	if (GinGetUseFastUpdate(ginstate->index))
+		PredicateLockRelation(ginstate->index, snapshot);
+	else
+		PredicateLockPage(ginstate->index, BufferGetBlockNumber(stackEntry->buffer), snapshot);
+
 	/* ginFindLeafPage() will have already checked snapshot age. */
 	needUnlock = TRUE;
 
@@ -391,6 +411,9 @@ restartScanEntry:
 											rootPostingTree, snapshot);
 			entry->buffer = stack->buffer;
 
+			if (!GinGetUseFastUpdate(ginstate->index))
+				PredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
 			/*
 			 * We keep buffer pinned because we need to prevent deletion of
 			 * page during scan. See GIN's vacuum implementation. RefCount is
@@ -633,6 +656,9 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 		entry->btree.fullScan = false;
 		stack = ginFindLeafPage(&entry->btree, true, snapshot);
 
+		if (!GinGetUseFastUpdate(ginstate->index))
+			PredicateLockPage(ginstate->index, BufferGetBlockNumber(stack->buffer), snapshot);
+
 		/* we don't need the stack, just the buffer. */
 		entry->buffer = stack->buffer;
 		IncrBufferRefCount(entry->buffer);
@@ -677,6 +703,11 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 			entry->buffer = ginStepRight(entry->buffer,
 										 ginstate->index,
 										 GIN_SHARE);
+
+			if (!GinGetUseFastUpdate(ginstate->index))
+				PredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
+
 			page = BufferGetPage(entry->buffer);
 		}
 		stepright = true;
@@ -1733,6 +1764,13 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 		return;
 	}
 
+	/*
+	 * If fast update is disabled, but some items still exist in the pending
+	 * list, then a predicate lock on the entire relation is required.
+	 */
+	if (!GinGetUseFastUpdate(scan->indexRelation))
+		PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
+
 	pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
 	LockBuffer(pos.pendingBuffer, GIN_SHARE);
 	pos.firstOffset = FirstOffsetNumber;
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 5378011..f9b661f 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -17,6 +17,7 @@
 #include "access/gin_private.h"
 #include "access/ginxlog.h"
 #include "access/xloginsert.h"
+#include "storage/predicate.h"
 #include "catalog/index.h"
 #include "miscadmin.h"
 #include "storage/bufmgr.h"
@@ -196,6 +197,8 @@ ginEntryInsert(GinState *ginstate,
 	stack = ginFindLeafPage(&btree, false, NULL);
 	page = BufferGetPage(stack->buffer);
 
+	CheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
+
 	if (btree.findItem(&btree, stack))
 	{
 		/* found pre-existing entry */
@@ -513,6 +516,8 @@ gininsert(Relation index, Datum *values, bool *isnull,
 
 		memset(&collector, 0, sizeof(GinTupleCollector));
 
+		CheckForSerializableConflictIn(index, NULL, NULL);
+
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleFastCollect(ginstate, &collector,
 									(OffsetNumber) (i + 1),
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 91e4a8c..5cfb45d 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -49,7 +49,7 @@ ginhandler(PG_FUNCTION_ARGS)
 	amroutine->amsearchnulls = false;
 	amroutine->amstorage = true;
 	amroutine->amclusterable = false;
-	amroutine->ampredlocks = false;
+	amroutine->ampredlocks = true;
 	amroutine->amcanparallel = false;
 	amroutine->amkeytype = InvalidOid;
 
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 31425e9..9b97fd9 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -153,11 +153,18 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
 
 	LockBuffer(lBuffer, GIN_EXCLUSIVE);
 
+	page = BufferGetPage(dBuffer);
+	rightlink = GinPageGetOpaque(page)->rightlink;
+
+	/*
+	 * Any insert which would have gone on the leaf block will now go to its
+	 * right sibling.
+	 */
+	PredicateLockPageCombine(gvs->index, deleteBlkno, rightlink);
+
 	START_CRIT_SECTION();
 
 	/* Unlink the page by changing left sibling's rightlink */
-	page = BufferGetPage(dBuffer);
-	rightlink = GinPageGetOpaque(page)->rightlink;
 
 	page = BufferGetPage(lBuffer);
 	GinPageGetOpaque(page)->rightlink = rightlink;
diff --git a/src/backend/storage/lmgr/README-SSI b/src/backend/storage/lmgr/README-SSI
index a9dc01f..37c360c 100644
--- a/src/backend/storage/lmgr/README-SSI
+++ b/src/backend/storage/lmgr/README-SSI
@@ -379,6 +379,11 @@ level during a GiST search. An index insert at the leaf level can
 then be trusted to ripple up to all levels and locations where
 conflicting predicate locks may exist.
 
+    * Gin searches acquire predicate locks only on the leaf pages.
+If, however, fast update is enabled, a predicate lock on the index
+relation is required. During a page split, a predicate lock is copied
+from the original page to the new page.
+
     * The effects of page splits, overflows, consolidations, and
 removals must be carefully reviewed to ensure that predicate locks
 aren't "lost" during those operations, or kept with pages which could
diff --git a/src/test/isolation/expected/predicate-gin-2.out b/src/test/isolation/expected/predicate-gin-2.out
new file mode 100644
index 0000000..b347bce
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin-2.out
@@ -0,0 +1,321 @@
+Parsed test spec with 2 sessions
+
+starting permutation: rxy1 wx1 c1 rxy2 wy2 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+
+starting permutation: rxy1 wx1 rxy2 c1 wy2 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c1 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c2 c1
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy1 rxy2 wx1 c1 wy2 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c1 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c2 c1
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy1 rxy2 wy2 wx1 c1 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy1 rxy2 wy2 wx1 c2 c1
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy1 rxy2 wy2 c2 wx1 c1
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+
+starting permutation: rxy2 rxy1 wx1 c1 wy2 c2
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c1 c2
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c2 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy2 rxy1 wy2 wx1 c1 c2
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy2 rxy1 wy2 wx1 c2 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy2 rxy1 wy2 c2 wx1 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c1 c2
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c2 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy2 wy2 rxy1 c2 wx1 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+
+starting permutation: rxy2 wy2 c2 rxy1 wx1 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
diff --git a/src/test/isolation/expected/predicate-gin.out b/src/test/isolation/expected/predicate-gin.out
new file mode 100644
index 0000000..ab293f6
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin.out
@@ -0,0 +1,339 @@
+Parsed test spec with 2 sessions
+
+starting permutation: rxy1 wx1 c1 rxy2 wy2 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+1004           
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+
+starting permutation: rxy1 wx1 rxy2 c1 wy2 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c1 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 wx1 rxy2 wy2 c2 c1
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 c1 wy2 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c1 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 wy2 c2 c1
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c1 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c2 c1
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 c2 wx1 c1
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: COMMIT;
+
+starting permutation: rxy2 rxy1 wx1 c1 wy2 c2
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c1 c2
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wx1 wy2 c2 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c1 c2
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c2 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 c2 wx1 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: COMMIT;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c1 c2
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 wx1 c2 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 c2 wx1 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: COMMIT;
+
+starting permutation: rxy2 wy2 c2 rxy1 wx1 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+502            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index 32c965b..3099d8b 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -62,3 +62,5 @@ test: sequence-ddl
 test: async-notify
 test: vacuum-reltuples
 test: timeouts
+test: predicate-gin
+test: predicate-gin-2
diff --git a/src/test/isolation/specs/predicate-gin-2.spec b/src/test/isolation/specs/predicate-gin-2.spec
new file mode 100644
index 0000000..baac108
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin-2.spec
@@ -0,0 +1,41 @@
+# Test for page level predicate locking in gin
+#
+# Test to check reduced false positives.
+#
+# Queries are written in such a way that an index scan(from one transaction) and an index insert(from another transaction) will try to access different parts(sub-tree) of the index.
+
+
+setup
+{
+ create table gin_tbl(id int4, p int4[]);
+ create index ginidx on gin_tbl using gin(p) with 
+ (fastupdate = off);
+ insert into gin_tbl select g, array[1, g*2, 2] from generate_series(11, 1000) g;
+ insert into gin_tbl select g, array[3, g*2, 4] from generate_series(11, 1000) g;
+}
+
+teardown
+{
+ DROP TABLE gin_tbl;
+}
+
+session "s1"
+setup		{ 
+		  BEGIN ISOLATION LEVEL SERIALIZABLE;
+		  set enable_seqscan=off;
+		}
+step "rxy1"	{ select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2]; }
+step "wx1"	{ insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;}
+step "c1"	{ COMMIT; }
+
+session "s2"
+setup		{ 
+		  BEGIN ISOLATION LEVEL SERIALIZABLE;
+		  set enable_seqscan=off;
+		}
+
+step "rxy2"	{ select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4]; }
+step "wy2"	{ insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g; }
+step "c2"	{ COMMIT; }
diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec
new file mode 100644
index 0000000..7d9f446
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin.spec
@@ -0,0 +1,41 @@
+# Test for page level predicate locking in gin
+#
+# Test to verify serialization failures
+#
+# Queries are written in such a way that an index scan(from one transaction) and an index insert(from another transaction) will try to access the same part(sub-tree) of the index.
+
+
+setup
+{
+ create table gin_tbl(id int4, p int4[]);
+ create index ginidx on gin_tbl using gin(p) with 
+ (fastupdate = off);
+ insert into gin_tbl select g, array[1, 2, g*2] from generate_series(1, 200) g;
+ insert into gin_tbl select g, array[3, 4, g*3] from generate_series(1, 200) g;
+}
+
+teardown
+{
+ DROP TABLE gin_tbl;
+}
+
+session "s1"
+setup		{ 
+		  BEGIN ISOLATION LEVEL SERIALIZABLE;
+		  set enable_seqscan=off;
+		}
+step "rxy1"	{ select sum(p[2]) from gin_tbl where p @> array[1,2]; }
+step "wx1"	{ insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;}
+step "c1"	{ COMMIT; }
+
+session "s2"
+setup		{ 
+		  BEGIN ISOLATION LEVEL SERIALIZABLE;
+		  set enable_seqscan=off;
+		}
+
+step "rxy2"	{ select sum(p[2]) from gin_tbl where p @> array[3,4]; }
+step "wy2"	{ insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g; }
+step "c2"	{ COMMIT; }
-- 
1.9.1

#4Alexander Korotkov
a.korotkov@postgrespro.ru
In reply to: Shubham Barai (#3)
Re: GSoC 2017: weekly progress reports (week 6)

Hi!

On Wed, Aug 9, 2017 at 6:30 PM, Shubham Barai <shubhambaraiss@gmail.com>
wrote:

Please find the updated patch for predicate locking in gin index here.

There was a small issue in the previous patch. I didn't consider the case
where only root page exists in the tree, and there is a predicate lock on
it,
and it gets split.

If we treat the original page as a left page and create a new root and
right
page, then we just need to copy a predicate lock from the left to the
right
page (this is the case in B-tree).

But if we treat the original page as a root and create a new left and right
page, then we need to copy a predicate lock on both new pages (in the case
of rum and gin).

link to updated code and tests: https://github.com/
shubhambaraiss/postgres/commit/6172639a104785f051cb4aa0d511c58f2bae65a6

I've been assigned to review this patch. First of all, I'd like to understand
the general idea of this patch.

As I understand it, you're placing predicate locks on both entry tree leaf pages
and posting tree leaf pages. But GIN implements a so-called "fast scan"
technique which allows it to skip some leaf pages of the posting tree when
those pages are guaranteed not to contain matching item pointers. The
particular order in which posting lists are scanned or skipped depends on their
estimated size (so it's a kind of coincidence).
But thinking about this more generally, I found that the proposed locking
scheme is redundant. Currently, when an entry has a posting tree, you're locking
both:
1) the entry tree leaf page containing the pointer to the posting tree,
2) the leaf pages of the corresponding posting tree.
Therefore, conflicting transactions accessing the same entry would conflict
anyway while accessing the same entry tree leaf page. So, there is no
need to lock posting tree leaf pages at all. Alternatively, if an entry
has a posting tree, you can skip locking the entry tree leaf page and lock the
posting tree leaf pages instead.

------
Alexander Korotkov
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company

#5Alexander Korotkov
a.korotkov@postgrespro.ru
In reply to: Alexander Korotkov (#4)
Re: GSoC 2017: weekly progress reports (week 6)

On Thu, Sep 28, 2017 at 12:45 AM, Alexander Korotkov <
a.korotkov@postgrespro.ru> wrote:

On Wed, Aug 9, 2017 at 6:30 PM, Shubham Barai <shubhambaraiss@gmail.com>
wrote:

Please find the updated patch for predicate locking in gin index here.

There was a small issue in the previous patch. I didn't consider the case
where only root page exists in the tree, and there is a predicate lock on
it,
and it gets split.

If we treat the original page as a left page and create a new root and
right
page, then we just need to copy a predicate lock from the left to the
right
page (this is the case in B-tree).

But if we treat the original page as a root and create a new left and
right
page, then we need to copy a predicate lock on both new pages (in the
case of rum and gin).

link to updated code and tests: https://github.com/shub
hambaraiss/postgres/commit/6172639a104785f051cb4aa0d511c58f2bae65a6

I've assigned to review this patch. First of all I'd like to understand
general idea of this patch.

As I get, you're placing predicate locks to both entry tree leaf pages and
posting tree leaf pages. But, GIN implements so called "fast scan"
technique which allows it to skip some leaf pages of posting tree when
these pages are guaranteed to not contain matching item pointers. Wherein
the particular order of posting list scan and skip depends of their
estimated size (so it's a kind of coincidence).

But thinking about this more generally, I found that proposed locking
scheme is redundant. Currently when entry has posting tree, you're locking
both:
1) entry tree leaf page containing pointer to posting tree,
2) leaf pages of corresponding posting tree.
Therefore conflicting transactions accessing same entry would anyway
conflict while accessing the same entry tree leaf page. So, there is no
necessity to lock posting tree leaf pages at all. Alternatively, if entry
has posting tree, you can skip locking entry tree leaf page and lock
posting tree leaf pages instead.

I'd like to note that I had following warnings during compilation using
clang.

gininsert.c:519:47: warning: incompatible pointer to integer conversion

passing 'void *' to parameter of type 'Buffer' (aka 'int')
[-Wint-conversion]
CheckForSerializableConflictIn(index, NULL, NULL);
^~~~
/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../lib/clang/8.0.0/include/stddef.h:105:16:
note: expanded from macro 'NULL'
# define NULL ((void*)0)
^~~~~~~~~~
../../../../src/include/storage/predicate.h:64:87: note: passing argument
to parameter 'buffer' here
extern void CheckForSerializableConflictIn(Relation relation, HeapTuple
tuple, Buffer buffer);

^
1 warning generated.
ginvacuum.c:163:2: warning: implicit declaration of function
'PredicateLockPageCombine' is invalid in C99
[-Wimplicit-function-declaration]
PredicateLockPageCombine(gvs->index, deleteBlkno, rightlink);
^
1 warning generated.

Also, I tried to remove predicate locks from posting tree leaves. At least
the isolation tests passed correctly after this change.

However, after a Telegram discussion with Andrew Borodin, we decided that it
would be better to do predicate locking and conflict checking for posting
tree leaves, but skip that for entry tree leaves (in the case when an entry has
a posting tree). That would give us more granular locking and fewer false
positives.

------
Alexander Korotkov
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company

#6Shubham Barai
shubhambaraiss@gmail.com
In reply to: Alexander Korotkov (#5)
1 attachment(s)
Re: GSoC 2017: weekly progress reports (week 6)

<https://mailtrack.io/&gt; Sent with Mailtrack
<https://mailtrack.io/install?source=signature&amp;lang=en&amp;referral=shubhambaraiss@gmail.com&amp;idSignature=22&gt;
<#>

On 28 September 2017 at 15:49, Alexander Korotkov <a.korotkov@postgrespro.ru

wrote:

On Thu, Sep 28, 2017 at 12:45 AM, Alexander Korotkov <
a.korotkov@postgrespro.ru> wrote:

On Wed, Aug 9, 2017 at 6:30 PM, Shubham Barai <shubhambaraiss@gmail.com>
wrote:

Please find the updated patch for predicate locking in gin index here.

There was a small issue in the previous patch. I didn't consider the case
where only root page exists in the tree, and there is a predicate lock
on it,
and it gets split.

If we treat the original page as a left page and create a new root and
right
page, then we just need to copy a predicate lock from the left to the
right
page (this is the case in B-tree).

But if we treat the original page as a root and create a new left and
right
page, then we need to copy a predicate lock on both new pages (in the
case of rum and gin).

link to updated code and tests: https://github.com/shub
hambaraiss/postgres/commit/6172639a104785f051cb4aa0d511c58f2bae65a6

I've assigned to review this patch. First of all I'd like to understand
general idea of this patch.

As I get, you're placing predicate locks to both entry tree leaf pages
and posting tree leaf pages. But, GIN implements so called "fast scan"
technique which allows it to skip some leaf pages of posting tree when
these pages are guaranteed to not contain matching item pointers. Wherein
the particular order of posting list scan and skip depends of their
estimated size (so it's a kind of coincidence).

But thinking about this more generally, I found that proposed locking
scheme is redundant. Currently when entry has posting tree, you're locking
both:
1) entry tree leaf page containing pointer to posting tree,
2) leaf pages of corresponding posting tree.
Therefore conflicting transactions accessing same entry would anyway
conflict while accessing the same entry tree leaf page. So, there is no
necessity to lock posting tree leaf pages at all. Alternatively, if entry
has posting tree, you can skip locking entry tree leaf page and lock
posting tree leaf pages instead.

I'd like to note that I had following warnings during compilation using
clang.

gininsert.c:519:47: warning: incompatible pointer to integer conversion

passing 'void *' to parameter of type 'Buffer' (aka 'int')
[-Wint-conversion]
CheckForSerializableConflictIn(index, NULL, NULL);
^~~~
/Applications/Xcode.app/Contents/Developer/Toolchains/
XcodeDefault.xctoolchain/usr/bin/../lib/clang/8.0.0/include/stddef.h:105:16:
note: expanded from macro 'NULL'
# define NULL ((void*)0)
^~~~~~~~~~
../../../../src/include/storage/predicate.h:64:87: note: passing
argument to parameter 'buffer' here
extern void CheckForSerializableConflictIn(Relation relation, HeapTuple
tuple, Buffer buffer);

^
1 warning generated.
ginvacuum.c:163:2: warning: implicit declaration of function
'PredicateLockPageCombine' is invalid in C99 [-Wimplicit-function-
declaration]
PredicateLockPageCombine(gvs->index, deleteBlkno, rightlink);
^
1 warning generated.

Also, I tried to remove predicate locks from posting tree leafs. At least
isolation tests passed correctly after this change.

However, after telegram discussion with Andrew Borodin, we decided that it
would be better to do predicate locking and conflict checking for posting
tree leafs, but skip that for entry tree leafs (in the case when entry has
posting tree). That would give us more granular locking and less false
positives.

Hi Alexander,

I have made changes according to your suggestions. Please have a look at
the updated patch.
I am also considering your suggestions for my other patches, but I
will need some time to
make the changes, as I am currently busy with my master's.

Kind Regards,
Shubham

Attachments:

Predicate-locking-in-gin-index_2.patchapplication/octet-stream; name=Predicate-locking-in-gin-index_2.patchDownload
From 2ed73e5dbb921a31e7d2f558a683414f8b7c74f0 Mon Sep 17 00:00:00 2001
From: shubhambaraiss <you@example.com>
Date: Tue, 11 Jul 2017 05:26:52 +0530
Subject: [PATCH] Predicate locking in gin index

---
 src/backend/access/gin/ginbtree.c               |  22 ++
 src/backend/access/gin/gindatapage.c            |  10 +-
 src/backend/access/gin/ginget.c                 |  47 +++-
 src/backend/access/gin/gininsert.c              |  18 +-
 src/backend/access/gin/ginutil.c                |   2 +-
 src/backend/access/gin/ginvacuum.c              |  12 +-
 src/backend/storage/lmgr/README-SSI             |  11 +
 src/include/access/gin_private.h                |   2 +-
 src/test/isolation/expected/predicate-gin-2.out | 321 ++++++++++++++++++++++
 src/test/isolation/expected/predicate-gin.out   | 339 ++++++++++++++++++++++++
 src/test/isolation/isolation_schedule           |   2 +
 src/test/isolation/specs/predicate-gin-2.spec   |  41 +++
 src/test/isolation/specs/predicate-gin.spec     |  41 +++
 13 files changed, 854 insertions(+), 14 deletions(-)
 create mode 100644 src/test/isolation/expected/predicate-gin-2.out
 create mode 100644 src/test/isolation/expected/predicate-gin.out
 create mode 100644 src/test/isolation/specs/predicate-gin-2.spec
 create mode 100644 src/test/isolation/specs/predicate-gin.spec

diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index b02cb8a..8184dc0 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -17,6 +17,7 @@
 #include "access/gin_private.h"
 #include "access/ginxlog.h"
 #include "access/xloginsert.h"
+#include "storage/predicate.h"
 #include "miscadmin.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
@@ -515,6 +516,19 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			btree->fillRoot(btree, newrootpg,
 							BufferGetBlockNumber(lbuffer), newlpage,
 							BufferGetBlockNumber(rbuffer), newrpage);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(lbuffer));
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
+
 		}
 		else
 		{
@@ -524,6 +538,14 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			GinPageGetOpaque(newrpage)->rightlink = savedRightLink;
 			GinPageGetOpaque(newlpage)->flags |= GIN_INCOMPLETE_SPLIT;
 			GinPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
 		}
 
 		/*
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index 2e5ea47..806c54f 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -20,6 +20,7 @@
 #include "lib/ilist.h"
 #include "miscadmin.h"
 #include "utils/rel.h"
+#include "storage/predicate.h"
 
 /*
  * Min, Max and Target size of posting lists stored on leaf pages, in bytes.
@@ -1759,7 +1760,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining)
  */
 BlockNumber
 createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats)
+				  GinStatsData *buildStats, Buffer entrybuffer)
 {
 	BlockNumber blkno;
 	Buffer		buffer;
@@ -1810,6 +1811,12 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 	page = BufferGetPage(buffer);
 	blkno = BufferGetBlockNumber(buffer);
 
+	/*
+	 *Copy a predicate lock from entry tree leaf (containing posting list)
+	 *to  posting tree.
+	 */
+	PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno);
+
 	START_CRIT_SECTION();
 
 	PageRestoreTempPage(tmppage, page);
@@ -1904,6 +1911,7 @@ ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
 		btree.itemptr = insertdata.items[insertdata.curitem];
 		stack = ginFindLeafPage(&btree, false, NULL);
 
+		CheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		ginInsertValue(&btree, stack, &insertdata, buildStats);
 	}
 }
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 56a5bf4..3b006d5 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -16,9 +16,11 @@
 
 #include "access/gin_private.h"
 #include "access/relscan.h"
+#include "storage/predicate.h"
 #include "miscadmin.h"
 #include "utils/datum.h"
 #include "utils/memutils.h"
+#include "utils/rel.h"
 
 /* GUC parameter */
 int			GinFuzzySearchLimit = 0;
@@ -37,7 +39,7 @@ typedef struct pendingPosition
  * Goes to the next page if current offset is outside of bounds
  */
 static bool
-moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
+moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack, Snapshot snapshot)
 {
 	Page		page = BufferGetPage(stack->buffer);
 
@@ -51,6 +53,7 @@ moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
 
 		stack->buffer = ginStepRight(stack->buffer, btree->index, GIN_SHARE);
 		stack->blkno = BufferGetBlockNumber(stack->buffer);
+		PredicateLockPage(btree->index, stack->blkno, snapshot);
 		stack->off = FirstOffsetNumber;
 	}
 
@@ -73,6 +76,10 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	/* Descend to the leftmost leaf page */
 	stack = ginScanBeginPostingTree(&btree, index, rootPostingTree, snapshot);
 	buffer = stack->buffer;
+
+	if (!GinGetUseFastUpdate(index))
+		PredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
+
 	IncrBufferRefCount(buffer); /* prevent unpin in freeGinBtreeStack */
 
 	freeGinBtreeStack(stack);
@@ -94,6 +101,9 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 			break;				/* no more pages */
 
 		buffer = ginStepRight(buffer, index, GIN_SHARE);
+
+		if (!GinGetUseFastUpdate(index))
+			PredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
 	}
 
 	UnlockReleaseBuffer(buffer);
@@ -131,6 +141,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 	attnum = scanEntry->attnum;
 	attr = btree->ginstate->origTupdesc->attrs[attnum - 1];
 
+	PredicateLockPage(btree->index, stack->buffer, snapshot);
+
 	for (;;)
 	{
 		Page		page;
@@ -141,7 +153,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 		/*
 		 * stack->off points to the interested entry, buffer is already locked
 		 */
-		if (moveRightIfItNeeded(btree, stack) == false)
+		if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 			return true;
 
 		page = BufferGetPage(stack->buffer);
@@ -250,7 +262,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 				Datum		newDatum;
 				GinNullCategory newCategory;
 
-				if (moveRightIfItNeeded(btree, stack) == false)
+				if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 					elog(ERROR, "lost saved point in index");	/* must not happen !!! */
 
 				page = BufferGetPage(stack->buffer);
@@ -323,6 +335,15 @@ restartScanEntry:
 						ginstate);
 	stackEntry = ginFindLeafPage(&btreeEntry, true, snapshot);
 	page = BufferGetPage(stackEntry->buffer);
+
+	/*
+	 * If fast update is enabled, we acquire a predicate lock on the entire
+	 * relation as fast update postpones the insertion of tuples into index
+	 * structure due to which we can't detect rw conflicts.
+	 */
+	if (GinGetUseFastUpdate(ginstate->index))
+		PredicateLockRelation(ginstate->index, snapshot);
+
 	/* ginFindLeafPage() will have already checked snapshot age. */
 	needUnlock = TRUE;
 
@@ -391,6 +412,9 @@ restartScanEntry:
 											rootPostingTree, snapshot);
 			entry->buffer = stack->buffer;
 
+			if (!GinGetUseFastUpdate(ginstate->index))
+				PredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
 			/*
 			 * We keep buffer pinned because we need to prevent deletion of
 			 * page during scan. See GIN's vacuum implementation. RefCount is
@@ -414,6 +438,8 @@ restartScanEntry:
 		}
 		else if (GinGetNPosting(itup) > 0)
 		{
+			PredicateLockPage(ginstate->index, BufferGetBlockNumber(stackEntry->buffer), snapshot);
+
 			entry->list = ginReadTuple(ginstate, entry->attnum, itup,
 									   &entry->nlist);
 			entry->predictNumberResult = entry->nlist;
@@ -633,6 +659,9 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 		entry->btree.fullScan = false;
 		stack = ginFindLeafPage(&entry->btree, true, snapshot);
 
+		if (!GinGetUseFastUpdate(ginstate->index))
+			PredicateLockPage(ginstate->index, BufferGetBlockNumber(stack->buffer), snapshot);
+
 		/* we don't need the stack, just the buffer. */
 		entry->buffer = stack->buffer;
 		IncrBufferRefCount(entry->buffer);
@@ -677,6 +706,11 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 			entry->buffer = ginStepRight(entry->buffer,
 										 ginstate->index,
 										 GIN_SHARE);
+
+			if (!GinGetUseFastUpdate(ginstate->index))
+				PredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
+
 			page = BufferGetPage(entry->buffer);
 		}
 		stepright = true;
@@ -1733,6 +1767,13 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 		return;
 	}
 
+	/*
+	 * If fast update is disabled, but some items still exist in the pending
+	 * list, then a predicate lock on the entire relation is required.
+	 */
+	if (!GinGetUseFastUpdate(scan->indexRelation))
+		PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
+
 	pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
 	LockBuffer(pos.pendingBuffer, GIN_SHARE);
 	pos.firstOffset = FirstOffsetNumber;
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 5378011..661b605 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -22,6 +22,7 @@
 #include "storage/bufmgr.h"
 #include "storage/smgr.h"
 #include "storage/indexfsm.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 
@@ -48,7 +49,7 @@ static IndexTuple
 addItemPointersToLeafTuple(GinState *ginstate,
 						   IndexTuple old,
 						   ItemPointerData *items, uint32 nitem,
-						   GinStatsData *buildStats)
+						   GinStatsData *buildStats, Buffer buffer)
 {
 	OffsetNumber attnum;
 	Datum		key;
@@ -99,7 +100,8 @@ addItemPointersToLeafTuple(GinState *ginstate,
 		postingRoot = createPostingTree(ginstate->index,
 										oldItems,
 										oldNPosting,
-										buildStats);
+										buildStats,
+										buffer);
 
 		/* Now insert the TIDs-to-be-added into the posting tree */
 		ginInsertItemPointers(ginstate->index, postingRoot,
@@ -127,7 +129,7 @@ static IndexTuple
 buildFreshLeafTuple(GinState *ginstate,
 					OffsetNumber attnum, Datum key, GinNullCategory category,
 					ItemPointerData *items, uint32 nitem,
-					GinStatsData *buildStats)
+					GinStatsData *buildStats, Buffer buffer)
 {
 	IndexTuple	res = NULL;
 	GinPostingList *compressedList;
@@ -157,7 +159,7 @@ buildFreshLeafTuple(GinState *ginstate,
 		 * Initialize a new posting tree with the TIDs.
 		 */
 		postingRoot = createPostingTree(ginstate->index, items, nitem,
-										buildStats);
+										buildStats, buffer);
 
 		/* And save the root link in the result tuple */
 		GinSetPostingTree(res, postingRoot);
@@ -217,17 +219,19 @@ ginEntryInsert(GinState *ginstate,
 			return;
 		}
 
+		CheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* modify an existing leaf entry */
 		itup = addItemPointersToLeafTuple(ginstate, itup,
-										  items, nitem, buildStats);
+										  items, nitem, buildStats, stack->buffer);
 
 		insertdata.isDelete = TRUE;
 	}
 	else
 	{
+		CheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* no match, so construct a new leaf entry */
 		itup = buildFreshLeafTuple(ginstate, attnum, key, category,
-								   items, nitem, buildStats);
+								   items, nitem, buildStats, stack->buffer);
 	}
 
 	/* Insert the new or modified leaf tuple */
@@ -513,6 +517,8 @@ gininsert(Relation index, Datum *values, bool *isnull,
 
 		memset(&collector, 0, sizeof(GinTupleCollector));
 
+		CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
+
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleFastCollect(ginstate, &collector,
 									(OffsetNumber) (i + 1),
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 91e4a8c..5cfb45d 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -49,7 +49,7 @@ ginhandler(PG_FUNCTION_ARGS)
 	amroutine->amsearchnulls = false;
 	amroutine->amstorage = true;
 	amroutine->amclusterable = false;
-	amroutine->ampredlocks = false;
+	amroutine->ampredlocks = true;
 	amroutine->amcanparallel = false;
 	amroutine->amkeytype = InvalidOid;
 
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 31425e9..1e52313 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -22,6 +22,7 @@
 #include "postmaster/autovacuum.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 
 struct GinVacuumState
@@ -153,11 +154,18 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
 
 	LockBuffer(lBuffer, GIN_EXCLUSIVE);
 
+	page = BufferGetPage(dBuffer);
+	rightlink = GinPageGetOpaque(page)->rightlink;
+
+	/*
+	 * Any insert which would have gone on the leaf block will now go to its
+	 * right sibling.
+	 */
+	PredicateLockPageCombine(gvs->index, deleteBlkno, rightlink);
+
 	START_CRIT_SECTION();
 
 	/* Unlink the page by changing left sibling's rightlink */
-	page = BufferGetPage(dBuffer);
-	rightlink = GinPageGetOpaque(page)->rightlink;
 
 	page = BufferGetPage(lBuffer);
 	GinPageGetOpaque(page)->rightlink = rightlink;
diff --git a/src/backend/storage/lmgr/README-SSI b/src/backend/storage/lmgr/README-SSI
index a9dc01f..6bbdfef 100644
--- a/src/backend/storage/lmgr/README-SSI
+++ b/src/backend/storage/lmgr/README-SSI
@@ -379,6 +379,17 @@ level during a GiST search. An index insert at the leaf level can
 then be trusted to ripple up to all levels and locations where
 conflicting predicate locks may exist.
 
+    * Gin searches acquire predicate locks only on the leaf pages
+of the entry tree and the posting tree. We acquire a predicate lock on
+an entry tree leaf page only when the entry has a posting list. If the
+entry has a pointer to a posting tree, we skip locking the entry tree
+leaf page and lock only the posting tree leaf pages. If, however, fast
+update is enabled, a predicate lock on the whole index relation is
+required, because fast update postpones the insertion of tuples into the
+index structure by temporarily storing them in the pending list, which
+prevents us from detecting all r-w conflicts at page granularity.
+During a page split, a predicate lock is copied from the original page
+to the new page.
+
     * The effects of page splits, overflows, consolidations, and
 removals must be carefully reviewed to ensure that predicate locks
 aren't "lost" during those operations, or kept with pages which could
diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h
index adfdb0c..03c4103 100644
--- a/src/include/access/gin_private.h
+++ b/src/include/access/gin_private.h
@@ -217,7 +217,7 @@ extern ItemPointer GinDataLeafPageGetItems(Page page, int *nitems, ItemPointerDa
 extern int	GinDataLeafPageGetItemsToTbm(Page page, TIDBitmap *tbm);
 extern BlockNumber createPostingTree(Relation index,
 				  ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats);
+				  GinStatsData *buildStats, Buffer entrybuffer);
 extern void GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset);
 extern void GinPageDeletePostingItem(Page page, OffsetNumber offset);
 extern void ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
diff --git a/src/test/isolation/expected/predicate-gin-2.out b/src/test/isolation/expected/predicate-gin-2.out
new file mode 100644
index 0000000..b347bce
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin-2.out
@@ -0,0 +1,321 @@
+Parsed test spec with 2 sessions
+
+starting permutation: rxy1 wx1 c1 rxy2 wy2 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+
+starting permutation: rxy1 wx1 rxy2 c1 wy2 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c1 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c2 c1
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy1 rxy2 wx1 c1 wy2 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c1 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c2 c1
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy1 rxy2 wy2 wx1 c1 c2
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy1 rxy2 wy2 wx1 c2 c1
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy1 rxy2 wy2 c2 wx1 c1
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+
+starting permutation: rxy2 rxy1 wx1 c1 wy2 c2
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c1 c2
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c2 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy2 rxy1 wy2 wx1 c1 c2
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy2 rxy1 wy2 wx1 c2 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy2 rxy1 wy2 c2 wx1 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c1 c2
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c2 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy2 wy2 rxy1 c2 wx1 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
+
+starting permutation: rxy2 wy2 c2 rxy1 wx1 c1
+step rxy2: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4];
+sum            
+
+1007820        
+step wy2: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g;
+step c2: COMMIT;
+step rxy1: select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2];
+sum            
+
+1003860        
+step wx1: insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;
+step c1: COMMIT;
diff --git a/src/test/isolation/expected/predicate-gin.out b/src/test/isolation/expected/predicate-gin.out
new file mode 100644
index 0000000..ab293f6
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin.out
@@ -0,0 +1,339 @@
+Parsed test spec with 2 sessions
+
+starting permutation: rxy1 wx1 c1 rxy2 wy2 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+1004           
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+
+starting permutation: rxy1 wx1 rxy2 c1 wy2 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c1 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 wx1 rxy2 wy2 c2 c1
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 c1 wy2 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c1 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 wy2 c2 c1
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c1 c2
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c2 c1
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 c2 wx1 c1
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: COMMIT;
+
+starting permutation: rxy2 rxy1 wx1 c1 wy2 c2
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c1 c2
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wx1 wy2 c2 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c1 c2
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c2 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 c2 wx1 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: COMMIT;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c1 c2
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 wx1 c2 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 c2 wx1 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+400            
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: COMMIT;
+
+starting permutation: rxy2 wy2 c2 rxy1 wx1 c1
+step rxy2: select sum(p[2]) from gin_tbl where p @> array[3,4];
+sum            
+
+800            
+step wy2: insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g;
+step c2: COMMIT;
+step rxy1: select sum(p[2]) from gin_tbl where p @> array[1,2];
+sum            
+
+502            
+step wx1: insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;
+step c1: COMMIT;
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index 32c965b..3099d8b 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -62,3 +62,5 @@ test: sequence-ddl
 test: async-notify
 test: vacuum-reltuples
 test: timeouts
+test: predicate-gin
+test: predicate-gin-2
diff --git a/src/test/isolation/specs/predicate-gin-2.spec b/src/test/isolation/specs/predicate-gin-2.spec
new file mode 100644
index 0000000..baac108
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin-2.spec
@@ -0,0 +1,41 @@
+# Test for page level predicate locking in gin
+#
+# Test to check reduced false positives.
+#
+# Queries are written in such a way that an index scan (from one transaction) and an index insert (from another transaction) will try to access different parts (sub-trees) of the index.
+
+
+setup
+{
+ create table gin_tbl(id int4, p int4[]);
+ create index ginidx on gin_tbl using gin(p) with 
+ (fastupdate = off);
+ insert into gin_tbl select g, array[1, g*2, 2] from generate_series(11, 1000) g;
+ insert into gin_tbl select g, array[3, g*2, 4] from generate_series(11, 1000) g;
+}
+
+teardown
+{
+ DROP TABLE gin_tbl;
+}
+
+session "s1"
+setup		{ 
+		  BEGIN ISOLATION LEVEL SERIALIZABLE;
+		  set enable_seqscan=off;
+		}
+step "rxy1"	{ select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[1,2]; }
+step "wx1"	{ insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(3000, 3100) g;}
+step "c1"	{ COMMIT; }
+
+session "s2"
+setup		{ 
+		  BEGIN ISOLATION LEVEL SERIALIZABLE;
+		  set enable_seqscan=off;
+		}
+
+step "rxy2"	{ select sum(p[1]+p[2]+p[3]) from gin_tbl where p @> array[3,4]; }
+step "wy2"	{ insert into gin_tbl select g, array[g, g*2, g*3]
+		  from generate_series(4000, 4100) g; }
+step "c2"	{ COMMIT; }
diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec
new file mode 100644
index 0000000..7d9f446
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin.spec
@@ -0,0 +1,41 @@
+# Test for page level predicate locking in gin
+#
+# Test to verify serialization failures
+#
+# Queries are written in such a way that an index scan (from one transaction) and an index insert (from another transaction) will try to access the same part (sub-tree) of the index.
+
+
+setup
+{
+ create table gin_tbl(id int4, p int4[]);
+ create index ginidx on gin_tbl using gin(p) with 
+ (fastupdate = off);
+ insert into gin_tbl select g, array[1, 2, g*2] from generate_series(1, 200) g;
+ insert into gin_tbl select g, array[3, 4, g*3] from generate_series(1, 200) g;
+}
+
+teardown
+{
+ DROP TABLE gin_tbl;
+}
+
+session "s1"
+setup		{ 
+		  BEGIN ISOLATION LEVEL SERIALIZABLE;
+		  set enable_seqscan=off;
+		}
+step "rxy1"	{ select sum(p[2]) from gin_tbl where p @> array[1,2]; }
+step "wx1"	{ insert into gin_tbl select g, array[3, 4, g*3] 
+		  from generate_series(200, 250) g;}
+step "c1"	{ COMMIT; }
+
+session "s2"
+setup		{ 
+		  BEGIN ISOLATION LEVEL SERIALIZABLE;
+		  set enable_seqscan=off;
+		}
+
+step "rxy2"	{ select sum(p[2]) from gin_tbl where p @> array[3,4]; }
+step "wy2"	{ insert into gin_tbl select g, array[1, 2, g*2] 
+		  from generate_series(200, 250) g; }
+step "c2"	{ COMMIT; }
-- 
1.9.1

#7Alexander Korotkov
a.korotkov@postgrespro.ru
In reply to: Shubham Barai (#6)
Re: GSoC 2017: weekly progress reports (week 6)

On Sat, Sep 30, 2017 at 6:12 PM, Shubham Barai <shubhambaraiss@gmail.com>
wrote:

I have made changes according to your suggestions. Please have a look at
the updated patch.
I am also considering your suggestions for my other patches also. But, I
will need some time to
make changes as I am currently busy doing my master's.

I don't understand why sometimes you call PredicateLockPage() only when
fast update is off. For example:

@@ -94,6 +101,9 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,

break; /* no more pages */

buffer = ginStepRight(buffer, index, GIN_SHARE);
+
+ if (!GinGetUseFastUpdate(index))
+ PredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
}

UnlockReleaseBuffer(buffer);

But sometimes you call PredicateLockPage() unconditionally.

@@ -131,6 +141,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack

*stack,
attnum = scanEntry->attnum;
attr = btree->ginstate->origTupdesc->attrs[attnum - 1];

+ PredicateLockPage(btree->index, stack->buffer, snapshot);
+
for (;;)
{
Page page;

As I understand, all page-level predicate locking should happen only when
fast update is off.

Also, even though the general idea is described in README-SSI, in-code
comments would be useful.

------
Alexander Korotkov
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company

#8Shubham Barai
shubhambaraiss@gmail.com
In reply to: Alexander Korotkov (#7)
Re: GSoC 2017: weekly progress reports (week 6)

On 1 October 2017 at 01:47, Alexander Korotkov <a.korotkov@postgrespro.ru>
wrote:

On Sat, Sep 30, 2017 at 6:12 PM, Shubham Barai <shubhambaraiss@gmail.com>
wrote:

I have made changes according to your suggestions. Please have a look at
the updated patch.
I am also considering your suggestions for my other patches also. But, I
will need some time to
make changes as I am currently busy doing my master's.

I don't understand why sometimes you call PredicateLockPage() only when
fast update is off. For example:

@@ -94,6 +101,9 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,

break; /* no more pages */

buffer = ginStepRight(buffer, index, GIN_SHARE);
+
+ if (!GinGetUseFastUpdate(index))
+ PredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
}

UnlockReleaseBuffer(buffer);

But sometimes you call PredicateLockPage() unconditionally.

@@ -131,6 +141,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack

*stack,
attnum = scanEntry->attnum;
attr = btree->ginstate->origTupdesc->attrs[attnum - 1];

+ PredicateLockPage(btree->index, stack->buffer, snapshot);
+
for (;;)
{
Page page;

As I understand, all page-level predicate locking should happen only when
fast update is off.

Also, even though the general idea is described in README-SSI, in-code
comments would be useful.

Hi Alexander,

Yes, page-level predicate locking should happen only when fast update is
off.
Actually, I forgot to put conditions in updated patch. Does everything else
look ok ?

Kind Regards,
Shubham

Show quoted text
#9Alexander Korotkov
a.korotkov@postgrespro.ru
In reply to: Shubham Barai (#8)
Re: GSoC 2017: weekly progress reports (week 6)

On Sun, Oct 1, 2017 at 11:53 AM, Shubham Barai <shubhambaraiss@gmail.com>
wrote:

Yes, page-level predicate locking should happen only when fast update is
off.
Actually, I forgot to put conditions in updated patch. Does everything
else look ok ?

I think that the isolation tests should be improved. It doesn't seem that any
posting tree would be generated by the tests that you've provided, because
all the TIDs could fit in a single posting list. Note that you can get
some insight into the GIN physical structure using the pageinspect contrib.

------
Alexander Korotkov
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company

#10Michael Paquier
michael.paquier@gmail.com
In reply to: Alexander Korotkov (#9)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

On Tue, Oct 3, 2017 at 1:51 AM, Alexander Korotkov
<a.korotkov@postgrespro.ru> wrote:

I think that the isolation tests should be improved. It doesn't seem that any
posting tree would be generated by the tests that you've provided, because
all the TIDs could fit in a single posting list. Note that you can get some
insight into the GIN physical structure using the pageinspect contrib.

This thread had no updates for almost two months, so I am marking it
as returned with feedback.
--
Michael

#11Shubham Barai
shubhambaraiss@gmail.com
In reply to: Alexander Korotkov (#9)
1 attachment(s)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

On 2 October 2017 at 22:21, Alexander Korotkov <a.korotkov@postgrespro.ru>
wrote:

On Sun, Oct 1, 2017 at 11:53 AM, Shubham Barai <shubhambaraiss@gmail.com>
wrote:

Yes, page-level predicate locking should happen only when fast update is
off.
Actually, I forgot to put the conditions in the updated patch. Does everything
else look OK?

I think that the isolation tests should be improved. It doesn't seem that
any posting tree would be generated by the tests that you've provided,
because all the TIDs could fit into a single posting list. Note that you can
get some insight into GIN's physical structure using the pageinspect contrib module.

I have created new isolation tests. Please have a look at the
updated patch.

Regards,
Shubham

Attachments:

Predicate-locking-in-gin-index_4.patchapplication/octet-stream; name=Predicate-locking-in-gin-index_4.patchDownload
From fa194ee8db4590b607cfbf4ae3889ad9ff2ca33e Mon Sep 17 00:00:00 2001
From: shubhambaraiss <you@example.com>
Date: Tue, 11 Jul 2017 05:26:52 +0530
Subject: [PATCH] Predicate locking in gin index

---
 src/backend/access/gin/ginbtree.c             |  22 +
 src/backend/access/gin/gindatapage.c          |  10 +-
 src/backend/access/gin/ginget.c               |  50 +-
 src/backend/access/gin/gininsert.c            |  18 +-
 src/backend/access/gin/ginutil.c              |   2 +-
 src/backend/access/gin/ginvacuum.c            |  12 +-
 src/backend/storage/lmgr/README-SSI           |  11 +
 src/include/access/gin_private.h              |   2 +-
 src/test/isolation/expected/predicate-gin.out | 799 ++++++++++++++++++++++++++
 src/test/isolation/isolation_schedule         |   1 +
 src/test/isolation/specs/predicate-gin.spec   | 120 ++++
 11 files changed, 1033 insertions(+), 14 deletions(-)
 create mode 100644 src/test/isolation/expected/predicate-gin.out
 create mode 100644 src/test/isolation/specs/predicate-gin.spec

diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index b02cb8a..8184dc0 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -17,6 +17,7 @@
 #include "access/gin_private.h"
 #include "access/ginxlog.h"
 #include "access/xloginsert.h"
+#include "storage/predicate.h"
 #include "miscadmin.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
@@ -515,6 +516,19 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			btree->fillRoot(btree, newrootpg,
 							BufferGetBlockNumber(lbuffer), newlpage,
 							BufferGetBlockNumber(rbuffer), newrpage);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(lbuffer));
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
+
 		}
 		else
 		{
@@ -524,6 +538,14 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			GinPageGetOpaque(newrpage)->rightlink = savedRightLink;
 			GinPageGetOpaque(newlpage)->flags |= GIN_INCOMPLETE_SPLIT;
 			GinPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
 		}
 
 		/*
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index 2e5ea47..0a3f22f 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -20,6 +20,7 @@
 #include "lib/ilist.h"
 #include "miscadmin.h"
 #include "utils/rel.h"
+#include "storage/predicate.h"
 
 /*
  * Min, Max and Target size of posting lists stored on leaf pages, in bytes.
@@ -1759,7 +1760,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining)
  */
 BlockNumber
 createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats)
+				  GinStatsData *buildStats, Buffer entrybuffer)
 {
 	BlockNumber blkno;
 	Buffer		buffer;
@@ -1810,6 +1811,12 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 	page = BufferGetPage(buffer);
 	blkno = BufferGetBlockNumber(buffer);
 
+	/*
+	 * Copy a predicate lock from entry tree leaf (containing posting list)
+	 * to  posting tree.
+	 */
+	PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno);
+
 	START_CRIT_SECTION();
 
 	PageRestoreTempPage(tmppage, page);
@@ -1904,6 +1911,7 @@ ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
 		btree.itemptr = insertdata.items[insertdata.curitem];
 		stack = ginFindLeafPage(&btree, false, NULL);
 
+		CheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		ginInsertValue(&btree, stack, &insertdata, buildStats);
 	}
 }
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 56a5bf4..13c5304 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -16,9 +16,11 @@
 
 #include "access/gin_private.h"
 #include "access/relscan.h"
+#include "storage/predicate.h"
 #include "miscadmin.h"
 #include "utils/datum.h"
 #include "utils/memutils.h"
+#include "utils/rel.h"
 
 /* GUC parameter */
 int			GinFuzzySearchLimit = 0;
@@ -37,7 +39,7 @@ typedef struct pendingPosition
  * Goes to the next page if current offset is outside of bounds
  */
 static bool
-moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
+moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack, Snapshot snapshot)
 {
 	Page		page = BufferGetPage(stack->buffer);
 
@@ -51,6 +53,8 @@ moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
 
 		stack->buffer = ginStepRight(stack->buffer, btree->index, GIN_SHARE);
 		stack->blkno = BufferGetBlockNumber(stack->buffer);
+		if (!GinGetUseFastUpdate(btree->index))
+			PredicateLockPage(btree->index, stack->blkno, snapshot);
 		stack->off = FirstOffsetNumber;
 	}
 
@@ -73,6 +77,10 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	/* Descend to the leftmost leaf page */
 	stack = ginScanBeginPostingTree(&btree, index, rootPostingTree, snapshot);
 	buffer = stack->buffer;
+
+	if (!GinGetUseFastUpdate(index))
+		PredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
+
 	IncrBufferRefCount(buffer); /* prevent unpin in freeGinBtreeStack */
 
 	freeGinBtreeStack(stack);
@@ -94,6 +102,9 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 			break;				/* no more pages */
 
 		buffer = ginStepRight(buffer, index, GIN_SHARE);
+
+		if (!GinGetUseFastUpdate(index))
+			PredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
 	}
 
 	UnlockReleaseBuffer(buffer);
@@ -131,6 +142,9 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 	attnum = scanEntry->attnum;
 	attr = btree->ginstate->origTupdesc->attrs[attnum - 1];
 
+	if (!GinGetUseFastUpdate(btree->index))
+		PredicateLockPage(btree->index, stack->buffer, snapshot);
+
 	for (;;)
 	{
 		Page		page;
@@ -141,7 +155,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 		/*
 		 * stack->off points to the interested entry, buffer is already locked
 		 */
-		if (moveRightIfItNeeded(btree, stack) == false)
+		if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 			return true;
 
 		page = BufferGetPage(stack->buffer);
@@ -250,7 +264,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 				Datum		newDatum;
 				GinNullCategory newCategory;
 
-				if (moveRightIfItNeeded(btree, stack) == false)
+				if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 					elog(ERROR, "lost saved point in index");	/* must not happen !!! */
 
 				page = BufferGetPage(stack->buffer);
@@ -323,6 +337,15 @@ restartScanEntry:
 						ginstate);
 	stackEntry = ginFindLeafPage(&btreeEntry, true, snapshot);
 	page = BufferGetPage(stackEntry->buffer);
+
+	/*
+	 * If fast update is enabled, we acquire a predicate lock on the entire
+	 * relation as fast update postpones the insertion of tuples into index
+	 * structure due to which we can't detect rw conflicts.
+	 */
+	if (GinGetUseFastUpdate(ginstate->index))
+		PredicateLockRelation(ginstate->index, snapshot);
+
 	/* ginFindLeafPage() will have already checked snapshot age. */
 	needUnlock = TRUE;
 
@@ -391,6 +414,9 @@ restartScanEntry:
 											rootPostingTree, snapshot);
 			entry->buffer = stack->buffer;
 
+			if (!GinGetUseFastUpdate(ginstate->index))
+				PredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
 			/*
 			 * We keep buffer pinned because we need to prevent deletion of
 			 * page during scan. See GIN's vacuum implementation. RefCount is
@@ -414,6 +440,9 @@ restartScanEntry:
 		}
 		else if (GinGetNPosting(itup) > 0)
 		{
+			if (!GinGetUseFastUpdate(ginstate->index))
+				PredicateLockPage(ginstate->index, BufferGetBlockNumber(stackEntry->buffer), snapshot);
+
 			entry->list = ginReadTuple(ginstate, entry->attnum, itup,
 									   &entry->nlist);
 			entry->predictNumberResult = entry->nlist;
@@ -633,6 +662,9 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 		entry->btree.fullScan = false;
 		stack = ginFindLeafPage(&entry->btree, true, snapshot);
 
+		if (!GinGetUseFastUpdate(ginstate->index))
+			PredicateLockPage(ginstate->index, BufferGetBlockNumber(stack->buffer), snapshot);
+
 		/* we don't need the stack, just the buffer. */
 		entry->buffer = stack->buffer;
 		IncrBufferRefCount(entry->buffer);
@@ -677,6 +709,11 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 			entry->buffer = ginStepRight(entry->buffer,
 										 ginstate->index,
 										 GIN_SHARE);
+
+			if (!GinGetUseFastUpdate(ginstate->index))
+				PredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
+
 			page = BufferGetPage(entry->buffer);
 		}
 		stepright = true;
@@ -1733,6 +1770,13 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 		return;
 	}
 
+	/*
+	 * If fast update is disabled, but some items still exist in the pending
+	 * list, then a predicate lock on the entire relation is required.
+	 */
+	if (!GinGetUseFastUpdate(scan->indexRelation))
+		PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
+
 	pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
 	LockBuffer(pos.pendingBuffer, GIN_SHARE);
 	pos.firstOffset = FirstOffsetNumber;
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 5378011..661b605 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -22,6 +22,7 @@
 #include "storage/bufmgr.h"
 #include "storage/smgr.h"
 #include "storage/indexfsm.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 
@@ -48,7 +49,7 @@ static IndexTuple
 addItemPointersToLeafTuple(GinState *ginstate,
 						   IndexTuple old,
 						   ItemPointerData *items, uint32 nitem,
-						   GinStatsData *buildStats)
+						   GinStatsData *buildStats, Buffer buffer)
 {
 	OffsetNumber attnum;
 	Datum		key;
@@ -99,7 +100,8 @@ addItemPointersToLeafTuple(GinState *ginstate,
 		postingRoot = createPostingTree(ginstate->index,
 										oldItems,
 										oldNPosting,
-										buildStats);
+										buildStats,
+										buffer);
 
 		/* Now insert the TIDs-to-be-added into the posting tree */
 		ginInsertItemPointers(ginstate->index, postingRoot,
@@ -127,7 +129,7 @@ static IndexTuple
 buildFreshLeafTuple(GinState *ginstate,
 					OffsetNumber attnum, Datum key, GinNullCategory category,
 					ItemPointerData *items, uint32 nitem,
-					GinStatsData *buildStats)
+					GinStatsData *buildStats, Buffer buffer)
 {
 	IndexTuple	res = NULL;
 	GinPostingList *compressedList;
@@ -157,7 +159,7 @@ buildFreshLeafTuple(GinState *ginstate,
 		 * Initialize a new posting tree with the TIDs.
 		 */
 		postingRoot = createPostingTree(ginstate->index, items, nitem,
-										buildStats);
+										buildStats, buffer);
 
 		/* And save the root link in the result tuple */
 		GinSetPostingTree(res, postingRoot);
@@ -217,17 +219,19 @@ ginEntryInsert(GinState *ginstate,
 			return;
 		}
 
+		CheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* modify an existing leaf entry */
 		itup = addItemPointersToLeafTuple(ginstate, itup,
-										  items, nitem, buildStats);
+										  items, nitem, buildStats, stack->buffer);
 
 		insertdata.isDelete = TRUE;
 	}
 	else
 	{
+		CheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* no match, so construct a new leaf entry */
 		itup = buildFreshLeafTuple(ginstate, attnum, key, category,
-								   items, nitem, buildStats);
+								   items, nitem, buildStats, stack->buffer);
 	}
 
 	/* Insert the new or modified leaf tuple */
@@ -513,6 +517,8 @@ gininsert(Relation index, Datum *values, bool *isnull,
 
 		memset(&collector, 0, sizeof(GinTupleCollector));
 
+		CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
+
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleFastCollect(ginstate, &collector,
 									(OffsetNumber) (i + 1),
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 91e4a8c..5cfb45d 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -49,7 +49,7 @@ ginhandler(PG_FUNCTION_ARGS)
 	amroutine->amsearchnulls = false;
 	amroutine->amstorage = true;
 	amroutine->amclusterable = false;
-	amroutine->ampredlocks = false;
+	amroutine->ampredlocks = true;
 	amroutine->amcanparallel = false;
 	amroutine->amkeytype = InvalidOid;
 
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 31425e9..1e52313 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -22,6 +22,7 @@
 #include "postmaster/autovacuum.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 
 struct GinVacuumState
@@ -153,11 +154,18 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
 
 	LockBuffer(lBuffer, GIN_EXCLUSIVE);
 
+	page = BufferGetPage(dBuffer);
+	rightlink = GinPageGetOpaque(page)->rightlink;
+
+	/*
+	 * Any insert which would have gone on the leaf block will now go to its
+	 * right sibling.
+	 */
+	PredicateLockPageCombine(gvs->index, deleteBlkno, rightlink);
+
 	START_CRIT_SECTION();
 
 	/* Unlink the page by changing left sibling's rightlink */
-	page = BufferGetPage(dBuffer);
-	rightlink = GinPageGetOpaque(page)->rightlink;
 
 	page = BufferGetPage(lBuffer);
 	GinPageGetOpaque(page)->rightlink = rightlink;
diff --git a/src/backend/storage/lmgr/README-SSI b/src/backend/storage/lmgr/README-SSI
index a9dc01f..6bbdfef 100644
--- a/src/backend/storage/lmgr/README-SSI
+++ b/src/backend/storage/lmgr/README-SSI
@@ -379,6 +379,17 @@ level during a GiST search. An index insert at the leaf level can
 then be trusted to ripple up to all levels and locations where
 conflicting predicate locks may exist.
 
+    * Gin searches acquire predicate locks only on the leaf pages
+of entry tree and posting tree. We acquire a predicate lock on entry
+tree leaf pages only when entry has a posting list. If entry tree has
+a pointer to posting tree, we skip locking entry tree leaf page and lock
+only posting tree leaf pages. If, however, fast update is enabled, a
+predicate lock on the index relation is required as fast update postpones
+the insertion of tuples into index structure by temporarily storing them
+into pending list due to which we are unable to detect all r-w conflicts.
+During a page split, a predicate lock is copied from the original page
+to the new page.
+
     * The effects of page splits, overflows, consolidations, and
 removals must be carefully reviewed to ensure that predicate locks
 aren't "lost" during those operations, or kept with pages which could
diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h
index adfdb0c..03c4103 100644
--- a/src/include/access/gin_private.h
+++ b/src/include/access/gin_private.h
@@ -217,7 +217,7 @@ extern ItemPointer GinDataLeafPageGetItems(Page page, int *nitems, ItemPointerDa
 extern int	GinDataLeafPageGetItemsToTbm(Page page, TIDBitmap *tbm);
 extern BlockNumber createPostingTree(Relation index,
 				  ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats);
+				  GinStatsData *buildStats, Buffer entrybuffer);
 extern void GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset);
 extern void GinPageDeletePostingItem(Page page, OffsetNumber offset);
 extern void ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
diff --git a/src/test/isolation/expected/predicate-gin.out b/src/test/isolation/expected/predicate-gin.out
new file mode 100644
index 0000000..e80c897
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin.out
@@ -0,0 +1,799 @@
+Parsed test spec with 2 sessions
+
+starting permutation: rxy1 wx1 c1 rxy2 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+step c1: COMMIT;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10050          
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+step c2: COMMIT;
+
+starting permutation: rxy2 wy2 c2 rxy1 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+step c2: COMMIT;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10050          
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+step c1: COMMIT;
+
+starting permutation: rxy3 wx3 c1 rxy4 wy4 c2
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step c1: COMMIT;
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step c2: COMMIT;
+
+starting permutation: rxy4 wy4 c2 rxy3 wx3 c1
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step c2: COMMIT;
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step c1: COMMIT;
+
+starting permutation: rxy1 wx1 rxy2 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 wx1 rxy2 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 c2 wx1 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: COMMIT;
+
+starting permutation: rxy2 rxy1 wx1 c1 wy2 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+step c1: COMMIT;
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wx1 wy2 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: COMMIT;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+step c1: COMMIT;
+step c2: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+step c2: COMMIT;
+step c1: COMMIT;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step c2: COMMIT;
+step wx1: insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: COMMIT;
+
+starting permutation: rxy3 wx3 rxy4 c1 wy4 c2
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step c1: COMMIT;
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step c2: COMMIT;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c1 c2
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c2 c1
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy3 rxy4 wx3 c1 wy4 c2
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step c1: COMMIT;
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step c2: COMMIT;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c1 c2
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c2 c1
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c1 c2
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c2 c1
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy3 rxy4 wy4 c2 wx3 c1
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step c2: COMMIT;
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step c1: COMMIT;
+
+starting permutation: rxy4 rxy3 wx3 c1 wy4 c2
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step c1: COMMIT;
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step c2: COMMIT;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c1 c2
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c2 c1
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c1 c2
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c2 c1
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy4 rxy3 wy4 c2 wx3 c1
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step c2: COMMIT;
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step c1: COMMIT;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c1 c2
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step c1: COMMIT;
+step c2: COMMIT;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c2 c1
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step c2: COMMIT;
+step c1: COMMIT;
+
+starting permutation: rxy4 wy4 rxy3 c2 wx3 c1
+step rxy4: 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g;
+step rxy3: 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+
+count          
+
+4              
+step c2: COMMIT;
+step wx3: insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g;
+step c1: COMMIT;
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index 32c965b..de9322b 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -62,3 +62,4 @@ test: sequence-ddl
 test: async-notify
 test: vacuum-reltuples
 test: timeouts
+test: predicate-gin
diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec
new file mode 100644
index 0000000..b362eb3
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin.spec
@@ -0,0 +1,120 @@
+# Test for page level predicate locking in gin
+#
+# Test to verify serialization failures and to check reduced false positives
+#
+# To verify serialization failures, queries and permutations are written in such
+# a way that an index scan (from one transaction) and an index insert (from another
+# transaction) will try to access the same part (sub-tree) of the index.
+#
+# To check reduced false positives, queries and permutations are written in such
+# a way that an index scan (from one transaction) and an index insert (from another
+# transaction) will try to access different parts (sub-trees) of the index.
+
+
+setup
+{
+ create table gin_tbl(id int4, p int4[]);
+ create index ginidx on gin_tbl using gin(p) with 
+ (fastupdate = off);
+ insert into gin_tbl select g, array[g, g*2,g*3] from generate_series(1, 10000) g;
+ insert into gin_tbl select g, array[4,5,6] from generate_series(10001, 20000) g;
+}
+
+teardown
+{
+ DROP TABLE gin_tbl;
+}
+
+session "s1"
+setup		{
+		  BEGIN ISOLATION LEVEL SERIALIZABLE;
+		  set enable_seqscan=off;
+		}
+step "rxy1"	{ select count(*) from gin_tbl where p @> array[4,5]; }
+step "wx1"	{ insert into gin_tbl select g, array[5,6] 
+		  from generate_series(20001, 20050) g; }
+step "rxy3"	{ 
+		  select count(*) from gin_tbl where p @> array[1,2] or 
+		  p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+		}
+step "wx3"	{ insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(1, 50) g; }
+step "c1"	{ COMMIT; }
+
+session "s2"
+setup		{ 
+		  BEGIN ISOLATION LEVEL SERIALIZABLE;
+		  set enable_seqscan=off;
+		}
+
+step "rxy2"	{ select count(*) from gin_tbl where p @> array[5,6]; }
+step "wy2"	{ insert into gin_tbl select g, array[4,5] 
+		  from generate_series(20051, 20100) g; }
+step "rxy4"	{ 
+		  select count(*) from gin_tbl where p @> array[4000,8000] or
+		  p @> array[5000,10000] or p @> array[6000,12000] or 
+		  p @> array[8000,16000];
+		}
+step "wy4"	{ insert into gin_tbl select g, array[g,g*2] 
+		  from generate_series(10000, 10050) g; }
+step "c2"	{ COMMIT; }
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index, but one transaction commits before the
+# other transaction begins, so there is no r-w conflict.
+
+permutation "rxy1" "wx1" "c1" "rxy2" "wy2" "c2"
+permutation "rxy2" "wy2" "c2" "rxy1" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index, and also one transaction commits before
+# the other transaction begins, so there is no r-w conflict.
+
+permutation "rxy3" "wx3" "c1" "rxy4" "wy4" "c2"
+permutation "rxy4" "wy4" "c2" "rxy3" "wx3" "c1"
+
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index, and one transaction begins before the
+# other transaction commits, so there is an r-w conflict.
+
+permutation "rxy1" "wx1" "rxy2" "c1" "wy2" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c1" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wx1" "c1" "wy2" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c1" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c1" "c2"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "rxy1" "wx1" "c1" "wy2" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c1" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c1" "c2"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c1" "c2"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c2" "c1"
+permutation "rxy2" "wy2" "rxy1" "c2" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index, so there is no r-w conflict.
+
+permutation "rxy3" "wx3" "rxy4" "c1" "wy4" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c1" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wx3" "c1" "wy4" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c1" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c1" "c2"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "rxy3" "wx3" "c1" "wy4" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c1" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c1" "c2"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c1" "c2"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c2" "c1"
+permutation "rxy4" "wy4" "rxy3" "c2" "wx3" "c1"
-- 
1.9.1

#12Thomas Munro
thomas.munro@enterprisedb.com
In reply to: Shubham Barai (#11)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

On Wed, Jan 3, 2018 at 4:31 AM, Shubham Barai <shubhambaraiss@gmail.com> wrote:

I have created new isolation tests. Please have a look at
updated patch.

Hi Shubham,

Could we please have a rebased version of the gin one?

--
Thomas Munro
http://www.enterprisedb.com

#13Shubham Barai
shubhambaraiss@gmail.com
In reply to: Thomas Munro (#12)
1 attachment(s)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

On 28 February 2018 at 05:51, Thomas Munro <thomas.munro@enterprisedb.com>
wrote:

On Wed, Jan 3, 2018 at 4:31 AM, Shubham Barai <shubhambaraiss@gmail.com>
wrote:

I have created new isolation tests. Please have a look at
updated patch.

Hi Shubham,

Could we please have a rebased version of the gin one?

Sure. I have attached a rebased version.

Regards,
Shubham

Attachments:

Predicate-Locking-in-gin-index_v5.patchapplication/octet-stream; name=Predicate-Locking-in-gin-index_v5.patchDownload
From 3bd14fc57a21ef20c659a9381ffeae6be5f16ab8 Mon Sep 17 00:00:00 2001
From: shubhambaraiss <you@example.com>
Date: Wed, 28 Feb 2018 21:27:21 +0530
Subject: [PATCH] Predicate Locking in gin index

---
 src/backend/access/gin/ginbtree.c             |  22 +
 src/backend/access/gin/gindatapage.c          |  12 +-
 src/backend/access/gin/ginget.c               |  56 +-
 src/backend/access/gin/gininsert.c            |  18 +-
 src/backend/access/gin/ginutil.c              |   2 +-
 src/backend/access/gin/ginvacuum.c            |  12 +-
 src/backend/storage/lmgr/README-SSI           |  11 +
 src/include/access/gin_private.h              |   2 +-
 src/test/isolation/expected/predicate-gin.out | 719 ++++++++++++++++++++++++++
 src/test/isolation/isolation_schedule         |   1 +
 src/test/isolation/specs/predicate-gin.spec   | 116 +++++
 11 files changed, 953 insertions(+), 18 deletions(-)
 create mode 100644 src/test/isolation/expected/predicate-gin.out
 create mode 100644 src/test/isolation/specs/predicate-gin.spec

diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 37070b3..095b119 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -17,6 +17,7 @@
 #include "access/gin_private.h"
 #include "access/ginxlog.h"
 #include "access/xloginsert.h"
+#include "storage/predicate.h"
 #include "miscadmin.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
@@ -515,6 +516,19 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			btree->fillRoot(btree, newrootpg,
 							BufferGetBlockNumber(lbuffer), newlpage,
 							BufferGetBlockNumber(rbuffer), newrpage);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(lbuffer));
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
+
 		}
 		else
 		{
@@ -524,6 +538,14 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			GinPageGetOpaque(newrpage)->rightlink = savedRightLink;
 			GinPageGetOpaque(newlpage)->flags |= GIN_INCOMPLETE_SPLIT;
 			GinPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
 		}
 
 		/*
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index f9daaba..52e1017 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -20,6 +20,7 @@
 #include "lib/ilist.h"
 #include "miscadmin.h"
 #include "utils/rel.h"
+#include "storage/predicate.h"
 
 /*
  * Min, Max and Target size of posting lists stored on leaf pages, in bytes.
@@ -1423,7 +1424,7 @@ disassembleLeaf(Page page)
  * Any segments that acquire new items are decoded, and the new items are
  * merged with the old items.
  *
- * Returns true if any new items were added. False means they were all
+ * Returns true if any new items were added. false means they were all
  * duplicates of existing items on the page.
  */
 static bool
@@ -1759,7 +1760,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining)
  */
 BlockNumber
 createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats)
+				  GinStatsData *buildStats, Buffer entrybuffer)
 {
 	BlockNumber blkno;
 	Buffer		buffer;
@@ -1810,6 +1811,12 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 	page = BufferGetPage(buffer);
 	blkno = BufferGetBlockNumber(buffer);
 
+	/*
+	 * Copy a predicate lock from entry tree leaf (containing posting list)
+	 * to  posting tree.
+	 */
+	PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno);
+
 	START_CRIT_SECTION();
 
 	PageRestoreTempPage(tmppage, page);
@@ -1904,6 +1911,7 @@ ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
 		btree.itemptr = insertdata.items[insertdata.curitem];
 		stack = ginFindLeafPage(&btree, false, NULL);
 
+		CheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		ginInsertValue(&btree, stack, &insertdata, buildStats);
 	}
 }
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 6fe67f3..4b31d44 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -16,9 +16,11 @@
 
 #include "access/gin_private.h"
 #include "access/relscan.h"
+#include "storage/predicate.h"
 #include "miscadmin.h"
 #include "utils/datum.h"
 #include "utils/memutils.h"
+#include "utils/rel.h"
 
 /* GUC parameter */
 int			GinFuzzySearchLimit = 0;
@@ -37,7 +39,7 @@ typedef struct pendingPosition
  * Goes to the next page if current offset is outside of bounds
  */
 static bool
-moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
+moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack, Snapshot snapshot)
 {
 	Page		page = BufferGetPage(stack->buffer);
 
@@ -51,6 +53,8 @@ moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
 
 		stack->buffer = ginStepRight(stack->buffer, btree->index, GIN_SHARE);
 		stack->blkno = BufferGetBlockNumber(stack->buffer);
+		if (!GinGetUseFastUpdate(btree->index))
+			PredicateLockPage(btree->index, stack->blkno, snapshot);
 		stack->off = FirstOffsetNumber;
 	}
 
@@ -73,6 +77,10 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	/* Descend to the leftmost leaf page */
 	stack = ginScanBeginPostingTree(&btree, index, rootPostingTree, snapshot);
 	buffer = stack->buffer;
+
+	if (!GinGetUseFastUpdate(index))
+		PredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
+
 	IncrBufferRefCount(buffer); /* prevent unpin in freeGinBtreeStack */
 
 	freeGinBtreeStack(stack);
@@ -94,6 +102,9 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 			break;				/* no more pages */
 
 		buffer = ginStepRight(buffer, index, GIN_SHARE);
+
+		if (!GinGetUseFastUpdate(index))
+			PredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
 	}
 
 	UnlockReleaseBuffer(buffer);
@@ -131,6 +142,9 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 	attnum = scanEntry->attnum;
 	attr = TupleDescAttr(btree->ginstate->origTupdesc, attnum - 1);
 
+	if (!GinGetUseFastUpdate(btree->index))
+		PredicateLockPage(btree->index, stack->buffer, snapshot);
+
 	for (;;)
 	{
 		Page		page;
@@ -141,7 +155,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 		/*
 		 * stack->off points to the interested entry, buffer is already locked
 		 */
-		if (moveRightIfItNeeded(btree, stack) == false)
+		if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 			return true;
 
 		page = BufferGetPage(stack->buffer);
@@ -250,7 +264,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 				Datum		newDatum;
 				GinNullCategory newCategory;
 
-				if (moveRightIfItNeeded(btree, stack) == false)
+				if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 					elog(ERROR, "lost saved point in index");	/* must not happen !!! */
 
 				page = BufferGetPage(stack->buffer);
@@ -323,6 +337,15 @@ restartScanEntry:
 						ginstate);
 	stackEntry = ginFindLeafPage(&btreeEntry, true, snapshot);
 	page = BufferGetPage(stackEntry->buffer);
+
+	/*
+	 * If fast update is enabled, we acquire a predicate lock on the entire
+	 * relation, as fast update postpones the insertion of tuples into the
+	 * index structure, due to which we can't detect r-w conflicts.
+	 */
+	if (GinGetUseFastUpdate(ginstate->index))
+		PredicateLockRelation(ginstate->index, snapshot);
+
 	/* ginFindLeafPage() will have already checked snapshot age. */
 	needUnlock = true;
 
@@ -391,6 +414,9 @@ restartScanEntry:
 											rootPostingTree, snapshot);
 			entry->buffer = stack->buffer;
 
+			if (!GinGetUseFastUpdate(ginstate->index))
+				PredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
 			/*
 			 * We keep buffer pinned because we need to prevent deletion of
 			 * page during scan. See GIN's vacuum implementation. RefCount is
@@ -414,6 +440,9 @@ restartScanEntry:
 		}
 		else if (GinGetNPosting(itup) > 0)
 		{
+			if (!GinGetUseFastUpdate(ginstate->index))
+				PredicateLockPage(ginstate->index, BufferGetBlockNumber(stackEntry->buffer), snapshot);
+
 			entry->list = ginReadTuple(ginstate, entry->attnum, itup,
 									   &entry->nlist);
 			entry->predictNumberResult = entry->nlist;
@@ -493,7 +522,7 @@ startScanKey(GinState *ginstate, GinScanOpaque so, GinScanKey key)
 
 		for (i = 0; i < key->nentries - 1; i++)
 		{
-			/* Pass all entries <= i as FALSE, and the rest as MAYBE */
+			/* Pass all entries <= i as false, and the rest as MAYBE */
 			for (j = 0; j <= i; j++)
 				key->entryRes[entryIndexes[j]] = GIN_FALSE;
 			for (j = i + 1; j < key->nentries; j++)
@@ -633,6 +662,9 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 		entry->btree.fullScan = false;
 		stack = ginFindLeafPage(&entry->btree, true, snapshot);
 
+		if (!GinGetUseFastUpdate(ginstate->index))
+			PredicateLockPage(ginstate->index, BufferGetBlockNumber(stack->buffer), snapshot);
+
 		/* we don't need the stack, just the buffer. */
 		entry->buffer = stack->buffer;
 		IncrBufferRefCount(entry->buffer);
@@ -677,6 +709,11 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 			entry->buffer = ginStepRight(entry->buffer,
 										 ginstate->index,
 										 GIN_SHARE);
+
+			if (!GinGetUseFastUpdate(ginstate->index))
+				PredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
+
 			page = BufferGetPage(entry->buffer);
 		}
 		stepright = true;
@@ -1038,8 +1075,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
 	 * lossy page even when none of the other entries match.
 	 *
 	 * Our strategy is to call the tri-state consistent function, with the
-	 * lossy-page entries set to MAYBE, and all the other entries FALSE. If it
-	 * returns FALSE, none of the lossy items alone are enough for a match, so
+	 * lossy-page entries set to MAYBE, and all the other entries false. If it
+	 * returns false, none of the lossy items alone are enough for a match, so
 	 * we don't need to return a lossy-page pointer. Otherwise, return a
 	 * lossy-page pointer to indicate that the whole heap page must be
 	 * checked.  (On subsequent calls, we'll do nothing until minItem is past
@@ -1733,6 +1770,13 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 		return;
 	}
 
+	/*
+	 * If fast update is disabled, but some items still exist in the pending
+	 * list, then a predicate lock on the entire relation is required.
+	 */
+	if (!GinGetUseFastUpdate(scan->indexRelation))
+		PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
+
 	pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
 	LockBuffer(pos.pendingBuffer, GIN_SHARE);
 	pos.firstOffset = FirstOffsetNumber;
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 473cc3d..c10236a 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -22,6 +22,7 @@
 #include "storage/bufmgr.h"
 #include "storage/smgr.h"
 #include "storage/indexfsm.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 
@@ -48,7 +49,7 @@ static IndexTuple
 addItemPointersToLeafTuple(GinState *ginstate,
 						   IndexTuple old,
 						   ItemPointerData *items, uint32 nitem,
-						   GinStatsData *buildStats)
+						   GinStatsData *buildStats, Buffer buffer)
 {
 	OffsetNumber attnum;
 	Datum		key;
@@ -99,7 +100,8 @@ addItemPointersToLeafTuple(GinState *ginstate,
 		postingRoot = createPostingTree(ginstate->index,
 										oldItems,
 										oldNPosting,
-										buildStats);
+										buildStats,
+										buffer);
 
 		/* Now insert the TIDs-to-be-added into the posting tree */
 		ginInsertItemPointers(ginstate->index, postingRoot,
@@ -127,7 +129,7 @@ static IndexTuple
 buildFreshLeafTuple(GinState *ginstate,
 					OffsetNumber attnum, Datum key, GinNullCategory category,
 					ItemPointerData *items, uint32 nitem,
-					GinStatsData *buildStats)
+					GinStatsData *buildStats, Buffer buffer)
 {
 	IndexTuple	res = NULL;
 	GinPostingList *compressedList;
@@ -157,7 +159,7 @@ buildFreshLeafTuple(GinState *ginstate,
 		 * Initialize a new posting tree with the TIDs.
 		 */
 		postingRoot = createPostingTree(ginstate->index, items, nitem,
-										buildStats);
+										buildStats, buffer);
 
 		/* And save the root link in the result tuple */
 		GinSetPostingTree(res, postingRoot);
@@ -217,17 +219,19 @@ ginEntryInsert(GinState *ginstate,
 			return;
 		}
 
+		CheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* modify an existing leaf entry */
 		itup = addItemPointersToLeafTuple(ginstate, itup,
-										  items, nitem, buildStats);
+										  items, nitem, buildStats, stack->buffer);
 
 		insertdata.isDelete = true;
 	}
 	else
 	{
+		CheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* no match, so construct a new leaf entry */
 		itup = buildFreshLeafTuple(ginstate, attnum, key, category,
-								   items, nitem, buildStats);
+								   items, nitem, buildStats, stack->buffer);
 	}
 
 	/* Insert the new or modified leaf tuple */
@@ -513,6 +517,8 @@ gininsert(Relation index, Datum *values, bool *isnull,
 
 		memset(&collector, 0, sizeof(GinTupleCollector));
 
+		CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
+
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleFastCollect(ginstate, &collector,
 									(OffsetNumber) (i + 1),
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 7bac7a1..3acacb7 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -49,7 +49,7 @@ ginhandler(PG_FUNCTION_ARGS)
 	amroutine->amsearchnulls = false;
 	amroutine->amstorage = true;
 	amroutine->amclusterable = false;
-	amroutine->ampredlocks = false;
+	amroutine->ampredlocks = true;
 	amroutine->amcanparallel = false;
 	amroutine->amkeytype = InvalidOid;
 
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 398532d..6bce589 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -22,6 +22,7 @@
 #include "postmaster/autovacuum.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 
 struct GinVacuumState
@@ -153,11 +154,18 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
 
 	LockBuffer(lBuffer, GIN_EXCLUSIVE);
 
+	page = BufferGetPage(dBuffer);
+	rightlink = GinPageGetOpaque(page)->rightlink;
+
+	/*
+	 * Any insert which would have gone on the leaf block will now go to its
+	 * right sibling.
+	 */
+	PredicateLockPageCombine(gvs->index, deleteBlkno, rightlink);
+
 	START_CRIT_SECTION();
 
 	/* Unlink the page by changing left sibling's rightlink */
-	page = BufferGetPage(dBuffer);
-	rightlink = GinPageGetOpaque(page)->rightlink;
 
 	page = BufferGetPage(lBuffer);
 	GinPageGetOpaque(page)->rightlink = rightlink;
diff --git a/src/backend/storage/lmgr/README-SSI b/src/backend/storage/lmgr/README-SSI
index a9dc01f..6bbdfef 100644
--- a/src/backend/storage/lmgr/README-SSI
+++ b/src/backend/storage/lmgr/README-SSI
@@ -379,6 +379,17 @@ level during a GiST search. An index insert at the leaf level can
 then be trusted to ripple up to all levels and locations where
 conflicting predicate locks may exist.
 
+    * Gin searches acquire predicate locks only on the leaf pages
+of entry tree and posting tree. We acquire a predicate lock on entry
+tree leaf pages only when entry has a posting list. If entry tree has
+a pointer to posting tree, we skip locking entry tree leaf page and lock
+only posting tree leaf pages. If, however, fast update is enabled, a
+predicate lock on the index relation is required as fast update postpones
+the insertion of tuples into index structure by temporarily storing them
+into pending list due to which we are unable to detect all r-w conflicts.
+During a page split, a predicate lock is copied from the original page
+to the new page.
+
     * The effects of page splits, overflows, consolidations, and
 removals must be carefully reviewed to ensure that predicate locks
 aren't "lost" during those operations, or kept with pages which could
diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h
index a709596..4a847c9 100644
--- a/src/include/access/gin_private.h
+++ b/src/include/access/gin_private.h
@@ -217,7 +217,7 @@ extern ItemPointer GinDataLeafPageGetItems(Page page, int *nitems, ItemPointerDa
 extern int	GinDataLeafPageGetItemsToTbm(Page page, TIDBitmap *tbm);
 extern BlockNumber createPostingTree(Relation index,
 				  ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats);
+				  GinStatsData *buildStats, Buffer entrybuffer);
 extern void GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset);
 extern void GinPageDeletePostingItem(Page page, OffsetNumber offset);
 extern void ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
diff --git a/src/test/isolation/expected/predicate-gin.out b/src/test/isolation/expected/predicate-gin.out
new file mode 100644
index 0000000..b051d11
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin.out
@@ -0,0 +1,719 @@
+Parsed test spec with 2 sessions
+
+starting permutation: rxy1 wx1 c1 rxy2 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10050          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+
+starting permutation: rxy2 wy2 c2 rxy1 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10050          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+
+starting permutation: rxy3 wx3 c1 rxy4 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 wy4 c2 rxy3 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy1 wx1 rxy2 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 wx1 rxy2 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 c2 wx1 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 rxy1 wx1 c1 wy2 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wx1 wy2 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy3 wx3 rxy4 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wx3 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 c2 wx3 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wx3 c1 wy4 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index befe676..ce14929 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -65,3 +65,4 @@ test: async-notify
 test: vacuum-reltuples
 test: timeouts
 test: vacuum-concurrent-drop
+test: predicate-gin
diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec
new file mode 100644
index 0000000..278911c
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin.spec
@@ -0,0 +1,116 @@
+# Test for page level predicate locking in gin index
+#
+# Test to verify serialization failures and to check reduced false positives
+#
+# To verify serialization failures, queries and permutations are written in such
+# a way that an index scan  (from one transaction) and an index insert (from
+# another transaction) will try to access the same part (sub-tree) of the index
+# whereas to check reduced false positives, they will try to access different
+# parts (sub-tree) of the index.
+
+
+setup
+{
+  create table gin_tbl(id int4, p int4[]);
+  create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
+  insert into gin_tbl select g, array[g, g*2,g*3] from generate_series(1, 10000) g;
+  insert into gin_tbl select g, array[4,5,6] from generate_series(10001, 20000) g;
+}
+
+teardown
+{
+  drop table gin_tbl;
+}
+
+session "s1"
+setup
+{
+  begin isolation level serializable;
+  set enable_seqscan=off;
+}
+
+step "rxy1"	{ select count(*) from gin_tbl where p @> array[4,5]; }
+step "wx1"	{ insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g; }
+step "rxy3"	{ select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; }
+step "wx3"	{ insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g; }
+step "c1"  { commit; }
+
+session "s2"
+setup		
+{ 
+  begin isolation level serializable;
+  set enable_seqscan=off;
+}
+
+step "rxy2"	{ select count(*) from gin_tbl where p @> array[5,6]; }
+step "wy2"	{ insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g; }
+step "rxy4"	{ select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000]; }
+step "wy4"	{ insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g; }
+step "c2"	{ commit; }
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index but one transaction commits before other
+# transaction begins so no r-w conflict.
+
+permutation "rxy1" "wx1" "c1" "rxy2" "wy2" "c2"
+permutation "rxy2" "wy2" "c2" "rxy1" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index and also one transaction commits before
+# other transaction begins, so no r-w conflict.
+
+permutation "rxy3" "wx3" "c1" "rxy4" "wy4" "c2"
+permutation "rxy4" "wy4" "c2" "rxy3" "wx3" "c1"
+
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index and one transaction begins before other
+# transaction commits so there is a r-w conflict.
+
+permutation "rxy1" "wx1" "rxy2" "c1" "wy2" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c1" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wx1" "c1" "wy2" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c1" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c1" "c2"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "rxy1" "wx1" "c1" "wy2" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c1" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c1" "c2"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c1" "c2"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c2" "c1"
+permutation "rxy2" "wy2" "rxy1" "c2" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index so no r-w conflict.
+
+permutation "rxy3" "wx3" "rxy4" "c1" "wy4" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c1" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wx3" "c1" "wy4" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c1" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c1" "c2"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "rxy3" "wx3" "c1" "wy4" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c1" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c1" "c2"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c1" "c2"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c2" "c1"
+permutation "rxy4" "wy4" "rxy3" "c2" "wx3" "c1"
-- 
1.9.1

#14Andres Freund
andres@anarazel.de
In reply to: Shubham Barai (#13)
Re: GSoC 2017: weekly progress reports (week 6)

This appears to be a duplicate of https://commitfest.postgresql.org/17/1466/ - as the other one is older, I'm closing this one.

#15Andrey Borodin
x4mmm@yandex-team.ru
In reply to: Shubham Barai (#13)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

Hi!

28 февр. 2018 г., в 22:19, Shubham Barai <shubhambaraiss@gmail.com> написал(а):

Sure. I have attached a rebased version

I've looked into the code closely again. The patch is heavily reworked since GSoC state :)
Tests are looking fine and locking is fine-grained.
But there is one thing I could not understand:
Why do we take a lock during moveRightIfItNeeded()?
This place is supposed to be called whenever page is split just before we a locking it and right after we've come to the page from parent.

Best regards, Andrey Borodin.

#16Alvaro Herrera
alvherre@2ndquadrant.com
In reply to: Andres Freund (#14)
Re: GSoC 2017: weekly progress reports (week 6)

Andres Freund wrote:

This appears to be a duplicate of https://commitfest.postgresql.org/17/1466/ - as the other one is older, I'm closing this one.

This comment makes no sense from the POV of the mail archives. I had to
look at the User-Agent in your email to realize that you wrote it in the
commitfest app. I see three problems here

1. impersonating the "From:" header is a bad idea; needs fixed much as
we did with the bugs and doc comments submission forms
2. it should have had a header indicating it comes from CF app
3. it would be great to include in said header a link to the CF entry
to which the comment was attached.

Thanks

--
Álvaro Herrera https://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services

#17Alvaro Herrera
alvherre@2ndquadrant.com
In reply to: Shubham Barai (#13)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

I suggest to create a new function GinPredicateLockPage() that checks
whether fast update is enabled for the index. The current arrangement
looks too repetitive and it seems easy to make a mistake.

Stylistically, please keep #include lines ordered alphabetically, and
cut long lines to below 80 chars.

--
Álvaro Herrera https://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services

#18Andres Freund
andres@anarazel.de
In reply to: Alvaro Herrera (#16)
Re: GSoC 2017: weekly progress reports (week 6)

Hi,

On 2018-03-07 11:58:51 -0300, Alvaro Herrera wrote:

This appears to be a duplicate of https://commitfest.postgresql.org/17/1466/ - as the other one is older, I'm closing this one.

This comment makes no sense from the POV of the mail archives. I had to
look at the User-Agent in your email to realize that you wrote it in the
commitfest app.

Yea, I stopped doing so afterwards...

1. impersonating the "From:" header is a bad idea; needs fixed much as
we did with the bugs and doc comments submission forms
2. it should have had a header indicating it comes from CF app
3. it would be great to include in said header a link to the CF entry
to which the comment was attached.

Sounds reasonable.

Greetings,

Andres Freund

#19Shubham Barai
shubhambaraiss@gmail.com
In reply to: Alvaro Herrera (#17)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

On 07-Mar-2018 11:00 PM, "Alvaro Herrera" <alvherre@2ndquadrant.com> wrote:

I suggest to create a new function GinPredicateLockPage() that checks
whether fast update is enabled for the index. The current arrangement
looks too repetitive and it seems easy to make a mistake.

Stylistically, please keep #include lines ordered alphabetically, and
cut long lines to below 80 chars.

Okay, I will update the patch.

#20Alexander Korotkov
a.korotkov@postgrespro.ru
In reply to: Alvaro Herrera (#17)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

On Wed, Mar 7, 2018 at 8:30 PM, Alvaro Herrera <alvherre@2ndquadrant.com>
wrote:

I suggest to create a new function GinPredicateLockPage() that checks
whether fast update is enabled for the index. The current arrangement
looks too repetitive and it seems easy to make a mistake.

BTW, should we also skip CheckForSerializableConflictIn() when
fast update is enabled? AFAICS, now it doesn't cause any errors or
false positives, but makes useless load. Is it correct?

------
Alexander Korotkov
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company

#21Andrey Borodin
x4mmm@yandex-team.ru
In reply to: Alexander Korotkov (#20)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

12 марта 2018 г., в 1:54, Alexander Korotkov <a.korotkov@postgrespro.ru> написал(а):

On Wed, Mar 7, 2018 at 8:30 PM, Alvaro Herrera <alvherre@2ndquadrant.com> wrote:
I suggest to create a new function GinPredicateLockPage() that checks
whether fast update is enabled for the index. The current arrangement
looks too repetitive and it seems easy to make a mistake.

BTW, should we also skip CheckForSerializableConflictIn() when
fast update is enabled? AFAICS, now it doesn't cause any errors or
false positives, but makes useless load. Is it correct?

BTW to BTW. I think we should check pending list size with GinGetPendingListCleanupSize() here
+
+	/*
+	 * If fast update is enabled, we acquire a predicate lock on the entire
+	 * relation as fast update postpones the insertion of tuples into index
+	 * structure due to which we can't detect rw conflicts.
+	 */
+	if (GinGetUseFastUpdate(ginstate->index))
+		PredicateLockRelation(ginstate->index, snapshot);

Because we can alter alter index set (fastupdate = off), but there still will be pending list.

We were discussing this with Shubham back in July, chosen some approach that seemed better, but I can't remember what was that...

Best regards, Andrey Borodin.

#22Alexander Korotkov
a.korotkov@postgrespro.ru
In reply to: Andrey Borodin (#21)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

On Mon, Mar 12, 2018 at 9:47 AM, Andrey Borodin <x4mmm@yandex-team.ru>
wrote:

12 марта 2018 г., в 1:54, Alexander Korotkov <a.korotkov@postgrespro.ru>

написал(а):

On Wed, Mar 7, 2018 at 8:30 PM, Alvaro Herrera <alvherre@2ndquadrant.com>

wrote:

I suggest to create a new function GinPredicateLockPage() that checks
whether fast update is enabled for the index. The current arrangement
looks too repetitive and it seems easy to make a mistake.

BTW, should we also skip CheckForSerializableConflictIn() when
fast update is enabled? AFAICS, now it doesn't cause any errors or
false positives, but makes useless load. Is it correct?

BTW to BTW. I think we should check pending list size with
GinGetPendingListCleanupSize() here
+
+       /*
+        * If fast update is enabled, we acquire a predicate lock on the
entire
+        * relation as fast update postpones the insertion of tuples into
index
+        * structure due to which we can't detect rw conflicts.
+        */
+       if (GinGetUseFastUpdate(ginstate->index))
+               PredicateLockRelation(ginstate->index, snapshot);

Because we can alter alter index set (fastupdate = off), but there still
will be pending list.

And what happen if somebody concurrently set (fastupdate = on)?
Can we miss conflicts because of that?

------
Alexander Korotkov
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company

#23Alvaro Herrera
alvherre@alvh.no-ip.org
In reply to: Alexander Korotkov (#22)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

Alexander Korotkov wrote:

And what happen if somebody concurrently set (fastupdate = on)?
Can we miss conflicts because of that?

I think it'd be better to have that option require AccessExclusive lock,
so that it can never be changed concurrently with readers. Seems to me
that penalizing every single read to cope with this case would be a bad
trade-off.

--
Álvaro Herrera https://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services

#24Andrey Borodin
x4mmm@yandex-team.ru
In reply to: Alexander Korotkov (#22)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

13 марта 2018 г., в 17:02, Alexander Korotkov <a.korotkov@postgrespro.ru> написал(а):

BTW to BTW. I think we should check pending list size with GinGetPendingListCleanupSize() here
+
+       /*
+        * If fast update is enabled, we acquire a predicate lock on the entire
+        * relation as fast update postpones the insertion of tuples into index
+        * structure due to which we can't detect rw conflicts.
+        */
+       if (GinGetUseFastUpdate(ginstate->index))
+               PredicateLockRelation(ginstate->index, snapshot);

Because we can alter alter index set (fastupdate = off), but there still will be pending list.

And what happen if somebody concurrently set (fastupdate = on)?
Can we miss conflicts because of that?

No, AccessExclusiveLock will prevent this kind of problems with enabling fastupdate.

Best regards, Andrey Borodin.

#25Alexander Korotkov
a.korotkov@postgrespro.ru
In reply to: Andrey Borodin (#24)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

On Tue, Mar 13, 2018 at 3:26 PM, Andrey Borodin <x4mmm@yandex-team.ru>
wrote:

13 марта 2018 г., в 17:02, Alexander Korotkov <a.korotkov@postgrespro.ru>

написал(а):

BTW to BTW. I think we should check pending list size with

GinGetPendingListCleanupSize() here

+
+       /*
+        * If fast update is enabled, we acquire a predicate lock on the

entire

+ * relation as fast update postpones the insertion of tuples

into index

+        * structure due to which we can't detect rw conflicts.
+        */
+       if (GinGetUseFastUpdate(ginstate->index))
+               PredicateLockRelation(ginstate->index, snapshot);

Because we can alter alter index set (fastupdate = off), but there still

will be pending list.

And what happen if somebody concurrently set (fastupdate = on)?
Can we miss conflicts because of that?

No, AccessExclusiveLock will prevent this kind of problems with enabling
fastupdate.

True. I didn't notice that ALTER INDEX SET locks index in so high mode.
Thus, everything is fine from this perspective.

------
Alexander Korotkov
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company

#26Alexander Korotkov
a.korotkov@postgrespro.ru
In reply to: Alvaro Herrera (#23)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

On Tue, Mar 13, 2018 at 3:25 PM, Alvaro Herrera <alvherre@alvh.no-ip.org>
wrote:

Alexander Korotkov wrote:

And what happen if somebody concurrently set (fastupdate = on)?
Can we miss conflicts because of that?

I think it'd be better to have that option require AccessExclusive lock,
so that it can never be changed concurrently with readers. Seems to me
that penalizing every single read to cope with this case would be a bad
trade-off.

As Andrey Borodin mentioned, we already do. Sorry for buzz :)

------
Alexander Korotkov
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company

#27Shubham Barai
shubhambaraiss@gmail.com
In reply to: Alexander Korotkov (#26)
1 attachment(s)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

On 16 March 2018 at 03:57, Alexander Korotkov <a.korotkov@postgrespro.ru>
wrote:

On Tue, Mar 13, 2018 at 3:25 PM, Alvaro Herrera <alvherre@alvh.no-ip.org>
wrote:

Alexander Korotkov wrote:

And what happen if somebody concurrently set (fastupdate = on)?
Can we miss conflicts because of that?

I think it'd be better to have that option require AccessExclusive lock,
so that it can never be changed concurrently with readers. Seems to me
that penalizing every single read to cope with this case would be a bad
trade-off.

As Andrey Borodin mentioned, we already do. Sorry for buzz :)

I have updated the patch based on suggestions.

Regards,
Shubham

Attachments:

Predicate-Locking-in-gin-index_v6.patchapplication/octet-stream; name=Predicate-Locking-in-gin-index_v6.patchDownload
From be695b3d8d88723573b937397e13a8218c7b91f5 Mon Sep 17 00:00:00 2001
From: shubhambaraiss <you@example.com>
Date: Wed, 28 Feb 2018 21:27:21 +0530
Subject: [PATCH] Predicate Locking in gin index

---
 src/backend/access/gin/ginbtree.c             |  22 +
 src/backend/access/gin/gindatapage.c          |  12 +-
 src/backend/access/gin/ginget.c               |  54 +-
 src/backend/access/gin/gininsert.c            |  18 +-
 src/backend/access/gin/ginutil.c              |  10 +-
 src/backend/access/gin/ginvacuum.c            |  12 +-
 src/backend/storage/lmgr/README-SSI           |  11 +
 src/include/access/gin_private.h              |   4 +-
 src/test/isolation/expected/predicate-gin.out | 719 ++++++++++++++++++++++++++
 src/test/isolation/isolation_schedule         |   1 +
 src/test/isolation/specs/predicate-gin.spec   | 116 +++++
 11 files changed, 961 insertions(+), 18 deletions(-)
 create mode 100644 src/test/isolation/expected/predicate-gin.out
 create mode 100644 src/test/isolation/specs/predicate-gin.spec

diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 37070b3..095b119 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -17,6 +17,7 @@
 #include "access/gin_private.h"
 #include "access/ginxlog.h"
 #include "access/xloginsert.h"
+#include "storage/predicate.h"
 #include "miscadmin.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
@@ -515,6 +516,19 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			btree->fillRoot(btree, newrootpg,
 							BufferGetBlockNumber(lbuffer), newlpage,
 							BufferGetBlockNumber(rbuffer), newrpage);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(lbuffer));
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
+
 		}
 		else
 		{
@@ -524,6 +538,14 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			GinPageGetOpaque(newrpage)->rightlink = savedRightLink;
 			GinPageGetOpaque(newlpage)->flags |= GIN_INCOMPLETE_SPLIT;
 			GinPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
 		}
 
 		/*
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index f9daaba..3fb4fc8 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -19,6 +19,7 @@
 #include "access/xloginsert.h"
 #include "lib/ilist.h"
 #include "miscadmin.h"
+#include "storage/predicate.h"
 #include "utils/rel.h"
 
 /*
@@ -1423,7 +1424,7 @@ disassembleLeaf(Page page)
  * Any segments that acquire new items are decoded, and the new items are
  * merged with the old items.
  *
- * Returns true if any new items were added. False means they were all
+ * Returns true if any new items were added. false means they were all
  * duplicates of existing items on the page.
  */
 static bool
@@ -1759,7 +1760,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining)
  */
 BlockNumber
 createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats)
+				  GinStatsData *buildStats, Buffer entrybuffer)
 {
 	BlockNumber blkno;
 	Buffer		buffer;
@@ -1810,6 +1811,12 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 	page = BufferGetPage(buffer);
 	blkno = BufferGetBlockNumber(buffer);
 
+	/*
+	 * Copy a predicate lock from entry tree leaf (containing posting list)
+	 * to  posting tree.
+	 */
+	PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno);
+
 	START_CRIT_SECTION();
 
 	PageRestoreTempPage(tmppage, page);
@@ -1904,6 +1911,7 @@ ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
 		btree.itemptr = insertdata.items[insertdata.curitem];
 		stack = ginFindLeafPage(&btree, false, NULL);
 
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		ginInsertValue(&btree, stack, &insertdata, buildStats);
 	}
 }
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 6fe67f3..6360385 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -17,8 +17,10 @@
 #include "access/gin_private.h"
 #include "access/relscan.h"
 #include "miscadmin.h"
+#include "storage/predicate.h"
 #include "utils/datum.h"
 #include "utils/memutils.h"
+#include "utils/rel.h"
 
 /* GUC parameter */
 int			GinFuzzySearchLimit = 0;
@@ -33,11 +35,18 @@ typedef struct pendingPosition
 } pendingPosition;
 
 
+static void
+GinPredicateLockPage(Relation index, BlockNumber blkno, Snapshot snapshot)
+{
+	if (!GinGetUseFastUpdate(index))
+			PredicateLockPage(index, blkno, snapshot);
+}
+
 /*
  * Goes to the next page if current offset is outside of bounds
  */
 static bool
-moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
+moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack, Snapshot snapshot)
 {
 	Page		page = BufferGetPage(stack->buffer);
 
@@ -73,6 +82,9 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	/* Descend to the leftmost leaf page */
 	stack = ginScanBeginPostingTree(&btree, index, rootPostingTree, snapshot);
 	buffer = stack->buffer;
+
+	GinPredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
+
 	IncrBufferRefCount(buffer); /* prevent unpin in freeGinBtreeStack */
 
 	freeGinBtreeStack(stack);
@@ -94,6 +106,8 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 			break;				/* no more pages */
 
 		buffer = ginStepRight(buffer, index, GIN_SHARE);
+
+		GinPredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
 	}
 
 	UnlockReleaseBuffer(buffer);
@@ -131,6 +145,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 	attnum = scanEntry->attnum;
 	attr = TupleDescAttr(btree->ginstate->origTupdesc, attnum - 1);
 
+	GinPredicateLockPage(btree->index, stack->buffer, snapshot);
+
 	for (;;)
 	{
 		Page		page;
@@ -141,7 +157,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 		/*
 		 * stack->off points to the interested entry, buffer is already locked
 		 */
-		if (moveRightIfItNeeded(btree, stack) == false)
+		if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 			return true;
 
 		page = BufferGetPage(stack->buffer);
@@ -250,7 +266,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 				Datum		newDatum;
 				GinNullCategory newCategory;
 
-				if (moveRightIfItNeeded(btree, stack) == false)
+				if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 					elog(ERROR, "lost saved point in index");	/* must not happen !!! */
 
 				page = BufferGetPage(stack->buffer);
@@ -323,6 +339,15 @@ restartScanEntry:
 						ginstate);
 	stackEntry = ginFindLeafPage(&btreeEntry, true, snapshot);
 	page = BufferGetPage(stackEntry->buffer);
+
+	/*
+	 * If fast update is enabled, we acquire a predicate lock on the entire
+	 * relation as fast update postpones the insertion of tuples into index
+	 * structure due to which we can't detect rw conflicts.
+	 */
+	if (GinGetUseFastUpdate(ginstate->index))
+		PredicateLockRelation(ginstate->index, snapshot);
+
 	/* ginFindLeafPage() will have already checked snapshot age. */
 	needUnlock = true;
 
@@ -391,6 +416,8 @@ restartScanEntry:
 											rootPostingTree, snapshot);
 			entry->buffer = stack->buffer;
 
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
 			/*
 			 * We keep buffer pinned because we need to prevent deletion of
 			 * page during scan. See GIN's vacuum implementation. RefCount is
@@ -414,6 +441,8 @@ restartScanEntry:
 		}
 		else if (GinGetNPosting(itup) > 0)
 		{
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(stackEntry->buffer), snapshot);
+
 			entry->list = ginReadTuple(ginstate, entry->attnum, itup,
 									   &entry->nlist);
 			entry->predictNumberResult = entry->nlist;
@@ -493,7 +522,7 @@ startScanKey(GinState *ginstate, GinScanOpaque so, GinScanKey key)
 
 		for (i = 0; i < key->nentries - 1; i++)
 		{
-			/* Pass all entries <= i as FALSE, and the rest as MAYBE */
+			/* Pass all entries <= i as false, and the rest as MAYBE */
 			for (j = 0; j <= i; j++)
 				key->entryRes[entryIndexes[j]] = GIN_FALSE;
 			for (j = i + 1; j < key->nentries; j++)
@@ -633,6 +662,8 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 		entry->btree.fullScan = false;
 		stack = ginFindLeafPage(&entry->btree, true, snapshot);
 
+		GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(stack->buffer), snapshot);
+
 		/* we don't need the stack, just the buffer. */
 		entry->buffer = stack->buffer;
 		IncrBufferRefCount(entry->buffer);
@@ -677,6 +708,10 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 			entry->buffer = ginStepRight(entry->buffer,
 										 ginstate->index,
 										 GIN_SHARE);
+
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
+
 			page = BufferGetPage(entry->buffer);
 		}
 		stepright = true;
@@ -1038,8 +1073,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
 	 * lossy page even when none of the other entries match.
 	 *
 	 * Our strategy is to call the tri-state consistent function, with the
-	 * lossy-page entries set to MAYBE, and all the other entries FALSE. If it
-	 * returns FALSE, none of the lossy items alone are enough for a match, so
+	 * lossy-page entries set to MAYBE, and all the other entries false. If it
+	 * returns false, none of the lossy items alone are enough for a match, so
 	 * we don't need to return a lossy-page pointer. Otherwise, return a
 	 * lossy-page pointer to indicate that the whole heap page must be
 	 * checked.  (On subsequent calls, we'll do nothing until minItem is past
@@ -1733,6 +1768,13 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 		return;
 	}
 
+	/*
+	 * If fast update is disabled, but some items still exist in the pending
+	 * list, then a predicate lock on the entire relation is required.
+	 */
+	if (GinGetPendingListCleanupSize(scan->indexRelation))
+		PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
+
 	pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
 	LockBuffer(pos.pendingBuffer, GIN_SHARE);
 	pos.firstOffset = FirstOffsetNumber;
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 473cc3d..98cdbd1 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -22,6 +22,7 @@
 #include "storage/bufmgr.h"
 #include "storage/smgr.h"
 #include "storage/indexfsm.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 
@@ -48,7 +49,7 @@ static IndexTuple
 addItemPointersToLeafTuple(GinState *ginstate,
 						   IndexTuple old,
 						   ItemPointerData *items, uint32 nitem,
-						   GinStatsData *buildStats)
+						   GinStatsData *buildStats, Buffer buffer)
 {
 	OffsetNumber attnum;
 	Datum		key;
@@ -99,7 +100,8 @@ addItemPointersToLeafTuple(GinState *ginstate,
 		postingRoot = createPostingTree(ginstate->index,
 										oldItems,
 										oldNPosting,
-										buildStats);
+										buildStats,
+										buffer);
 
 		/* Now insert the TIDs-to-be-added into the posting tree */
 		ginInsertItemPointers(ginstate->index, postingRoot,
@@ -127,7 +129,7 @@ static IndexTuple
 buildFreshLeafTuple(GinState *ginstate,
 					OffsetNumber attnum, Datum key, GinNullCategory category,
 					ItemPointerData *items, uint32 nitem,
-					GinStatsData *buildStats)
+					GinStatsData *buildStats, Buffer buffer)
 {
 	IndexTuple	res = NULL;
 	GinPostingList *compressedList;
@@ -157,7 +159,7 @@ buildFreshLeafTuple(GinState *ginstate,
 		 * Initialize a new posting tree with the TIDs.
 		 */
 		postingRoot = createPostingTree(ginstate->index, items, nitem,
-										buildStats);
+										buildStats, buffer);
 
 		/* And save the root link in the result tuple */
 		GinSetPostingTree(res, postingRoot);
@@ -217,17 +219,19 @@ ginEntryInsert(GinState *ginstate,
 			return;
 		}
 
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* modify an existing leaf entry */
 		itup = addItemPointersToLeafTuple(ginstate, itup,
-										  items, nitem, buildStats);
+										  items, nitem, buildStats, stack->buffer);
 
 		insertdata.isDelete = true;
 	}
 	else
 	{
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* no match, so construct a new leaf entry */
 		itup = buildFreshLeafTuple(ginstate, attnum, key, category,
-								   items, nitem, buildStats);
+								   items, nitem, buildStats, stack->buffer);
 	}
 
 	/* Insert the new or modified leaf tuple */
@@ -513,6 +517,8 @@ gininsert(Relation index, Datum *values, bool *isnull,
 
 		memset(&collector, 0, sizeof(GinTupleCollector));
 
+		GinCheckForSerializableConflictIn(index, NULL, InvalidBuffer);
+
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleFastCollect(ginstate, &collector,
 									(OffsetNumber) (i + 1),
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 7bac7a1..5632cc5 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -23,6 +23,7 @@
 #include "miscadmin.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/builtins.h"
 #include "utils/index_selfuncs.h"
 #include "utils/typcache.h"
@@ -49,7 +50,7 @@ ginhandler(PG_FUNCTION_ARGS)
 	amroutine->amsearchnulls = false;
 	amroutine->amstorage = true;
 	amroutine->amclusterable = false;
-	amroutine->ampredlocks = false;
+	amroutine->ampredlocks = true;
 	amroutine->amcanparallel = false;
 	amroutine->amkeytype = InvalidOid;
 
@@ -716,3 +717,10 @@ ginUpdateStats(Relation index, const GinStatsData *stats)
 
 	END_CRIT_SECTION();
 }
+
+void
+GinCheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
+{
+	if (!GinGetUseFastUpdate(relation))
+		CheckForSerializableConflictIn(relation, tuple, buffer);
+}
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 398532d..6bce589 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -22,6 +22,7 @@
 #include "postmaster/autovacuum.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 
 struct GinVacuumState
@@ -153,11 +154,18 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
 
 	LockBuffer(lBuffer, GIN_EXCLUSIVE);
 
+	page = BufferGetPage(dBuffer);
+	rightlink = GinPageGetOpaque(page)->rightlink;
+
+	/*
+	 * Any insert which would have gone on the leaf block will now go to its
+	 * right sibling.
+	 */
+	PredicateLockPageCombine(gvs->index, deleteBlkno, rightlink);
+
 	START_CRIT_SECTION();
 
 	/* Unlink the page by changing left sibling's rightlink */
-	page = BufferGetPage(dBuffer);
-	rightlink = GinPageGetOpaque(page)->rightlink;
 
 	page = BufferGetPage(lBuffer);
 	GinPageGetOpaque(page)->rightlink = rightlink;
diff --git a/src/backend/storage/lmgr/README-SSI b/src/backend/storage/lmgr/README-SSI
index a9dc01f..6bbdfef 100644
--- a/src/backend/storage/lmgr/README-SSI
+++ b/src/backend/storage/lmgr/README-SSI
@@ -379,6 +379,17 @@ level during a GiST search. An index insert at the leaf level can
 then be trusted to ripple up to all levels and locations where
 conflicting predicate locks may exist.
 
+    * Gin searches acquire predicate locks only on the leaf pages
+of entry tree and posting tree. We acquire a predicate lock on entry
+tree leaf pages only when entry has a posting list. If entry tree has
+a pointer to posting tree, we skip locking entry tree leaf page and lock
+only posting tree leaf pages. If, however, fast update is enabled, a
+predicate lock on the index relation is required as fast update postpones
+the insertion of tuples into index structure by temporarily storing them
+into pending list due to which we are unable to detect all r-w conflicts.
+During a page split, a predicate lock is copied from the original page
+to the new page.
+
     * The effects of page splits, overflows, consolidations, and
 removals must be carefully reviewed to ensure that predicate locks
 aren't "lost" during those operations, or kept with pages which could
diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h
index a709596..d1df303 100644
--- a/src/include/access/gin_private.h
+++ b/src/include/access/gin_private.h
@@ -103,6 +103,8 @@ extern Datum *ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
 extern OffsetNumber gintuple_get_attrnum(GinState *ginstate, IndexTuple tuple);
 extern Datum gintuple_get_key(GinState *ginstate, IndexTuple tuple,
 				 GinNullCategory *category);
+extern void GinCheckForSerializableConflictIn(Relation relation,
+				 HeapTuple tuple, Buffer buffer);
 
 /* gininsert.c */
 extern IndexBuildResult *ginbuild(Relation heap, Relation index,
@@ -217,7 +219,7 @@ extern ItemPointer GinDataLeafPageGetItems(Page page, int *nitems, ItemPointerDa
 extern int	GinDataLeafPageGetItemsToTbm(Page page, TIDBitmap *tbm);
 extern BlockNumber createPostingTree(Relation index,
 				  ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats);
+				  GinStatsData *buildStats, Buffer entrybuffer);
 extern void GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset);
 extern void GinPageDeletePostingItem(Page page, OffsetNumber offset);
 extern void ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
diff --git a/src/test/isolation/expected/predicate-gin.out b/src/test/isolation/expected/predicate-gin.out
new file mode 100644
index 0000000..b051d11
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin.out
@@ -0,0 +1,719 @@
+Parsed test spec with 2 sessions
+
+starting permutation: rxy1 wx1 c1 rxy2 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10050          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+
+starting permutation: rxy2 wy2 c2 rxy1 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10050          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+
+starting permutation: rxy3 wx3 c1 rxy4 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 wy4 c2 rxy3 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy1 wx1 rxy2 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 wx1 rxy2 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 c2 wx1 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 rxy1 wx1 c1 wy2 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wx1 wy2 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy3 wx3 rxy4 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wx3 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 c2 wx3 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wx3 c1 wy4 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index befe676..ce14929 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -65,3 +65,4 @@ test: async-notify
 test: vacuum-reltuples
 test: timeouts
 test: vacuum-concurrent-drop
+test: predicate-gin
diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec
new file mode 100644
index 0000000..278911c
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin.spec
@@ -0,0 +1,116 @@
+# Test for page level predicate locking in gin index
+#
+# Test to verify serialization failures and to check reduced false positives
+#
+# To verify serialization failures, queries and permutations are written in such
+# a way that an index scan (from one transaction) and an index insert (from
+# another transaction) will try to access the same part (sub-tree) of the index
+# whereas to check reduced false positives, they will try to access different
+# parts (sub-tree) of the index.
+
+
+setup
+{
+  create table gin_tbl(id int4, p int4[]);
+  create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
+  insert into gin_tbl select g, array[g, g*2,g*3] from generate_series(1, 10000) g;
+  insert into gin_tbl select g, array[4,5,6] from generate_series(10001, 20000) g;
+}
+
+teardown
+{
+  drop table gin_tbl;
+}
+
+session "s1"
+setup
+{
+  begin isolation level serializable;
+  set enable_seqscan=off;
+}
+
+step "rxy1"	{ select count(*) from gin_tbl where p @> array[4,5]; }
+step "wx1"	{ insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g; }
+step "rxy3"	{ select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; }
+step "wx3"	{ insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g; }
+step "c1"  { commit; }
+
+session "s2"
+setup		
+{ 
+  begin isolation level serializable;
+  set enable_seqscan=off;
+}
+
+step "rxy2"	{ select count(*) from gin_tbl where p @> array[5,6]; }
+step "wy2"	{ insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g; }
+step "rxy4"	{ select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000]; }
+step "wy4"	{ insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g; }
+step "c2"	{ commit; }
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index but one transaction commits before other
+# transaction begins so no r-w conflict.
+
+permutation "rxy1" "wx1" "c1" "rxy2" "wy2" "c2"
+permutation "rxy2" "wy2" "c2" "rxy1" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index and also one transaction commits before
+# other transaction begins, so no r-w conflict.
+
+permutation "rxy3" "wx3" "c1" "rxy4" "wy4" "c2"
+permutation "rxy4" "wy4" "c2" "rxy3" "wx3" "c1"
+
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index and one transaction begins before other
+# transaction commits so there is a r-w conflict.
+
+permutation "rxy1" "wx1" "rxy2" "c1" "wy2" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c1" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wx1" "c1" "wy2" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c1" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c1" "c2"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "rxy1" "wx1" "c1" "wy2" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c1" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c1" "c2"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c1" "c2"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c2" "c1"
+permutation "rxy2" "wy2" "rxy1" "c2" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index so no r-w conflict.
+
+permutation "rxy3" "wx3" "rxy4" "c1" "wy4" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c1" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wx3" "c1" "wy4" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c1" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c1" "c2"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "rxy3" "wx3" "c1" "wy4" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c1" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c1" "c2"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c1" "c2"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c2" "c1"
+permutation "rxy4" "wy4" "rxy3" "c2" "wx3" "c1"
-- 
1.9.1

#28Teodor Sigaev
teodor@sigaev.ru
In reply to: Shubham Barai (#27)
1 attachment(s)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

Hi!

Patch seems good, but I found one bug in it: in fact, nobody
checks for serializable conflicts with fastupdate=on:
gininsert()
{
if (GinGetUseFastUpdate())
{
/* two next lines are GinCheckForSerializableConflictIn() */
if (!GinGetUseFastUpdate())
CheckForSerializableConflictIn()
}
}

I changed to direct call CheckForSerializableConflictIn() (see attachment)

I'd like to see fastupdate=on in the tests too; currently the tests cover only
the case without fastupdate. Please, add them.

Shubham Barai wrote:

On 16 March 2018 at 03:57, Alexander Korotkov <a.korotkov@postgrespro.ru
<mailto:a.korotkov@postgrespro.ru>> wrote:

On Tue, Mar 13, 2018 at 3:25 PM, Alvaro Herrera <alvherre@alvh.no-ip.org
<mailto:alvherre@alvh.no-ip.org>> wrote:

Alexander Korotkov wrote:

And what happen if somebody concurrently set (fastupdate = on)?
Can we miss conflicts because of that?

I think it'd be better to have that option require AccessExclusive lock,
so that it can never be changed concurrently with readers.  Seems to me
that penalizing every single read to cope with this case would be a bad
trade-off.

As Andrey Borodin mentioned, we already do.  Sorry for the buzz :)

I have updated the patch based on suggestions.

Regards,
Shubham

--
Teodor Sigaev E-mail: teodor@sigaev.ru
WWW: http://www.sigaev.ru/

Attachments:

Predicate-Locking-in-gin-index_v7.patchtext/x-patch; name=Predicate-Locking-in-gin-index_v7.patchDownload
commit b69b24ac54f3d31c4c25026aefd2c47ade2ed571
Author: Teodor Sigaev <teodor@sigaev.ru>
Date:   Fri Mar 23 14:25:24 2018 +0300

    x

diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 37070b3b72..095b1192cb 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -17,6 +17,7 @@
 #include "access/gin_private.h"
 #include "access/ginxlog.h"
 #include "access/xloginsert.h"
+#include "storage/predicate.h"
 #include "miscadmin.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
@@ -515,6 +516,19 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			btree->fillRoot(btree, newrootpg,
 							BufferGetBlockNumber(lbuffer), newlpage,
 							BufferGetBlockNumber(rbuffer), newrpage);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(lbuffer));
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
+
 		}
 		else
 		{
@@ -524,6 +538,14 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			GinPageGetOpaque(newrpage)->rightlink = savedRightLink;
 			GinPageGetOpaque(newlpage)->flags |= GIN_INCOMPLETE_SPLIT;
 			GinPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
 		}
 
 		/*
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index f9daaba52e..3fb4fc8264 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -19,6 +19,7 @@
 #include "access/xloginsert.h"
 #include "lib/ilist.h"
 #include "miscadmin.h"
+#include "storage/predicate.h"
 #include "utils/rel.h"
 
 /*
@@ -1423,7 +1424,7 @@ disassembleLeaf(Page page)
  * Any segments that acquire new items are decoded, and the new items are
  * merged with the old items.
  *
- * Returns true if any new items were added. False means they were all
+ * Returns true if any new items were added. false means they were all
  * duplicates of existing items on the page.
  */
 static bool
@@ -1759,7 +1760,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining)
  */
 BlockNumber
 createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats)
+				  GinStatsData *buildStats, Buffer entrybuffer)
 {
 	BlockNumber blkno;
 	Buffer		buffer;
@@ -1810,6 +1811,12 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 	page = BufferGetPage(buffer);
 	blkno = BufferGetBlockNumber(buffer);
 
+	/*
+	 * Copy a predicate lock from the entry tree leaf (containing the posting
+	 * list) to the posting tree.
+	 */
+	PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno);
+
 	START_CRIT_SECTION();
 
 	PageRestoreTempPage(tmppage, page);
@@ -1904,6 +1911,7 @@ ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
 		btree.itemptr = insertdata.items[insertdata.curitem];
 		stack = ginFindLeafPage(&btree, false, NULL);
 
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		ginInsertValue(&btree, stack, &insertdata, buildStats);
 	}
 }
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 6fe67f346d..63603859bc 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -17,8 +17,10 @@
 #include "access/gin_private.h"
 #include "access/relscan.h"
 #include "miscadmin.h"
+#include "storage/predicate.h"
 #include "utils/datum.h"
 #include "utils/memutils.h"
+#include "utils/rel.h"
 
 /* GUC parameter */
 int			GinFuzzySearchLimit = 0;
@@ -33,11 +35,18 @@ typedef struct pendingPosition
 } pendingPosition;
 
 
+static void
+GinPredicateLockPage(Relation index, BlockNumber blkno, Snapshot snapshot)
+{
+	if (!GinGetUseFastUpdate(index))
+			PredicateLockPage(index, blkno, snapshot);
+}
+
 /*
  * Goes to the next page if current offset is outside of bounds
  */
 static bool
-moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
+moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack, Snapshot snapshot)
 {
 	Page		page = BufferGetPage(stack->buffer);
 
@@ -73,6 +82,9 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	/* Descend to the leftmost leaf page */
 	stack = ginScanBeginPostingTree(&btree, index, rootPostingTree, snapshot);
 	buffer = stack->buffer;
+
+	GinPredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
+
 	IncrBufferRefCount(buffer); /* prevent unpin in freeGinBtreeStack */
 
 	freeGinBtreeStack(stack);
@@ -94,6 +106,8 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 			break;				/* no more pages */
 
 		buffer = ginStepRight(buffer, index, GIN_SHARE);
+
+		GinPredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
 	}
 
 	UnlockReleaseBuffer(buffer);
@@ -131,6 +145,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 	attnum = scanEntry->attnum;
 	attr = TupleDescAttr(btree->ginstate->origTupdesc, attnum - 1);
 
+	GinPredicateLockPage(btree->index, stack->buffer, snapshot);
+
 	for (;;)
 	{
 		Page		page;
@@ -141,7 +157,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 		/*
 		 * stack->off points to the interested entry, buffer is already locked
 		 */
-		if (moveRightIfItNeeded(btree, stack) == false)
+		if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 			return true;
 
 		page = BufferGetPage(stack->buffer);
@@ -250,7 +266,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 				Datum		newDatum;
 				GinNullCategory newCategory;
 
-				if (moveRightIfItNeeded(btree, stack) == false)
+				if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 					elog(ERROR, "lost saved point in index");	/* must not happen !!! */
 
 				page = BufferGetPage(stack->buffer);
@@ -323,6 +339,15 @@ restartScanEntry:
 						ginstate);
 	stackEntry = ginFindLeafPage(&btreeEntry, true, snapshot);
 	page = BufferGetPage(stackEntry->buffer);
+
+	/*
+	 * If fast update is enabled, we acquire a predicate lock on the entire
+	 * relation, as fast update postpones the insertion of tuples into the
+	 * index structure, preventing us from detecting rw conflicts.
+	 */
+	if (GinGetUseFastUpdate(ginstate->index))
+		PredicateLockRelation(ginstate->index, snapshot);
+
 	/* ginFindLeafPage() will have already checked snapshot age. */
 	needUnlock = true;
 
@@ -391,6 +416,8 @@ restartScanEntry:
 											rootPostingTree, snapshot);
 			entry->buffer = stack->buffer;
 
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
 			/*
 			 * We keep buffer pinned because we need to prevent deletion of
 			 * page during scan. See GIN's vacuum implementation. RefCount is
@@ -414,6 +441,8 @@ restartScanEntry:
 		}
 		else if (GinGetNPosting(itup) > 0)
 		{
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(stackEntry->buffer), snapshot);
+
 			entry->list = ginReadTuple(ginstate, entry->attnum, itup,
 									   &entry->nlist);
 			entry->predictNumberResult = entry->nlist;
@@ -493,7 +522,7 @@ startScanKey(GinState *ginstate, GinScanOpaque so, GinScanKey key)
 
 		for (i = 0; i < key->nentries - 1; i++)
 		{
-			/* Pass all entries <= i as FALSE, and the rest as MAYBE */
+			/* Pass all entries <= i as false, and the rest as MAYBE */
 			for (j = 0; j <= i; j++)
 				key->entryRes[entryIndexes[j]] = GIN_FALSE;
 			for (j = i + 1; j < key->nentries; j++)
@@ -633,6 +662,8 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 		entry->btree.fullScan = false;
 		stack = ginFindLeafPage(&entry->btree, true, snapshot);
 
+		GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(stack->buffer), snapshot);
+
 		/* we don't need the stack, just the buffer. */
 		entry->buffer = stack->buffer;
 		IncrBufferRefCount(entry->buffer);
@@ -677,6 +708,10 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 			entry->buffer = ginStepRight(entry->buffer,
 										 ginstate->index,
 										 GIN_SHARE);
+
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
+
 			page = BufferGetPage(entry->buffer);
 		}
 		stepright = true;
@@ -1038,8 +1073,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
 	 * lossy page even when none of the other entries match.
 	 *
 	 * Our strategy is to call the tri-state consistent function, with the
-	 * lossy-page entries set to MAYBE, and all the other entries FALSE. If it
-	 * returns FALSE, none of the lossy items alone are enough for a match, so
+	 * lossy-page entries set to MAYBE, and all the other entries false. If it
+	 * returns false, none of the lossy items alone are enough for a match, so
 	 * we don't need to return a lossy-page pointer. Otherwise, return a
 	 * lossy-page pointer to indicate that the whole heap page must be
 	 * checked.  (On subsequent calls, we'll do nothing until minItem is past
@@ -1733,6 +1768,13 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 		return;
 	}
 
+	/*
+	 * If fast update is disabled, but some items still exist in the pending
+	 * list, then a predicate lock on the entire relation is required.
+	 */
+	if (GinGetPendingListCleanupSize(scan->indexRelation))
+		PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
+
 	pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
 	LockBuffer(pos.pendingBuffer, GIN_SHARE);
 	pos.firstOffset = FirstOffsetNumber;
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 23f7285547..3faaf8adf4 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -22,6 +22,7 @@
 #include "storage/bufmgr.h"
 #include "storage/smgr.h"
 #include "storage/indexfsm.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 
@@ -48,7 +49,7 @@ static IndexTuple
 addItemPointersToLeafTuple(GinState *ginstate,
 						   IndexTuple old,
 						   ItemPointerData *items, uint32 nitem,
-						   GinStatsData *buildStats)
+						   GinStatsData *buildStats, Buffer buffer)
 {
 	OffsetNumber attnum;
 	Datum		key;
@@ -99,7 +100,8 @@ addItemPointersToLeafTuple(GinState *ginstate,
 		postingRoot = createPostingTree(ginstate->index,
 										oldItems,
 										oldNPosting,
-										buildStats);
+										buildStats,
+										buffer);
 
 		/* Now insert the TIDs-to-be-added into the posting tree */
 		ginInsertItemPointers(ginstate->index, postingRoot,
@@ -127,7 +129,7 @@ static IndexTuple
 buildFreshLeafTuple(GinState *ginstate,
 					OffsetNumber attnum, Datum key, GinNullCategory category,
 					ItemPointerData *items, uint32 nitem,
-					GinStatsData *buildStats)
+					GinStatsData *buildStats, Buffer buffer)
 {
 	IndexTuple	res = NULL;
 	GinPostingList *compressedList;
@@ -157,7 +159,7 @@ buildFreshLeafTuple(GinState *ginstate,
 		 * Initialize a new posting tree with the TIDs.
 		 */
 		postingRoot = createPostingTree(ginstate->index, items, nitem,
-										buildStats);
+										buildStats, buffer);
 
 		/* And save the root link in the result tuple */
 		GinSetPostingTree(res, postingRoot);
@@ -217,17 +219,19 @@ ginEntryInsert(GinState *ginstate,
 			return;
 		}
 
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* modify an existing leaf entry */
 		itup = addItemPointersToLeafTuple(ginstate, itup,
-										  items, nitem, buildStats);
+										  items, nitem, buildStats, stack->buffer);
 
 		insertdata.isDelete = true;
 	}
 	else
 	{
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* no match, so construct a new leaf entry */
 		itup = buildFreshLeafTuple(ginstate, attnum, key, category,
-								   items, nitem, buildStats);
+								   items, nitem, buildStats, stack->buffer);
 	}
 
 	/* Insert the new or modified leaf tuple */
@@ -513,6 +517,12 @@ gininsert(Relation index, Datum *values, bool *isnull,
 
 		memset(&collector, 0, sizeof(GinTupleCollector));
 
+		/*
+		 * do not use GinCheckForSerializableConflictIn() here because
+		 * it would do nothing (it does actual work only with fastupdate off)
+		 */
+		CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
+
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleFastCollect(ginstate, &collector,
 									(OffsetNumber) (i + 1),
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 7bac7a1252..5632cc5a77 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -23,6 +23,7 @@
 #include "miscadmin.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/builtins.h"
 #include "utils/index_selfuncs.h"
 #include "utils/typcache.h"
@@ -49,7 +50,7 @@ ginhandler(PG_FUNCTION_ARGS)
 	amroutine->amsearchnulls = false;
 	amroutine->amstorage = true;
 	amroutine->amclusterable = false;
-	amroutine->ampredlocks = false;
+	amroutine->ampredlocks = true;
 	amroutine->amcanparallel = false;
 	amroutine->amkeytype = InvalidOid;
 
@@ -716,3 +717,10 @@ ginUpdateStats(Relation index, const GinStatsData *stats)
 
 	END_CRIT_SECTION();
 }
+
+void
+GinCheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
+{
+	if (!GinGetUseFastUpdate(relation))
+		CheckForSerializableConflictIn(relation, tuple, buffer);
+}
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 398532d80b..6bce58942b 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -22,6 +22,7 @@
 #include "postmaster/autovacuum.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 
 struct GinVacuumState
@@ -153,11 +154,18 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
 
 	LockBuffer(lBuffer, GIN_EXCLUSIVE);
 
+	page = BufferGetPage(dBuffer);
+	rightlink = GinPageGetOpaque(page)->rightlink;
+
+	/*
+	 * Any insert which would have gone on the leaf block will now go to its
+	 * right sibling.
+	 */
+	PredicateLockPageCombine(gvs->index, deleteBlkno, rightlink);
+
 	START_CRIT_SECTION();
 
 	/* Unlink the page by changing left sibling's rightlink */
-	page = BufferGetPage(dBuffer);
-	rightlink = GinPageGetOpaque(page)->rightlink;
 
 	page = BufferGetPage(lBuffer);
 	GinPageGetOpaque(page)->rightlink = rightlink;
diff --git a/src/backend/storage/lmgr/README-SSI b/src/backend/storage/lmgr/README-SSI
index a9dc01f237..6bbdfef1c7 100644
--- a/src/backend/storage/lmgr/README-SSI
+++ b/src/backend/storage/lmgr/README-SSI
@@ -379,6 +379,17 @@ level during a GiST search. An index insert at the leaf level can
 then be trusted to ripple up to all levels and locations where
 conflicting predicate locks may exist.
 
+    * Gin searches acquire predicate locks only on the leaf pages
+of entry tree and posting tree. We acquire a predicate lock on entry
+tree leaf pages only when entry has a posting list. If entry tree has
+a pointer to posting tree, we skip locking entry tree leaf page and lock
+only posting tree leaf pages. If, however, fast update is enabled, a
+predicate lock on the index relation is required, as fast update postpones
+the insertion of tuples into the index structure by temporarily storing
+them in a pending list, making it impossible to detect all r-w conflicts.
+During a page split, a predicate lock is copied from the original page
+to the new page.
+
     * The effects of page splits, overflows, consolidations, and
 removals must be carefully reviewed to ensure that predicate locks
 aren't "lost" during those operations, or kept with pages which could
diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h
index a709596a7a..d1df3033a6 100644
--- a/src/include/access/gin_private.h
+++ b/src/include/access/gin_private.h
@@ -103,6 +103,8 @@ extern Datum *ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
 extern OffsetNumber gintuple_get_attrnum(GinState *ginstate, IndexTuple tuple);
 extern Datum gintuple_get_key(GinState *ginstate, IndexTuple tuple,
 				 GinNullCategory *category);
+extern void GinCheckForSerializableConflictIn(Relation relation,
+				 HeapTuple tuple, Buffer buffer);
 
 /* gininsert.c */
 extern IndexBuildResult *ginbuild(Relation heap, Relation index,
@@ -217,7 +219,7 @@ extern ItemPointer GinDataLeafPageGetItems(Page page, int *nitems, ItemPointerDa
 extern int	GinDataLeafPageGetItemsToTbm(Page page, TIDBitmap *tbm);
 extern BlockNumber createPostingTree(Relation index,
 				  ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats);
+				  GinStatsData *buildStats, Buffer entrybuffer);
 extern void GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset);
 extern void GinPageDeletePostingItem(Page page, OffsetNumber offset);
 extern void ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
diff --git a/src/test/isolation/expected/predicate-gin.out b/src/test/isolation/expected/predicate-gin.out
new file mode 100644
index 0000000000..b051d11fb2
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin.out
@@ -0,0 +1,719 @@
+Parsed test spec with 2 sessions
+
+starting permutation: rxy1 wx1 c1 rxy2 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10050          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+
+starting permutation: rxy2 wy2 c2 rxy1 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10050          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+
+starting permutation: rxy3 wx3 c1 rxy4 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 wy4 c2 rxy3 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy1 wx1 rxy2 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 wx1 rxy2 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 c2 wx1 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 rxy1 wx1 c1 wy2 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wx1 wy2 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy3 wx3 rxy4 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wx3 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 c2 wx3 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wx3 c1 wy4 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index 74d7d59546..c38a4ad0ab 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -66,3 +66,4 @@ test: async-notify
 test: vacuum-reltuples
 test: timeouts
 test: vacuum-concurrent-drop
+test: predicate-gin
diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec
new file mode 100644
index 0000000000..278911ce7d
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin.spec
@@ -0,0 +1,116 @@
+# Test for page level predicate locking in gin index
+#
+# Test to verify serialization failures and to check reduced false positives
+#
+# To verify serialization failures, queries and permutations are written in such
+# a way that an index scan  (from one transaction) and an index insert (from
+# another transaction) will try to access the same part (sub-tree) of the index
+# whereas to check reduced false positives, they will try to access different
+# parts (sub-tree) of the index.
+
+
+setup
+{
+  create table gin_tbl(id int4, p int4[]);
+  create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
+  insert into gin_tbl select g, array[g, g*2,g*3] from generate_series(1, 10000) g;
+  insert into gin_tbl select g, array[4,5,6] from generate_series(10001, 20000) g;
+}
+
+teardown
+{
+  drop table gin_tbl;
+}
+
+session "s1"
+setup
+{
+  begin isolation level serializable;
+  set enable_seqscan=off;
+}
+
+step "rxy1"	{ select count(*) from gin_tbl where p @> array[4,5]; }
+step "wx1"	{ insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g; }
+step "rxy3"	{ select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; }
+step "wx3"	{ insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g; }
+step "c1"  { commit; }
+
+session "s2"
+setup		
+{ 
+  begin isolation level serializable;
+  set enable_seqscan=off;
+}
+
+step "rxy2"	{ select count(*) from gin_tbl where p @> array[5,6]; }
+step "wy2"	{ insert into gin_tbl select g, array[4,5] from 
+              generate_series(20051, 20100) g; }
+step "rxy4"	{ select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or 
+              p @> array[8000,16000]; }
+step "wy4"	{ insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g; }
+step "c2"	{ commit; }
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index but one transaction commits before other
+# transaction begins so no r-w conflict.
+
+permutation "rxy1" "wx1" "c1" "rxy2" "wy2" "c2"
+permutation "rxy2" "wy2" "c2" "rxy1" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index and also one transaction commits before
+# other transaction begins, so no r-w conflict.
+
+permutation "rxy3" "wx3" "c1" "rxy4" "wy4" "c2"
+permutation "rxy4" "wy4" "c2" "rxy3" "wx3" "c1"
+
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index and one transaction begins before other
+# transaction commits so there is a r-w conflict.
+
+permutation "rxy1" "wx1" "rxy2" "c1" "wy2" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c1" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wx1" "c1" "wy2" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c1" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c1" "c2"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "rxy1" "wx1" "c1" "wy2" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c1" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c1" "c2"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c1" "c2"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c2" "c1"
+permutation "rxy2" "wy2" "rxy1" "c2" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index so no r-w conflict.
+
+permutation "rxy3" "wx3" "rxy4" "c1" "wy4" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c1" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wx3" "c1" "wy4" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c1" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c1" "c2"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "rxy3" "wx3" "c1" "wy4" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c1" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c1" "c2"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c1" "c2"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c2" "c1"
+permutation "rxy4" "wy4" "rxy3" "c2" "wx3" "c1"
#29Dmitry Ivanov
d.ivanov@postgrespro.ru
In reply to: Teodor Sigaev (#28)
1 attachment(s)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

I'd like to see fastupdate=on in test too, now tests cover only case
without fastupdate. Please, add them.

Here's a couple of tests for pending list (fastupdate = on).

--
Dmitry Ivanov
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company

Attachments:

Predicate-Locking-in-gin-index_v8.patchtext/x-diff; name=Predicate-Locking-in-gin-index_v8.patchDownload
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 37070b3b72..095b1192cb 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -17,6 +17,7 @@
 #include "access/gin_private.h"
 #include "access/ginxlog.h"
 #include "access/xloginsert.h"
+#include "storage/predicate.h"
 #include "miscadmin.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
@@ -515,6 +516,19 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			btree->fillRoot(btree, newrootpg,
 							BufferGetBlockNumber(lbuffer), newlpage,
 							BufferGetBlockNumber(rbuffer), newrpage);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(lbuffer));
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
+
 		}
 		else
 		{
@@ -524,6 +538,14 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			GinPageGetOpaque(newrpage)->rightlink = savedRightLink;
 			GinPageGetOpaque(newlpage)->flags |= GIN_INCOMPLETE_SPLIT;
 			GinPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
 		}
 
 		/*
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index f9daaba52e..3fb4fc8264 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -19,6 +19,7 @@
 #include "access/xloginsert.h"
 #include "lib/ilist.h"
 #include "miscadmin.h"
+#include "storage/predicate.h"
 #include "utils/rel.h"
 
 /*
@@ -1423,7 +1424,7 @@ disassembleLeaf(Page page)
  * Any segments that acquire new items are decoded, and the new items are
  * merged with the old items.
  *
- * Returns true if any new items were added. False means they were all
+ * Returns true if any new items were added. false means they were all
  * duplicates of existing items on the page.
  */
 static bool
@@ -1759,7 +1760,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining)
  */
 BlockNumber
 createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats)
+				  GinStatsData *buildStats, Buffer entrybuffer)
 {
 	BlockNumber blkno;
 	Buffer		buffer;
@@ -1810,6 +1811,12 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 	page = BufferGetPage(buffer);
 	blkno = BufferGetBlockNumber(buffer);
 
+	/*
+	 * Copy a predicate lock from entry tree leaf (containing posting list)
+	 * to  posting tree.
+	 */
+	PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno);
+
 	START_CRIT_SECTION();
 
 	PageRestoreTempPage(tmppage, page);
@@ -1904,6 +1911,7 @@ ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
 		btree.itemptr = insertdata.items[insertdata.curitem];
 		stack = ginFindLeafPage(&btree, false, NULL);
 
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		ginInsertValue(&btree, stack, &insertdata, buildStats);
 	}
 }
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 6fe67f346d..63603859bc 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -17,8 +17,10 @@
 #include "access/gin_private.h"
 #include "access/relscan.h"
 #include "miscadmin.h"
+#include "storage/predicate.h"
 #include "utils/datum.h"
 #include "utils/memutils.h"
+#include "utils/rel.h"
 
 /* GUC parameter */
 int			GinFuzzySearchLimit = 0;
@@ -33,11 +35,18 @@ typedef struct pendingPosition
 } pendingPosition;
 
 
+static void
+GinPredicateLockPage(Relation index, BlockNumber blkno, Snapshot snapshot)
+{
+	if (!GinGetUseFastUpdate(index))
+			PredicateLockPage(index, blkno, snapshot);
+}
+
 /*
  * Goes to the next page if current offset is outside of bounds
  */
 static bool
-moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
+moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack, Snapshot snapshot)
 {
 	Page		page = BufferGetPage(stack->buffer);
 
@@ -73,6 +82,9 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	/* Descend to the leftmost leaf page */
 	stack = ginScanBeginPostingTree(&btree, index, rootPostingTree, snapshot);
 	buffer = stack->buffer;
+
+	GinPredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
+
 	IncrBufferRefCount(buffer); /* prevent unpin in freeGinBtreeStack */
 
 	freeGinBtreeStack(stack);
@@ -94,6 +106,8 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 			break;				/* no more pages */
 
 		buffer = ginStepRight(buffer, index, GIN_SHARE);
+
+		GinPredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
 	}
 
 	UnlockReleaseBuffer(buffer);
@@ -131,6 +145,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 	attnum = scanEntry->attnum;
 	attr = TupleDescAttr(btree->ginstate->origTupdesc, attnum - 1);
 
+	GinPredicateLockPage(btree->index, stack->buffer, snapshot);
+
 	for (;;)
 	{
 		Page		page;
@@ -141,7 +157,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 		/*
 		 * stack->off points to the interested entry, buffer is already locked
 		 */
-		if (moveRightIfItNeeded(btree, stack) == false)
+		if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 			return true;
 
 		page = BufferGetPage(stack->buffer);
@@ -250,7 +266,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 				Datum		newDatum;
 				GinNullCategory newCategory;
 
-				if (moveRightIfItNeeded(btree, stack) == false)
+				if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 					elog(ERROR, "lost saved point in index");	/* must not happen !!! */
 
 				page = BufferGetPage(stack->buffer);
@@ -323,6 +339,15 @@ restartScanEntry:
 						ginstate);
 	stackEntry = ginFindLeafPage(&btreeEntry, true, snapshot);
 	page = BufferGetPage(stackEntry->buffer);
+
+	/*
+	 * If fast update is enabled, we acquire a predicate lock on the entire
+	 * relation, since fast update postpones the insertion of tuples into
+	 * the index structure, which prevents us from detecting r-w conflicts.
+	 */
+	if (GinGetUseFastUpdate(ginstate->index))
+		PredicateLockRelation(ginstate->index, snapshot);
+
 	/* ginFindLeafPage() will have already checked snapshot age. */
 	needUnlock = true;
 
@@ -391,6 +416,8 @@ restartScanEntry:
 											rootPostingTree, snapshot);
 			entry->buffer = stack->buffer;
 
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
 			/*
 			 * We keep buffer pinned because we need to prevent deletion of
 			 * page during scan. See GIN's vacuum implementation. RefCount is
@@ -414,6 +441,8 @@ restartScanEntry:
 		}
 		else if (GinGetNPosting(itup) > 0)
 		{
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(stackEntry->buffer), snapshot);
+
 			entry->list = ginReadTuple(ginstate, entry->attnum, itup,
 									   &entry->nlist);
 			entry->predictNumberResult = entry->nlist;
@@ -493,7 +522,7 @@ startScanKey(GinState *ginstate, GinScanOpaque so, GinScanKey key)
 
 		for (i = 0; i < key->nentries - 1; i++)
 		{
-			/* Pass all entries <= i as FALSE, and the rest as MAYBE */
+			/* Pass all entries <= i as false, and the rest as MAYBE */
 			for (j = 0; j <= i; j++)
 				key->entryRes[entryIndexes[j]] = GIN_FALSE;
 			for (j = i + 1; j < key->nentries; j++)
@@ -633,6 +662,8 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 		entry->btree.fullScan = false;
 		stack = ginFindLeafPage(&entry->btree, true, snapshot);
 
+		GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(stack->buffer), snapshot);
+
 		/* we don't need the stack, just the buffer. */
 		entry->buffer = stack->buffer;
 		IncrBufferRefCount(entry->buffer);
@@ -677,6 +708,10 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 			entry->buffer = ginStepRight(entry->buffer,
 										 ginstate->index,
 										 GIN_SHARE);
+
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
+
 			page = BufferGetPage(entry->buffer);
 		}
 		stepright = true;
@@ -1038,8 +1073,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
 	 * lossy page even when none of the other entries match.
 	 *
 	 * Our strategy is to call the tri-state consistent function, with the
-	 * lossy-page entries set to MAYBE, and all the other entries FALSE. If it
-	 * returns FALSE, none of the lossy items alone are enough for a match, so
+	 * lossy-page entries set to MAYBE, and all the other entries false. If it
+	 * returns false, none of the lossy items alone are enough for a match, so
 	 * we don't need to return a lossy-page pointer. Otherwise, return a
 	 * lossy-page pointer to indicate that the whole heap page must be
 	 * checked.  (On subsequent calls, we'll do nothing until minItem is past
@@ -1733,6 +1768,13 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 		return;
 	}
 
+	/*
+	 * If fast update is disabled, but some items still exist in the pending
+	 * list, then a predicate lock on the entire relation is required.
+	 */
+	if (GinGetPendingListCleanupSize(scan->indexRelation))
+		PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
+
 	pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
 	LockBuffer(pos.pendingBuffer, GIN_SHARE);
 	pos.firstOffset = FirstOffsetNumber;
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 23f7285547..3faaf8adf4 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -22,6 +22,7 @@
 #include "storage/bufmgr.h"
 #include "storage/smgr.h"
 #include "storage/indexfsm.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 
@@ -48,7 +49,7 @@ static IndexTuple
 addItemPointersToLeafTuple(GinState *ginstate,
 						   IndexTuple old,
 						   ItemPointerData *items, uint32 nitem,
-						   GinStatsData *buildStats)
+						   GinStatsData *buildStats, Buffer buffer)
 {
 	OffsetNumber attnum;
 	Datum		key;
@@ -99,7 +100,8 @@ addItemPointersToLeafTuple(GinState *ginstate,
 		postingRoot = createPostingTree(ginstate->index,
 										oldItems,
 										oldNPosting,
-										buildStats);
+										buildStats,
+										buffer);
 
 		/* Now insert the TIDs-to-be-added into the posting tree */
 		ginInsertItemPointers(ginstate->index, postingRoot,
@@ -127,7 +129,7 @@ static IndexTuple
 buildFreshLeafTuple(GinState *ginstate,
 					OffsetNumber attnum, Datum key, GinNullCategory category,
 					ItemPointerData *items, uint32 nitem,
-					GinStatsData *buildStats)
+					GinStatsData *buildStats, Buffer buffer)
 {
 	IndexTuple	res = NULL;
 	GinPostingList *compressedList;
@@ -157,7 +159,7 @@ buildFreshLeafTuple(GinState *ginstate,
 		 * Initialize a new posting tree with the TIDs.
 		 */
 		postingRoot = createPostingTree(ginstate->index, items, nitem,
-										buildStats);
+										buildStats, buffer);
 
 		/* And save the root link in the result tuple */
 		GinSetPostingTree(res, postingRoot);
@@ -217,17 +219,19 @@ ginEntryInsert(GinState *ginstate,
 			return;
 		}
 
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* modify an existing leaf entry */
 		itup = addItemPointersToLeafTuple(ginstate, itup,
-										  items, nitem, buildStats);
+										  items, nitem, buildStats, stack->buffer);
 
 		insertdata.isDelete = true;
 	}
 	else
 	{
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* no match, so construct a new leaf entry */
 		itup = buildFreshLeafTuple(ginstate, attnum, key, category,
-								   items, nitem, buildStats);
+								   items, nitem, buildStats, stack->buffer);
 	}
 
 	/* Insert the new or modified leaf tuple */
@@ -513,6 +517,12 @@ gininsert(Relation index, Datum *values, bool *isnull,
 
 		memset(&collector, 0, sizeof(GinTupleCollector));
 
+		/*
+	 * Do not use GinCheckForSerializableConflictIn() here, because it
+	 * would do nothing (it does actual work only with fastupdate off).
+		 */
+		CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
+
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleFastCollect(ginstate, &collector,
 									(OffsetNumber) (i + 1),
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 7bac7a1252..5632cc5a77 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -23,6 +23,7 @@
 #include "miscadmin.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/builtins.h"
 #include "utils/index_selfuncs.h"
 #include "utils/typcache.h"
@@ -49,7 +50,7 @@ ginhandler(PG_FUNCTION_ARGS)
 	amroutine->amsearchnulls = false;
 	amroutine->amstorage = true;
 	amroutine->amclusterable = false;
-	amroutine->ampredlocks = false;
+	amroutine->ampredlocks = true;
 	amroutine->amcanparallel = false;
 	amroutine->amkeytype = InvalidOid;
 
@@ -716,3 +717,10 @@ ginUpdateStats(Relation index, const GinStatsData *stats)
 
 	END_CRIT_SECTION();
 }
+
+void
+GinCheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
+{
+	if (!GinGetUseFastUpdate(relation))
+		CheckForSerializableConflictIn(relation, tuple, buffer);
+}
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 398532d80b..6bce58942b 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -22,6 +22,7 @@
 #include "postmaster/autovacuum.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 
 struct GinVacuumState
@@ -153,11 +154,18 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
 
 	LockBuffer(lBuffer, GIN_EXCLUSIVE);
 
+	page = BufferGetPage(dBuffer);
+	rightlink = GinPageGetOpaque(page)->rightlink;
+
+	/*
+	 * Any insert which would have gone on the leaf block will now go to its
+	 * right sibling.
+	 */
+	PredicateLockPageCombine(gvs->index, deleteBlkno, rightlink);
+
 	START_CRIT_SECTION();
 
 	/* Unlink the page by changing left sibling's rightlink */
-	page = BufferGetPage(dBuffer);
-	rightlink = GinPageGetOpaque(page)->rightlink;
 
 	page = BufferGetPage(lBuffer);
 	GinPageGetOpaque(page)->rightlink = rightlink;
diff --git a/src/backend/storage/lmgr/README-SSI b/src/backend/storage/lmgr/README-SSI
index e221241f96..7ea5f0b48e 100644
--- a/src/backend/storage/lmgr/README-SSI
+++ b/src/backend/storage/lmgr/README-SSI
@@ -380,6 +380,17 @@ then be trusted to ripple up to all levels and locations where
 conflicting predicate locks may exist. In case there is a page split,
 we need to copy predicate lock from an original page to all new pages.
 
+    * Gin searches acquire predicate locks only on the leaf pages
+of entry tree and posting tree. We acquire a predicate lock on entry
+tree leaf pages only when entry has a posting list. If entry tree has
+a pointer to posting tree, we skip locking entry tree leaf page and lock
+only posting tree leaf pages. If, however, fast update is enabled, a
+predicate lock on the index relation is required: fast update postpones
+insertion of tuples into the index structure by storing them in the
+pending list, which prevents us from detecting all r-w conflicts.
+During a page split, a predicate lock is copied from the original page
+to the new page.
+
     * The effects of page splits, overflows, consolidations, and
 removals must be carefully reviewed to ensure that predicate locks
 aren't "lost" during those operations, or kept with pages which could
diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h
index a709596a7a..d1df3033a6 100644
--- a/src/include/access/gin_private.h
+++ b/src/include/access/gin_private.h
@@ -103,6 +103,8 @@ extern Datum *ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
 extern OffsetNumber gintuple_get_attrnum(GinState *ginstate, IndexTuple tuple);
 extern Datum gintuple_get_key(GinState *ginstate, IndexTuple tuple,
 				 GinNullCategory *category);
+extern void GinCheckForSerializableConflictIn(Relation relation,
+				 HeapTuple tuple, Buffer buffer);
 
 /* gininsert.c */
 extern IndexBuildResult *ginbuild(Relation heap, Relation index,
@@ -217,7 +219,7 @@ extern ItemPointer GinDataLeafPageGetItems(Page page, int *nitems, ItemPointerDa
 extern int	GinDataLeafPageGetItemsToTbm(Page page, TIDBitmap *tbm);
 extern BlockNumber createPostingTree(Relation index,
 				  ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats);
+				  GinStatsData *buildStats, Buffer entrybuffer);
 extern void GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset);
 extern void GinPageDeletePostingItem(Page page, OffsetNumber offset);
 extern void ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
diff --git a/src/test/isolation/expected/predicate-gin.out b/src/test/isolation/expected/predicate-gin.out
new file mode 100644
index 0000000000..368cff395d
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin.out
@@ -0,0 +1,888 @@
+Parsed test spec with 2 sessions
+
+starting permutation: rxy1 wx1 c1 rxy2 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10050          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+
+starting permutation: rxy2 wy2 c2 rxy1 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10050          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+
+starting permutation: rxy3 wx3 c1 rxy4 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 wy4 c2 rxy3 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy1 wx1 rxy2 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 wx1 rxy2 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 c2 wx1 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 rxy1 wx1 c1 wy2 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wx1 wy2 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy3 wx3 rxy4 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wx3 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 c2 wx3 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wx3 c1 wy4 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: fu1 rxy1 wx1 c1 rxy2 wy2 c2
+step fu1: alter index ginidx set (fastupdate = on);
+			  commit;
+			  begin isolation level serializable;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10050          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+
+starting permutation: fu1 rxy3 wx3 c1 rxy4 wy4 c2
+step fu1: alter index ginidx set (fastupdate = on);
+			  commit;
+			  begin isolation level serializable;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: fu1 rxy1 wx1 rxy2 c1 wy2 c2
+step fu1: alter index ginidx set (fastupdate = on);
+			  commit;
+			  begin isolation level serializable;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: fu1 rxy1 rxy2 wy2 wx1 c1 c2
+step fu1: alter index ginidx set (fastupdate = on);
+			  commit;
+			  begin isolation level serializable;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: fu1 rxy2 rxy1 wy2 c2 wx1 c1
+step fu1: alter index ginidx set (fastupdate = on);
+			  commit;
+			  begin isolation level serializable;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: fu1 rxy3 wx3 rxy4 c1 wy4 c2
+step fu1: alter index ginidx set (fastupdate = on);
+			  commit;
+			  begin isolation level serializable;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: fu1 rxy4 rxy3 wx3 c1 wy4 c2
+step fu1: alter index ginidx set (fastupdate = on);
+			  commit;
+			  begin isolation level serializable;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: fu1 rxy4 rxy3 wx3 wy4 c1 c2
+step fu1: alter index ginidx set (fastupdate = on);
+			  commit;
+			  begin isolation level serializable;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index 53e1f192b0..d3965fe73f 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -67,3 +67,4 @@ test: vacuum-reltuples
 test: timeouts
 test: vacuum-concurrent-drop
 test: predicate-gist
+test: predicate-gin
diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec
new file mode 100644
index 0000000000..40cb6ddf73
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin.spec
@@ -0,0 +1,154 @@
+# Test for page level predicate locking in gin index
+#
+# Test to verify serialization failures and to check reduced false positives
+#
+# To verify serialization failures, queries and permutations are written in such
+# a way that an index scan (from one transaction) and an index insert (from
+# another transaction) will try to access the same part (sub-tree) of the index
+# whereas to check reduced false positives, they will try to access different
+# parts (sub-tree) of the index.
+
+
+setup
+{
+  create table gin_tbl(id int4, p int4[]);
+  create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
+  insert into gin_tbl select g, array[g, g*2,g*3] from generate_series(1, 10000) g;
+  insert into gin_tbl select g, array[4,5,6] from generate_series(10001, 20000) g;
+}
+
+teardown
+{
+  drop table gin_tbl;
+}
+
+session "s1"
+setup
+{
+  begin isolation level serializable;
+  set enable_seqscan=off;
+}
+
+# enable pending list for a small subset of tests
+step "fu1"	{ alter index ginidx set (fastupdate = on);
+			  commit;
+			  begin isolation level serializable; }
+
+step "rxy1"	{ select count(*) from gin_tbl where p @> array[4,5]; }
+step "wx1"	{ insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g; }
+step "rxy3"	{ select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; }
+step "wx3"	{ insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g; }
+step "c1"  { commit; }
+
+session "s2"
+setup
+{
+  begin isolation level serializable;
+  set enable_seqscan=off;
+}
+
+step "rxy2"	{ select count(*) from gin_tbl where p @> array[5,6]; }
+step "wy2"	{ insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g; }
+step "rxy4"	{ select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000]; }
+step "wy4"	{ insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g; }
+step "c2"	{ commit; }
+
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index but one transaction commits before other
+# transaction begins so no r-w conflict.
+
+permutation "rxy1" "wx1" "c1" "rxy2" "wy2" "c2"
+permutation "rxy2" "wy2" "c2" "rxy1" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index and also one transaction commits before
+# other transaction begins, so no r-w conflict.
+
+permutation "rxy3" "wx3" "c1" "rxy4" "wy4" "c2"
+permutation "rxy4" "wy4" "c2" "rxy3" "wx3" "c1"
+
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index and one transaction begins before other
+# transaction commits so there is a r-w conflict.
+
+permutation "rxy1" "wx1" "rxy2" "c1" "wy2" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c1" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wx1" "c1" "wy2" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c1" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c1" "c2"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "rxy1" "wx1" "c1" "wy2" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c1" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c1" "c2"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c1" "c2"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c2" "c1"
+permutation "rxy2" "wy2" "rxy1" "c2" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index so no r-w conflict.
+
+permutation "rxy3" "wx3" "rxy4" "c1" "wy4" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c1" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wx3" "c1" "wy4" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c1" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c1" "c2"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "rxy3" "wx3" "c1" "wy4" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c1" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c1" "c2"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c1" "c2"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c2" "c1"
+permutation "rxy4" "wy4" "rxy3" "c2" "wx3" "c1"
+
+
+# Finally, a small subset of tests for pending list
+
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index but one transaction commits before other
+# transaction begins so no r-w conflict.
+
+permutation "fu1" "rxy1" "wx1" "c1" "rxy2" "wy2" "c2"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index and also one transaction commits before
+# other transaction begins, so no r-w conflict.
+
+permutation "fu1" "rxy3" "wx3" "c1" "rxy4" "wy4" "c2"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index and one transaction begins before other
+# transaction commits so there is a r-w conflict.
+
+permutation "fu1" "rxy1" "wx1" "rxy2" "c1" "wy2" "c2"
+permutation "fu1" "rxy1" "rxy2" "wy2" "wx1" "c1" "c2"
+permutation "fu1" "rxy2" "rxy1" "wy2" "c2" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index, but pending list is enabled,
+# so there might be a r-w conflict.
+
+permutation "fu1" "rxy3" "wx3" "rxy4" "c1" "wy4" "c2"
+permutation "fu1" "rxy4" "rxy3" "wx3" "c1" "wy4" "c2"
+permutation "fu1" "rxy4" "rxy3" "wx3" "wy4" "c1" "c2"
#30Dmitry Ivanov
d.ivanov@postgrespro.ru
In reply to: Dmitry Ivanov (#29)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

I'd like to see fastupdate=on in test too, now tests cover only case
without fastupdate. Please, add them.

Here's a couple of tests for pending list (fastupdate = on).

By the way, isn't it strange that permutation "fu1" "rxy3" "wx3" "rxy4"
"c1" "wy4" "c2" doesn't produce an ERROR?

--
Dmitry Ivanov
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company

#31Teodor Sigaev
teodor@sigaev.ru
In reply to: Dmitry Ivanov (#29)
1 attachment(s)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

Hi!

I slightly modified test for clean demonstration of difference between
fastupdate on and off. Also I added CheckForSerializableConflictIn() to
fastupdate off codepath, but only in case of non-empty pending list.

The next question I see: why don't we lock entry leaf pages in some cases?
As I understand it, a scan should lock any visited page, but currently that is
true only for the posting tree. It seems it should also lock pages in the
entry tree, because concurrent processes could add new entries that could be
matched by a partial search, for example. BTW, a partial search (see
collectMatchBitmap()) locks the entry tree correctly, but regular
startScanEntry() doesn't lock the entry page in the posting-tree case, only in
the posting-list case.

Dmitry Ivanov wrote:

I'd like to see fastupdate=on in test too, now tests cover only case
without fastupdate. Please, add them.

Here's a couple of tests for pending list (fastupdate = on).

--
Teodor Sigaev E-mail: teodor@sigaev.ru
WWW: http://www.sigaev.ru/

Attachments:

Predicate-Locking-in-gin-index_v9.patchtext/x-patch; name=Predicate-Locking-in-gin-index_v9.patchDownload
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 37070b3b72..095b1192cb 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -17,6 +17,7 @@
 #include "access/gin_private.h"
 #include "access/ginxlog.h"
 #include "access/xloginsert.h"
+#include "storage/predicate.h"
 #include "miscadmin.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
@@ -515,6 +516,19 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			btree->fillRoot(btree, newrootpg,
 							BufferGetBlockNumber(lbuffer), newlpage,
 							BufferGetBlockNumber(rbuffer), newrpage);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(lbuffer));
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
+
 		}
 		else
 		{
@@ -524,6 +538,14 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			GinPageGetOpaque(newrpage)->rightlink = savedRightLink;
 			GinPageGetOpaque(newlpage)->flags |= GIN_INCOMPLETE_SPLIT;
 			GinPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
 		}
 
 		/*
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index f9daaba52e..3fb4fc8264 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -19,6 +19,7 @@
 #include "access/xloginsert.h"
 #include "lib/ilist.h"
 #include "miscadmin.h"
+#include "storage/predicate.h"
 #include "utils/rel.h"
 
 /*
@@ -1423,7 +1424,7 @@ disassembleLeaf(Page page)
  * Any segments that acquire new items are decoded, and the new items are
  * merged with the old items.
  *
- * Returns true if any new items were added. False means they were all
+ * Returns true if any new items were added. false means they were all
  * duplicates of existing items on the page.
  */
 static bool
@@ -1759,7 +1760,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining)
  */
 BlockNumber
 createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats)
+				  GinStatsData *buildStats, Buffer entrybuffer)
 {
 	BlockNumber blkno;
 	Buffer		buffer;
@@ -1810,6 +1811,12 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 	page = BufferGetPage(buffer);
 	blkno = BufferGetBlockNumber(buffer);
 
+	/*
+	 * Copy the predicate lock from the entry tree leaf page (which held the
+	 * posting list) to the root of the new posting tree.
+	 */
+	PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno);
+
 	START_CRIT_SECTION();
 
 	PageRestoreTempPage(tmppage, page);
@@ -1904,6 +1911,7 @@ ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
 		btree.itemptr = insertdata.items[insertdata.curitem];
 		stack = ginFindLeafPage(&btree, false, NULL);
 
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		ginInsertValue(&btree, stack, &insertdata, buildStats);
 	}
 }
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 6fe67f346d..63603859bc 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -17,8 +17,10 @@
 #include "access/gin_private.h"
 #include "access/relscan.h"
 #include "miscadmin.h"
+#include "storage/predicate.h"
 #include "utils/datum.h"
 #include "utils/memutils.h"
+#include "utils/rel.h"
 
 /* GUC parameter */
 int			GinFuzzySearchLimit = 0;
@@ -33,11 +35,18 @@ typedef struct pendingPosition
 } pendingPosition;
 
 
+static void
+GinPredicateLockPage(Relation index, BlockNumber blkno, Snapshot snapshot)
+{
+	if (!GinGetUseFastUpdate(index))
+			PredicateLockPage(index, blkno, snapshot);
+}
+
 /*
  * Goes to the next page if current offset is outside of bounds
  */
 static bool
-moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
+moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack, Snapshot snapshot)
 {
 	Page		page = BufferGetPage(stack->buffer);
 
@@ -73,6 +82,9 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	/* Descend to the leftmost leaf page */
 	stack = ginScanBeginPostingTree(&btree, index, rootPostingTree, snapshot);
 	buffer = stack->buffer;
+
+	GinPredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
+
 	IncrBufferRefCount(buffer); /* prevent unpin in freeGinBtreeStack */
 
 	freeGinBtreeStack(stack);
@@ -94,6 +106,8 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 			break;				/* no more pages */
 
 		buffer = ginStepRight(buffer, index, GIN_SHARE);
+
+		GinPredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
 	}
 
 	UnlockReleaseBuffer(buffer);
@@ -131,6 +145,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 	attnum = scanEntry->attnum;
 	attr = TupleDescAttr(btree->ginstate->origTupdesc, attnum - 1);
 
+	GinPredicateLockPage(btree->index, stack->buffer, snapshot);
+
 	for (;;)
 	{
 		Page		page;
@@ -141,7 +157,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 		/*
 		 * stack->off points to the interested entry, buffer is already locked
 		 */
-		if (moveRightIfItNeeded(btree, stack) == false)
+		if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 			return true;
 
 		page = BufferGetPage(stack->buffer);
@@ -250,7 +266,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 				Datum		newDatum;
 				GinNullCategory newCategory;
 
-				if (moveRightIfItNeeded(btree, stack) == false)
+				if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 					elog(ERROR, "lost saved point in index");	/* must not happen !!! */
 
 				page = BufferGetPage(stack->buffer);
@@ -323,6 +339,15 @@ restartScanEntry:
 						ginstate);
 	stackEntry = ginFindLeafPage(&btreeEntry, true, snapshot);
 	page = BufferGetPage(stackEntry->buffer);
+
+	/*
+	 * If fast update is enabled, we acquire a predicate lock on the entire
+	 * relation, since fast update postpones the insertion of tuples into the
+	 * main index structure, making rw conflicts undetectable at page level.
+	 */
+	if (GinGetUseFastUpdate(ginstate->index))
+		PredicateLockRelation(ginstate->index, snapshot);
+
 	/* ginFindLeafPage() will have already checked snapshot age. */
 	needUnlock = true;
 
@@ -391,6 +416,8 @@ restartScanEntry:
 											rootPostingTree, snapshot);
 			entry->buffer = stack->buffer;
 
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
 			/*
 			 * We keep buffer pinned because we need to prevent deletion of
 			 * page during scan. See GIN's vacuum implementation. RefCount is
@@ -414,6 +441,8 @@ restartScanEntry:
 		}
 		else if (GinGetNPosting(itup) > 0)
 		{
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(stackEntry->buffer), snapshot);
+
 			entry->list = ginReadTuple(ginstate, entry->attnum, itup,
 									   &entry->nlist);
 			entry->predictNumberResult = entry->nlist;
@@ -493,7 +522,7 @@ startScanKey(GinState *ginstate, GinScanOpaque so, GinScanKey key)
 
 		for (i = 0; i < key->nentries - 1; i++)
 		{
-			/* Pass all entries <= i as FALSE, and the rest as MAYBE */
+			/* Pass all entries <= i as false, and the rest as MAYBE */
 			for (j = 0; j <= i; j++)
 				key->entryRes[entryIndexes[j]] = GIN_FALSE;
 			for (j = i + 1; j < key->nentries; j++)
@@ -633,6 +662,8 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 		entry->btree.fullScan = false;
 		stack = ginFindLeafPage(&entry->btree, true, snapshot);
 
+		GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(stack->buffer), snapshot);
+
 		/* we don't need the stack, just the buffer. */
 		entry->buffer = stack->buffer;
 		IncrBufferRefCount(entry->buffer);
@@ -677,6 +708,10 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 			entry->buffer = ginStepRight(entry->buffer,
 										 ginstate->index,
 										 GIN_SHARE);
+
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
+
 			page = BufferGetPage(entry->buffer);
 		}
 		stepright = true;
@@ -1038,8 +1073,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
 	 * lossy page even when none of the other entries match.
 	 *
 	 * Our strategy is to call the tri-state consistent function, with the
-	 * lossy-page entries set to MAYBE, and all the other entries FALSE. If it
-	 * returns FALSE, none of the lossy items alone are enough for a match, so
+	 * lossy-page entries set to MAYBE, and all the other entries false. If it
+	 * returns false, none of the lossy items alone are enough for a match, so
 	 * we don't need to return a lossy-page pointer. Otherwise, return a
 	 * lossy-page pointer to indicate that the whole heap page must be
 	 * checked.  (On subsequent calls, we'll do nothing until minItem is past
@@ -1733,6 +1768,13 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 		return;
 	}
 
+	/*
+	 * If fast update is disabled, but some items still exist in the pending
+	 * list, then a predicate lock on the entire relation is required.
+	 */
+	if (GinGetPendingListCleanupSize(scan->indexRelation))
+		PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
+
 	pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
 	LockBuffer(pos.pendingBuffer, GIN_SHARE);
 	pos.firstOffset = FirstOffsetNumber;
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 23f7285547..74419a3117 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -22,6 +22,7 @@
 #include "storage/bufmgr.h"
 #include "storage/smgr.h"
 #include "storage/indexfsm.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 
@@ -48,7 +49,7 @@ static IndexTuple
 addItemPointersToLeafTuple(GinState *ginstate,
 						   IndexTuple old,
 						   ItemPointerData *items, uint32 nitem,
-						   GinStatsData *buildStats)
+						   GinStatsData *buildStats, Buffer buffer)
 {
 	OffsetNumber attnum;
 	Datum		key;
@@ -99,7 +100,8 @@ addItemPointersToLeafTuple(GinState *ginstate,
 		postingRoot = createPostingTree(ginstate->index,
 										oldItems,
 										oldNPosting,
-										buildStats);
+										buildStats,
+										buffer);
 
 		/* Now insert the TIDs-to-be-added into the posting tree */
 		ginInsertItemPointers(ginstate->index, postingRoot,
@@ -127,7 +129,7 @@ static IndexTuple
 buildFreshLeafTuple(GinState *ginstate,
 					OffsetNumber attnum, Datum key, GinNullCategory category,
 					ItemPointerData *items, uint32 nitem,
-					GinStatsData *buildStats)
+					GinStatsData *buildStats, Buffer buffer)
 {
 	IndexTuple	res = NULL;
 	GinPostingList *compressedList;
@@ -157,7 +159,7 @@ buildFreshLeafTuple(GinState *ginstate,
 		 * Initialize a new posting tree with the TIDs.
 		 */
 		postingRoot = createPostingTree(ginstate->index, items, nitem,
-										buildStats);
+										buildStats, buffer);
 
 		/* And save the root link in the result tuple */
 		GinSetPostingTree(res, postingRoot);
@@ -217,17 +219,19 @@ ginEntryInsert(GinState *ginstate,
 			return;
 		}
 
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* modify an existing leaf entry */
 		itup = addItemPointersToLeafTuple(ginstate, itup,
-										  items, nitem, buildStats);
+										  items, nitem, buildStats, stack->buffer);
 
 		insertdata.isDelete = true;
 	}
 	else
 	{
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* no match, so construct a new leaf entry */
 		itup = buildFreshLeafTuple(ginstate, attnum, key, category,
-								   items, nitem, buildStats);
+								   items, nitem, buildStats, stack->buffer);
 	}
 
 	/* Insert the new or modified leaf tuple */
@@ -513,6 +517,12 @@ gininsert(Relation index, Datum *values, bool *isnull,
 
 		memset(&collector, 0, sizeof(GinTupleCollector));
 
+		/*
+		 * Do not use GinCheckForSerializableConflictIn() here, because
+		 * it would do nothing (it does actual work only with fastupdate off).
+		 */
+		CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
+
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleFastCollect(ginstate, &collector,
 									(OffsetNumber) (i + 1),
@@ -523,6 +533,14 @@ gininsert(Relation index, Datum *values, bool *isnull,
 	}
 	else
 	{
+		/*
+		 * It's possible that the pending list still exists: [auto]vacuum
+		 * should clean it up eventually, but until it is gone we need to
+		 * check for conflicts against the whole index.
+		 */
+		if (GinGetPendingListCleanupSize(index))
+			CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
+
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleInsert(ginstate, (OffsetNumber) (i + 1),
 							   values[i], isnull[i],
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 7bac7a1252..5632cc5a77 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -23,6 +23,7 @@
 #include "miscadmin.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/builtins.h"
 #include "utils/index_selfuncs.h"
 #include "utils/typcache.h"
@@ -49,7 +50,7 @@ ginhandler(PG_FUNCTION_ARGS)
 	amroutine->amsearchnulls = false;
 	amroutine->amstorage = true;
 	amroutine->amclusterable = false;
-	amroutine->ampredlocks = false;
+	amroutine->ampredlocks = true;
 	amroutine->amcanparallel = false;
 	amroutine->amkeytype = InvalidOid;
 
@@ -716,3 +717,10 @@ ginUpdateStats(Relation index, const GinStatsData *stats)
 
 	END_CRIT_SECTION();
 }
+
+void
+GinCheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
+{
+	if (!GinGetUseFastUpdate(relation))
+		CheckForSerializableConflictIn(relation, tuple, buffer);
+}
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 398532d80b..6bce58942b 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -22,6 +22,7 @@
 #include "postmaster/autovacuum.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 
 struct GinVacuumState
@@ -153,11 +154,18 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
 
 	LockBuffer(lBuffer, GIN_EXCLUSIVE);
 
+	page = BufferGetPage(dBuffer);
+	rightlink = GinPageGetOpaque(page)->rightlink;
+
+	/*
+	 * Any insert which would have gone on the leaf block will now go to its
+	 * right sibling.
+	 */
+	PredicateLockPageCombine(gvs->index, deleteBlkno, rightlink);
+
 	START_CRIT_SECTION();
 
 	/* Unlink the page by changing left sibling's rightlink */
-	page = BufferGetPage(dBuffer);
-	rightlink = GinPageGetOpaque(page)->rightlink;
 
 	page = BufferGetPage(lBuffer);
 	GinPageGetOpaque(page)->rightlink = rightlink;
diff --git a/src/backend/storage/lmgr/README-SSI b/src/backend/storage/lmgr/README-SSI
index e221241f96..7ea5f0b48e 100644
--- a/src/backend/storage/lmgr/README-SSI
+++ b/src/backend/storage/lmgr/README-SSI
@@ -380,6 +380,17 @@ then be trusted to ripple up to all levels and locations where
 conflicting predicate locks may exist. In case there is a page split,
 we need to copy predicate lock from an original page to all new pages.
 
+    * GIN searches acquire predicate locks only on the leaf pages of the
+entry tree and the posting tree. We acquire a predicate lock on an entry
+tree leaf page only when the entry has a posting list. If the entry has a
+pointer to a posting tree, we skip locking the entry tree leaf page and
+lock only the posting tree leaf pages. If, however, fast update is enabled,
+a predicate lock on the whole index relation is required, since fast update
+postpones the insertion of tuples into the main index structure by storing
+them temporarily in the pending list, making some r-w conflicts undetectable.
+During a page split, a predicate lock is copied from the original page
+to the new page.
+
     * The effects of page splits, overflows, consolidations, and
 removals must be carefully reviewed to ensure that predicate locks
 aren't "lost" during those operations, or kept with pages which could
diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h
index a709596a7a..d1df3033a6 100644
--- a/src/include/access/gin_private.h
+++ b/src/include/access/gin_private.h
@@ -103,6 +103,8 @@ extern Datum *ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
 extern OffsetNumber gintuple_get_attrnum(GinState *ginstate, IndexTuple tuple);
 extern Datum gintuple_get_key(GinState *ginstate, IndexTuple tuple,
 				 GinNullCategory *category);
+extern void GinCheckForSerializableConflictIn(Relation relation,
+				 HeapTuple tuple, Buffer buffer);
 
 /* gininsert.c */
 extern IndexBuildResult *ginbuild(Relation heap, Relation index,
@@ -217,7 +219,7 @@ extern ItemPointer GinDataLeafPageGetItems(Page page, int *nitems, ItemPointerDa
 extern int	GinDataLeafPageGetItemsToTbm(Page page, TIDBitmap *tbm);
 extern BlockNumber createPostingTree(Relation index,
 				  ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats);
+				  GinStatsData *buildStats, Buffer entrybuffer);
 extern void GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset);
 extern void GinPageDeletePostingItem(Page page, OffsetNumber offset);
 extern void ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
diff --git a/src/test/isolation/expected/predicate-gin.out b/src/test/isolation/expected/predicate-gin.out
new file mode 100644
index 0000000000..4f5501f6f0
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin.out
@@ -0,0 +1,756 @@
+Parsed test spec with 2 sessions
+
+starting permutation: rxy1 wx1 c1 rxy2 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10050          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+
+starting permutation: rxy2 wy2 c2 rxy1 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10050          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+
+starting permutation: rxy3 wx3 c1 rxy4 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 wy4 c2 rxy3 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy1 wx1 rxy2 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 wx1 rxy2 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 c2 wx1 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 rxy1 wx1 c1 wy2 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wx1 wy2 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy3 wx3 rxy4 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wx3 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 c2 wx3 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wx3 c1 wy4 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy1 rxy2fu wx1 c1 wy2fu c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2fu: select count(*) from gin_tbl where p @> array[10000,10005];
+count          
+
+0              
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2fu: insert into gin_tbl select g, array[10000,10005] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+
+starting permutation: fu1 rxy1 rxy2fu wx1 c1 wy2fu c2
+step fu1: alter index ginidx set (fastupdate = on);
+			  commit;
+			  begin isolation level serializable; 
+  			  set enable_seqscan=off;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2fu: select count(*) from gin_tbl where p @> array[10000,10005];
+count          
+
+0              
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2fu: insert into gin_tbl select g, array[10000,10005] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index 53e1f192b0..d3965fe73f 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -67,3 +67,4 @@ test: vacuum-reltuples
 test: timeouts
 test: vacuum-concurrent-drop
 test: predicate-gist
+test: predicate-gin
diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec
new file mode 100644
index 0000000000..e45c9396aa
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin.spec
@@ -0,0 +1,134 @@
+# Test for page level predicate locking in gin index
+#
+# Test to verify serialization failures and to check reduced false positives
+#
+# To verify serialization failures, queries and permutations are written in such
+# a way that an index scan (from one transaction) and an index insert (from
+# another transaction) will try to access the same part (sub-tree) of the index,
+# whereas to check reduced false positives, they will try to access different
+# parts (sub-trees) of the index.
+
+
+setup
+{
+  create table gin_tbl(id int4, p int4[]);
+  insert into gin_tbl select g, array[g, g*2,g*3] from generate_series(1, 10000) g;
+  insert into gin_tbl select g, array[4,5,6] from generate_series(10001, 20000) g;
+  create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
+}
+
+teardown
+{
+  drop table gin_tbl;
+}
+
+session "s1"
+setup
+{
+  begin isolation level serializable;
+  set enable_seqscan=off;
+}
+
+# enable pending list for a small subset of tests
+step "fu1"	{ alter index ginidx set (fastupdate = on);
+			  commit;
+			  begin isolation level serializable; 
+  			  set enable_seqscan=off; }
+
+step "rxy1"	{ select count(*) from gin_tbl where p @> array[4,5]; }
+step "wx1"	{ insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g; }
+step "rxy3"	{ select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; }
+step "wx3"	{ insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g; }
+step "c1"  { commit; }
+
+session "s2"
+setup
+{
+  begin isolation level serializable;
+  set enable_seqscan=off;
+}
+
+step "rxy2"	{ select count(*) from gin_tbl where p @> array[5,6]; }
+step "rxy2fu"	{ select count(*) from gin_tbl where p @> array[10000,10005]; }
+step "wy2"	{ insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g; }
+step "wy2fu"	{ insert into gin_tbl select g, array[10000,10005] from
+              generate_series(20051, 20100) g; }
+step "rxy4"	{ select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000]; }
+step "wy4"	{ insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g; }
+step "c2"	{ commit; }
+
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index but one transaction commits before other
+# transaction begins so no r-w conflict.
+
+permutation "rxy1" "wx1" "c1" "rxy2" "wy2" "c2"
+permutation "rxy2" "wy2" "c2" "rxy1" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index and also one transaction commits before
+# other transaction begins, so no r-w conflict.
+
+permutation "rxy3" "wx3" "c1" "rxy4" "wy4" "c2"
+permutation "rxy4" "wy4" "c2" "rxy3" "wx3" "c1"
+
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index and one transaction begins before other
+# transaction commits so there is a r-w conflict.
+
+permutation "rxy1" "wx1" "rxy2" "c1" "wy2" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c1" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wx1" "c1" "wy2" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c1" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c1" "c2"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "rxy1" "wx1" "c1" "wy2" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c1" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c1" "c2"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c1" "c2"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c2" "c1"
+permutation "rxy2" "wy2" "rxy1" "c2" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index so no r-w conflict.
+
+permutation "rxy3" "wx3" "rxy4" "c1" "wy4" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c1" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wx3" "c1" "wy4" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c1" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c1" "c2"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "rxy3" "wx3" "c1" "wy4" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c1" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c1" "c2"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c1" "c2"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c2" "c1"
+permutation "rxy4" "wy4" "rxy3" "c2" "wx3" "c1"
+
+# Test fastupdate = on. The first test should pass because fastupdate is off and
+# the sessions touch different parts of the index; the second should fail because
+# with fastupdate on, the whole index should be under a predicate lock.
+
+permutation       "rxy1" "rxy2fu" "wx1" "c1" "wy2fu" "c2"
+permutation "fu1" "rxy1" "rxy2fu" "wx1" "c1" "wy2fu" "c2"
+
#32Teodor Sigaev
teodor@sigaev.ru
In reply to: Teodor Sigaev (#31)
1 attachment(s)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

The next question I see: why don't we lock entry leaf pages in some cases?

I've modified the patch to predicate lock each leaf (entry or posting) page. Now
the patch reaches a committable state from my point of view.

--
Teodor Sigaev E-mail: teodor@sigaev.ru
WWW: http://www.sigaev.ru/

Attachments:

Predicate-Locking-in-gin-index_v10.patchtext/x-patch; name=Predicate-Locking-in-gin-index_v10.patchDownload
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 37070b3b72..095b1192cb 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -17,6 +17,7 @@
 #include "access/gin_private.h"
 #include "access/ginxlog.h"
 #include "access/xloginsert.h"
+#include "storage/predicate.h"
 #include "miscadmin.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
@@ -515,6 +516,19 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			btree->fillRoot(btree, newrootpg,
 							BufferGetBlockNumber(lbuffer), newlpage,
 							BufferGetBlockNumber(rbuffer), newrpage);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(lbuffer));
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
+
 		}
 		else
 		{
@@ -524,6 +538,14 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			GinPageGetOpaque(newrpage)->rightlink = savedRightLink;
 			GinPageGetOpaque(newlpage)->flags |= GIN_INCOMPLETE_SPLIT;
 			GinPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
 		}
 
 		/*
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index f9daaba52e..3fb4fc8264 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -19,6 +19,7 @@
 #include "access/xloginsert.h"
 #include "lib/ilist.h"
 #include "miscadmin.h"
+#include "storage/predicate.h"
 #include "utils/rel.h"
 
 /*
@@ -1423,7 +1424,7 @@ disassembleLeaf(Page page)
  * Any segments that acquire new items are decoded, and the new items are
  * merged with the old items.
  *
- * Returns true if any new items were added. False means they were all
+ * Returns true if any new items were added. false means they were all
  * duplicates of existing items on the page.
  */
 static bool
@@ -1759,7 +1760,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining)
  */
 BlockNumber
 createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats)
+				  GinStatsData *buildStats, Buffer entrybuffer)
 {
 	BlockNumber blkno;
 	Buffer		buffer;
@@ -1810,6 +1811,12 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 	page = BufferGetPage(buffer);
 	blkno = BufferGetBlockNumber(buffer);
 
+	/*
+	 * Copy the predicate lock from the entry tree leaf page (which
+	 * contained the posting list) to the posting tree.
+	 */
+	PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno);
+
 	START_CRIT_SECTION();
 
 	PageRestoreTempPage(tmppage, page);
@@ -1904,6 +1911,7 @@ ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
 		btree.itemptr = insertdata.items[insertdata.curitem];
 		stack = ginFindLeafPage(&btree, false, NULL);
 
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		ginInsertValue(&btree, stack, &insertdata, buildStats);
 	}
 }
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 6fe67f346d..21f2cbac3b 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -17,8 +17,10 @@
 #include "access/gin_private.h"
 #include "access/relscan.h"
 #include "miscadmin.h"
+#include "storage/predicate.h"
 #include "utils/datum.h"
 #include "utils/memutils.h"
+#include "utils/rel.h"
 
 /* GUC parameter */
 int			GinFuzzySearchLimit = 0;
@@ -33,11 +35,18 @@ typedef struct pendingPosition
 } pendingPosition;
 
 
+static void
+GinPredicateLockPage(Relation index, BlockNumber blkno, Snapshot snapshot)
+{
+	if (!GinGetUseFastUpdate(index))
+			PredicateLockPage(index, blkno, snapshot);
+}
+
 /*
  * Goes to the next page if current offset is outside of bounds
  */
 static bool
-moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
+moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack, Snapshot snapshot)
 {
 	Page		page = BufferGetPage(stack->buffer);
 
@@ -52,6 +61,7 @@ moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
 		stack->buffer = ginStepRight(stack->buffer, btree->index, GIN_SHARE);
 		stack->blkno = BufferGetBlockNumber(stack->buffer);
 		stack->off = FirstOffsetNumber;
+		GinPredicateLockPage(btree->index, stack->blkno, snapshot);
 	}
 
 	return true;
@@ -73,6 +83,7 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	/* Descend to the leftmost leaf page */
 	stack = ginScanBeginPostingTree(&btree, index, rootPostingTree, snapshot);
 	buffer = stack->buffer;
+
 	IncrBufferRefCount(buffer); /* prevent unpin in freeGinBtreeStack */
 
 	freeGinBtreeStack(stack);
@@ -82,6 +93,11 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	 */
 	for (;;)
 	{
+		/*
+		 * Predicate lock each leaf page in posting tree
+		 */
+		GinPredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
+
 		page = BufferGetPage(buffer);
 		if ((GinPageGetOpaque(page)->flags & GIN_DELETED) == 0)
 		{
@@ -131,6 +147,12 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 	attnum = scanEntry->attnum;
 	attr = TupleDescAttr(btree->ginstate->origTupdesc, attnum - 1);
 
+	/*
+	 * Predicate lock entry leaf page, following pages will be locked by
+	 * moveRightIfItNeeded()
+	 */
+	GinPredicateLockPage(btree->index, stack->buffer, snapshot);
+
 	for (;;)
 	{
 		Page		page;
@@ -141,7 +163,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 		/*
 		 * stack->off points to the interested entry, buffer is already locked
 		 */
-		if (moveRightIfItNeeded(btree, stack) == false)
+		if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 			return true;
 
 		page = BufferGetPage(stack->buffer);
@@ -250,7 +272,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 				Datum		newDatum;
 				GinNullCategory newCategory;
 
-				if (moveRightIfItNeeded(btree, stack) == false)
+				if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 					elog(ERROR, "lost saved point in index");	/* must not happen !!! */
 
 				page = BufferGetPage(stack->buffer);
@@ -323,6 +345,15 @@ restartScanEntry:
 						ginstate);
 	stackEntry = ginFindLeafPage(&btreeEntry, true, snapshot);
 	page = BufferGetPage(stackEntry->buffer);
+
+	/*
+	 * If fast update is enabled, we acquire a predicate lock on the entire
+	 * relation as fast update postpones the insertion of tuples into index
+	 * structure due to which we can't detect rw conflicts.
+	 */
+	if (GinGetUseFastUpdate(ginstate->index))
+		PredicateLockRelation(ginstate->index, snapshot);
+
 	/* ginFindLeafPage() will have already checked snapshot age. */
 	needUnlock = true;
 
@@ -370,6 +401,10 @@ restartScanEntry:
 	{
 		IndexTuple	itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, stackEntry->off));
 
+		/* Predicate lock visited entry leaf page */
+		GinPredicateLockPage(ginstate->index,
+							 BufferGetBlockNumber(stackEntry->buffer), snapshot);
+
 		if (GinIsPostingTree(itup))
 		{
 			BlockNumber rootPostingTree = GinGetPostingTree(itup);
@@ -391,6 +426,12 @@ restartScanEntry:
 											rootPostingTree, snapshot);
 			entry->buffer = stack->buffer;
 
+			/*
+			 * Predicate lock visited entry posting page, following pages
+			 * will be locked by moveRightIfItNeeded or entryLoadMoreItems
+			 */
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
 			/*
 			 * We keep buffer pinned because we need to prevent deletion of
 			 * page during scan. See GIN's vacuum implementation. RefCount is
@@ -493,7 +534,7 @@ startScanKey(GinState *ginstate, GinScanOpaque so, GinScanKey key)
 
 		for (i = 0; i < key->nentries - 1; i++)
 		{
-			/* Pass all entries <= i as FALSE, and the rest as MAYBE */
+			/* Pass all entries <= i as false, and the rest as MAYBE */
 			for (j = 0; j <= i; j++)
 				key->entryRes[entryIndexes[j]] = GIN_FALSE;
 			for (j = i + 1; j < key->nentries; j++)
@@ -633,6 +674,8 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 		entry->btree.fullScan = false;
 		stack = ginFindLeafPage(&entry->btree, true, snapshot);
 
+		GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(stack->buffer), snapshot);
+
 		/* we don't need the stack, just the buffer. */
 		entry->buffer = stack->buffer;
 		IncrBufferRefCount(entry->buffer);
@@ -677,6 +720,10 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 			entry->buffer = ginStepRight(entry->buffer,
 										 ginstate->index,
 										 GIN_SHARE);
+
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
+
 			page = BufferGetPage(entry->buffer);
 		}
 		stepright = true;
@@ -1038,8 +1085,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
 	 * lossy page even when none of the other entries match.
 	 *
 	 * Our strategy is to call the tri-state consistent function, with the
-	 * lossy-page entries set to MAYBE, and all the other entries FALSE. If it
-	 * returns FALSE, none of the lossy items alone are enough for a match, so
+	 * lossy-page entries set to MAYBE, and all the other entries false. If it
+	 * returns false, none of the lossy items alone are enough for a match, so
 	 * we don't need to return a lossy-page pointer. Otherwise, return a
 	 * lossy-page pointer to indicate that the whole heap page must be
 	 * checked.  (On subsequent calls, we'll do nothing until minItem is past
@@ -1733,6 +1780,13 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 		return;
 	}
 
+	/*
+	 * If fast update is disabled, but some items still exist in the pending
+	 * list, then a predicate lock on the entire relation is required.
+	 */
+	if (GinGetPendingListCleanupSize(scan->indexRelation))
+		PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
+
 	pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
 	LockBuffer(pos.pendingBuffer, GIN_SHARE);
 	pos.firstOffset = FirstOffsetNumber;
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 23f7285547..74419a3117 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -22,6 +22,7 @@
 #include "storage/bufmgr.h"
 #include "storage/smgr.h"
 #include "storage/indexfsm.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 
@@ -48,7 +49,7 @@ static IndexTuple
 addItemPointersToLeafTuple(GinState *ginstate,
 						   IndexTuple old,
 						   ItemPointerData *items, uint32 nitem,
-						   GinStatsData *buildStats)
+						   GinStatsData *buildStats, Buffer buffer)
 {
 	OffsetNumber attnum;
 	Datum		key;
@@ -99,7 +100,8 @@ addItemPointersToLeafTuple(GinState *ginstate,
 		postingRoot = createPostingTree(ginstate->index,
 										oldItems,
 										oldNPosting,
-										buildStats);
+										buildStats,
+										buffer);
 
 		/* Now insert the TIDs-to-be-added into the posting tree */
 		ginInsertItemPointers(ginstate->index, postingRoot,
@@ -127,7 +129,7 @@ static IndexTuple
 buildFreshLeafTuple(GinState *ginstate,
 					OffsetNumber attnum, Datum key, GinNullCategory category,
 					ItemPointerData *items, uint32 nitem,
-					GinStatsData *buildStats)
+					GinStatsData *buildStats, Buffer buffer)
 {
 	IndexTuple	res = NULL;
 	GinPostingList *compressedList;
@@ -157,7 +159,7 @@ buildFreshLeafTuple(GinState *ginstate,
 		 * Initialize a new posting tree with the TIDs.
 		 */
 		postingRoot = createPostingTree(ginstate->index, items, nitem,
-										buildStats);
+										buildStats, buffer);
 
 		/* And save the root link in the result tuple */
 		GinSetPostingTree(res, postingRoot);
@@ -217,17 +219,19 @@ ginEntryInsert(GinState *ginstate,
 			return;
 		}
 
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* modify an existing leaf entry */
 		itup = addItemPointersToLeafTuple(ginstate, itup,
-										  items, nitem, buildStats);
+										  items, nitem, buildStats, stack->buffer);
 
 		insertdata.isDelete = true;
 	}
 	else
 	{
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* no match, so construct a new leaf entry */
 		itup = buildFreshLeafTuple(ginstate, attnum, key, category,
-								   items, nitem, buildStats);
+								   items, nitem, buildStats, stack->buffer);
 	}
 
 	/* Insert the new or modified leaf tuple */
@@ -513,6 +517,12 @@ gininsert(Relation index, Datum *values, bool *isnull,
 
 		memset(&collector, 0, sizeof(GinTupleCollector));
 
+		/*
+		 * Do not use GinCheckForSerializableConflictIn() here, because
+		 * it would do nothing (it does actual work only with fastupdate off).
+		 */
+		CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
+
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleFastCollect(ginstate, &collector,
 									(OffsetNumber) (i + 1),
@@ -523,6 +533,14 @@ gininsert(Relation index, Datum *values, bool *isnull,
 	}
 	else
 	{
+		/*
+		 * It's possible that the pending list still exists; [auto]vacuum
+		 * should clean it up, but as long as it exists we need to check
+		 * for conflicts on the whole index.
+		 */
+		if (GinGetPendingListCleanupSize(index))
+			CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
+
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleInsert(ginstate, (OffsetNumber) (i + 1),
 							   values[i], isnull[i],
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 7bac7a1252..5632cc5a77 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -23,6 +23,7 @@
 #include "miscadmin.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/builtins.h"
 #include "utils/index_selfuncs.h"
 #include "utils/typcache.h"
@@ -49,7 +50,7 @@ ginhandler(PG_FUNCTION_ARGS)
 	amroutine->amsearchnulls = false;
 	amroutine->amstorage = true;
 	amroutine->amclusterable = false;
-	amroutine->ampredlocks = false;
+	amroutine->ampredlocks = true;
 	amroutine->amcanparallel = false;
 	amroutine->amkeytype = InvalidOid;
 
@@ -716,3 +717,10 @@ ginUpdateStats(Relation index, const GinStatsData *stats)
 
 	END_CRIT_SECTION();
 }
+
+void
+GinCheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
+{
+	if (!GinGetUseFastUpdate(relation))
+		CheckForSerializableConflictIn(relation, tuple, buffer);
+}
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 398532d80b..6bce58942b 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -22,6 +22,7 @@
 #include "postmaster/autovacuum.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 
 struct GinVacuumState
@@ -153,11 +154,18 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
 
 	LockBuffer(lBuffer, GIN_EXCLUSIVE);
 
+	page = BufferGetPage(dBuffer);
+	rightlink = GinPageGetOpaque(page)->rightlink;
+
+	/*
+	 * Any insert which would have gone on the leaf block will now go to its
+	 * right sibling.
+	 */
+	PredicateLockPageCombine(gvs->index, deleteBlkno, rightlink);
+
 	START_CRIT_SECTION();
 
 	/* Unlink the page by changing left sibling's rightlink */
-	page = BufferGetPage(dBuffer);
-	rightlink = GinPageGetOpaque(page)->rightlink;
 
 	page = BufferGetPage(lBuffer);
 	GinPageGetOpaque(page)->rightlink = rightlink;
diff --git a/src/backend/storage/lmgr/README-SSI b/src/backend/storage/lmgr/README-SSI
index e221241f96..7ea5f0b48e 100644
--- a/src/backend/storage/lmgr/README-SSI
+++ b/src/backend/storage/lmgr/README-SSI
@@ -380,6 +380,17 @@ then be trusted to ripple up to all levels and locations where
 conflicting predicate locks may exist. In case there is a page split,
 we need to copy predicate lock from an original page to all new pages.
 
+    * GIN searches acquire predicate locks only on the leaf pages
+of the entry tree and the posting tree. We acquire a predicate lock on
+an entry tree leaf page only when the entry has a posting list. If the
+entry has a pointer to a posting tree, we skip locking the entry tree
+leaf page and lock only the posting tree leaf pages. If, however, fast
+update is enabled, a predicate lock on the whole index relation is
+required, because fast update postpones the insertion of tuples into
+the index structure by temporarily storing them in the pending list,
+which makes us unable to detect all r-w conflicts. During a page split,
+a predicate lock is copied from the original page to the new page.
+
     * The effects of page splits, overflows, consolidations, and
 removals must be carefully reviewed to ensure that predicate locks
 aren't "lost" during those operations, or kept with pages which could
diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h
index a709596a7a..d1df3033a6 100644
--- a/src/include/access/gin_private.h
+++ b/src/include/access/gin_private.h
@@ -103,6 +103,8 @@ extern Datum *ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
 extern OffsetNumber gintuple_get_attrnum(GinState *ginstate, IndexTuple tuple);
 extern Datum gintuple_get_key(GinState *ginstate, IndexTuple tuple,
 				 GinNullCategory *category);
+extern void GinCheckForSerializableConflictIn(Relation relation,
+				 HeapTuple tuple, Buffer buffer);
 
 /* gininsert.c */
 extern IndexBuildResult *ginbuild(Relation heap, Relation index,
@@ -217,7 +219,7 @@ extern ItemPointer GinDataLeafPageGetItems(Page page, int *nitems, ItemPointerDa
 extern int	GinDataLeafPageGetItemsToTbm(Page page, TIDBitmap *tbm);
 extern BlockNumber createPostingTree(Relation index,
 				  ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats);
+				  GinStatsData *buildStats, Buffer entrybuffer);
 extern void GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset);
 extern void GinPageDeletePostingItem(Page page, OffsetNumber offset);
 extern void ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
diff --git a/src/test/isolation/expected/predicate-gin.out b/src/test/isolation/expected/predicate-gin.out
new file mode 100644
index 0000000000..4f5501f6f0
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin.out
@@ -0,0 +1,756 @@
+Parsed test spec with 2 sessions
+
+starting permutation: rxy1 wx1 c1 rxy2 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10050          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+
+starting permutation: rxy2 wy2 c2 rxy1 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10050          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+
+starting permutation: rxy3 wx3 c1 rxy4 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 wy4 c2 rxy3 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy1 wx1 rxy2 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 wx1 rxy2 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 c2 wx1 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 rxy1 wx1 c1 wy2 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wx1 wy2 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy3 wx3 rxy4 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wx3 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 c2 wx3 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wx3 c1 wy4 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy1 rxy2fu wx1 c1 wy2fu c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2fu: select count(*) from gin_tbl where p @> array[10000,10005];
+count          
+
+0              
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2fu: insert into gin_tbl select g, array[10000,10005] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+
+starting permutation: fu1 rxy1 rxy2fu wx1 c1 wy2fu c2
+step fu1: alter index ginidx set (fastupdate = on);
+			  commit;
+			  begin isolation level serializable; 
+  			  set enable_seqscan=off;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2fu: select count(*) from gin_tbl where p @> array[10000,10005];
+count          
+
+0              
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2fu: insert into gin_tbl select g, array[10000,10005] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index 53e1f192b0..d3965fe73f 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -67,3 +67,4 @@ test: vacuum-reltuples
 test: timeouts
 test: vacuum-concurrent-drop
 test: predicate-gist
+test: predicate-gin
diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec
new file mode 100644
index 0000000000..e45c9396aa
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin.spec
@@ -0,0 +1,134 @@
+# Test for page level predicate locking in gin index
+#
+# Test to verify serialization failures and to check reduced false positives
+#
+# To verify serialization failures, queries and permutations are written in such
+# a way that an index scan  (from one transaction) and an index insert (from
+# another transaction) will try to access the same part (sub-tree) of the index
+# whereas to check reduced false positives, they will try to access different
+# parts (sub-tree) of the index.
+
+
+setup
+{
+  create table gin_tbl(id int4, p int4[]);
+  insert into gin_tbl select g, array[g, g*2,g*3] from generate_series(1, 10000) g;
+  insert into gin_tbl select g, array[4,5,6] from generate_series(10001, 20000) g;
+  create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
+}
+
+teardown
+{
+  drop table gin_tbl;
+}
+
+session "s1"
+setup
+{
+  begin isolation level serializable;
+  set enable_seqscan=off;
+}
+
+# enable pending list for a small subset of tests
+step "fu1"	{ alter index ginidx set (fastupdate = on);
+			  commit;
+			  begin isolation level serializable; 
+  			  set enable_seqscan=off; }
+
+step "rxy1"	{ select count(*) from gin_tbl where p @> array[4,5]; }
+step "wx1"	{ insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g; }
+step "rxy3"	{ select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; }
+step "wx3"	{ insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g; }
+step "c1"  { commit; }
+
+session "s2"
+setup
+{
+  begin isolation level serializable;
+  set enable_seqscan=off;
+}
+
+step "rxy2"	{ select count(*) from gin_tbl where p @> array[5,6]; }
+step "rxy2fu"	{ select count(*) from gin_tbl where p @> array[10000,10005]; }
+step "wy2"	{ insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g; }
+step "wy2fu"	{ insert into gin_tbl select g, array[10000,10005] from
+              generate_series(20051, 20100) g; }
+step "rxy4"	{ select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000]; }
+step "wy4"	{ insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g; }
+step "c2"	{ commit; }
+
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index but one transaction commits before other
+# transaction begins so no r-w conflict.
+
+permutation "rxy1" "wx1" "c1" "rxy2" "wy2" "c2"
+permutation "rxy2" "wy2" "c2" "rxy1" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index and also one transaction commits before
+# other transaction begins, so no r-w conflict.
+
+permutation "rxy3" "wx3" "c1" "rxy4" "wy4" "c2"
+permutation "rxy4" "wy4" "c2" "rxy3" "wx3" "c1"
+
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index and one transaction begins before other
+# transaction commits so there is a r-w conflict.
+
+permutation "rxy1" "wx1" "rxy2" "c1" "wy2" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c1" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wx1" "c1" "wy2" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c1" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c1" "c2"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "rxy1" "wx1" "c1" "wy2" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c1" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c1" "c2"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c1" "c2"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c2" "c1"
+permutation "rxy2" "wy2" "rxy1" "c2" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index so no r-w conflict.
+
+permutation "rxy3" "wx3" "rxy4" "c1" "wy4" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c1" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wx3" "c1" "wy4" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c1" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c1" "c2"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "rxy3" "wx3" "c1" "wy4" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c1" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c1" "c2"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c1" "c2"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c2" "c1"
+permutation "rxy4" "wy4" "rxy3" "c2" "wx3" "c1"
+
+# Test fastupdate = on. First test should pass because fastupdate is off and
+# sessions touches different parts of indexe, second should fail because
+# with fastupdate on whole index should be under predicate lock.
+
+permutation       "rxy1" "rxy2fu" "wx1" "c1" "wy2fu" "c2"
+permutation "fu1" "rxy1" "rxy2fu" "wx1" "c1" "wy2fu" "c2"
+
#33Alvaro Herrera
alvherre@alvh.no-ip.org
In reply to: Teodor Sigaev (#32)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

I don't quite understand the new call in gininsert -- I mean I see that
it wants to check for conflicts even when fastupdate is set, but why?
Maybe, just maybe, it would be better to add a new flag to the
GinCheckForSerializableConflictIn function, that's passed differently
for this one callsite, and then a comment next to it that indicates why
do we test for fastupdates in one case and not the other case.
If you don't like this idea, then I think more commentary on why
fastupdate is not considered in gininsert is warranted.

In startScanEntry, if you "goto restartScanEntry" in the fastupdate
case, are you trying to acquire the same lock again? Maybe the lock
acquire should occur before the goto target? (If this doesn't matter for
some reason, maybe add a comment about it)

--
Álvaro Herrera https://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services

#34Alexander Korotkov
a.korotkov@postgrespro.ru
In reply to: Teodor Sigaev (#32)
1 attachment(s)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

On Thu, Mar 29, 2018 at 1:38 PM, Teodor Sigaev <teodor@sigaev.ru> wrote:

The next question what I see: why do not we lock entry leaf pages in some

cases?

I've modified the patch to predicate lock each leaf (entry or posting) page.
Now the patch reaches a committable state from my point of view.

I made some enhancements in comments and readme.

------
Alexander Korotkov
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company

Attachments:

Predicate-Locking-in-gin-index_v11.patchapplication/octet-stream; name=Predicate-Locking-in-gin-index_v11.patchDownload
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 37070b3b72..095b1192cb 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -17,6 +17,7 @@
 #include "access/gin_private.h"
 #include "access/ginxlog.h"
 #include "access/xloginsert.h"
+#include "storage/predicate.h"
 #include "miscadmin.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
@@ -515,6 +516,19 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			btree->fillRoot(btree, newrootpg,
 							BufferGetBlockNumber(lbuffer), newlpage,
 							BufferGetBlockNumber(rbuffer), newrpage);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(lbuffer));
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
+
 		}
 		else
 		{
@@ -524,6 +538,14 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			GinPageGetOpaque(newrpage)->rightlink = savedRightLink;
 			GinPageGetOpaque(newlpage)->flags |= GIN_INCOMPLETE_SPLIT;
 			GinPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
 		}
 
 		/*
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index f9daaba52e..642ca1a2c7 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -19,6 +19,7 @@
 #include "access/xloginsert.h"
 #include "lib/ilist.h"
 #include "miscadmin.h"
+#include "storage/predicate.h"
 #include "utils/rel.h"
 
 /*
@@ -1759,7 +1760,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining)
  */
 BlockNumber
 createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats)
+				  GinStatsData *buildStats, Buffer entrybuffer)
 {
 	BlockNumber blkno;
 	Buffer		buffer;
@@ -1810,6 +1811,12 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 	page = BufferGetPage(buffer);
 	blkno = BufferGetBlockNumber(buffer);
 
+	/*
+	 * Copy a predicate lock from entry tree leaf (containing posting list)
+	 * to  posting tree.
+	 */
+	PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno);
+
 	START_CRIT_SECTION();
 
 	PageRestoreTempPage(tmppage, page);
@@ -1904,6 +1911,7 @@ ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
 		btree.itemptr = insertdata.items[insertdata.curitem];
 		stack = ginFindLeafPage(&btree, false, NULL);
 
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		ginInsertValue(&btree, stack, &insertdata, buildStats);
 	}
 }
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 6fe67f346d..cea963fb1b 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -17,8 +17,10 @@
 #include "access/gin_private.h"
 #include "access/relscan.h"
 #include "miscadmin.h"
+#include "storage/predicate.h"
 #include "utils/datum.h"
 #include "utils/memutils.h"
+#include "utils/rel.h"
 
 /* GUC parameter */
 int			GinFuzzySearchLimit = 0;
@@ -33,11 +35,25 @@ typedef struct pendingPosition
 } pendingPosition;
 
 
+/*
+ * Place predicate lock on GIN page if needed.
+ */
+static void
+GinPredicateLockPage(Relation index, BlockNumber blkno, Snapshot snapshot)
+{
+	/*
+	 * When fast update is on then no need in locking pages, because we
+	 * anyway need to lock the whole index.
+	 */
+	if (!GinGetUseFastUpdate(index))
+			PredicateLockPage(index, blkno, snapshot);
+}
+
 /*
  * Goes to the next page if current offset is outside of bounds
  */
 static bool
-moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
+moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack, Snapshot snapshot)
 {
 	Page		page = BufferGetPage(stack->buffer);
 
@@ -52,6 +68,7 @@ moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
 		stack->buffer = ginStepRight(stack->buffer, btree->index, GIN_SHARE);
 		stack->blkno = BufferGetBlockNumber(stack->buffer);
 		stack->off = FirstOffsetNumber;
+		GinPredicateLockPage(btree->index, stack->blkno, snapshot);
 	}
 
 	return true;
@@ -73,6 +90,7 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	/* Descend to the leftmost leaf page */
 	stack = ginScanBeginPostingTree(&btree, index, rootPostingTree, snapshot);
 	buffer = stack->buffer;
+
 	IncrBufferRefCount(buffer); /* prevent unpin in freeGinBtreeStack */
 
 	freeGinBtreeStack(stack);
@@ -82,6 +100,11 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	 */
 	for (;;)
 	{
+		/*
+		 * Predicate lock each leaf page in posting tree
+		 */
+		GinPredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
+
 		page = BufferGetPage(buffer);
 		if ((GinPageGetOpaque(page)->flags & GIN_DELETED) == 0)
 		{
@@ -131,6 +154,12 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 	attnum = scanEntry->attnum;
 	attr = TupleDescAttr(btree->ginstate->origTupdesc, attnum - 1);
 
+	/*
+	 * Predicate lock entry leaf page, following pages will be locked by
+	 * moveRightIfItNeeded()
+	 */
+	GinPredicateLockPage(btree->index, stack->buffer, snapshot);
+
 	for (;;)
 	{
 		Page		page;
@@ -141,7 +170,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 		/*
 		 * stack->off points to the interested entry, buffer is already locked
 		 */
-		if (moveRightIfItNeeded(btree, stack) == false)
+		if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 			return true;
 
 		page = BufferGetPage(stack->buffer);
@@ -250,7 +279,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 				Datum		newDatum;
 				GinNullCategory newCategory;
 
-				if (moveRightIfItNeeded(btree, stack) == false)
+				if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 					elog(ERROR, "lost saved point in index");	/* must not happen !!! */
 
 				page = BufferGetPage(stack->buffer);
@@ -323,6 +352,15 @@ restartScanEntry:
 						ginstate);
 	stackEntry = ginFindLeafPage(&btreeEntry, true, snapshot);
 	page = BufferGetPage(stackEntry->buffer);
+
+	/*
+	 * If fast update is enabled, we acquire a predicate lock on the entire
+	 * relation as fast update postpones the insertion of tuples into index
+	 * structure due to which we can't detect rw conflicts.
+	 */
+	if (GinGetUseFastUpdate(ginstate->index))
+		PredicateLockRelation(ginstate->index, snapshot);
+
 	/* ginFindLeafPage() will have already checked snapshot age. */
 	needUnlock = true;
 
@@ -370,6 +408,10 @@ restartScanEntry:
 	{
 		IndexTuple	itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, stackEntry->off));
 
+		/* Predicate lock visited entry leaf page */
+		GinPredicateLockPage(ginstate->index,
+							 BufferGetBlockNumber(stackEntry->buffer), snapshot);
+
 		if (GinIsPostingTree(itup))
 		{
 			BlockNumber rootPostingTree = GinGetPostingTree(itup);
@@ -391,6 +433,12 @@ restartScanEntry:
 											rootPostingTree, snapshot);
 			entry->buffer = stack->buffer;
 
+			/*
+			 * Predicate lock visited posting tree page, following pages
+			 * will be locked by moveRightIfItNeeded or entryLoadMoreItems
+			 */
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
 			/*
 			 * We keep buffer pinned because we need to prevent deletion of
 			 * page during scan. See GIN's vacuum implementation. RefCount is
@@ -493,7 +541,7 @@ startScanKey(GinState *ginstate, GinScanOpaque so, GinScanKey key)
 
 		for (i = 0; i < key->nentries - 1; i++)
 		{
-			/* Pass all entries <= i as FALSE, and the rest as MAYBE */
+			/* Pass all entries <= i as false, and the rest as MAYBE */
 			for (j = 0; j <= i; j++)
 				key->entryRes[entryIndexes[j]] = GIN_FALSE;
 			for (j = i + 1; j < key->nentries; j++)
@@ -633,6 +681,8 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 		entry->btree.fullScan = false;
 		stack = ginFindLeafPage(&entry->btree, true, snapshot);
 
+		GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(stack->buffer), snapshot);
+
 		/* we don't need the stack, just the buffer. */
 		entry->buffer = stack->buffer;
 		IncrBufferRefCount(entry->buffer);
@@ -677,6 +727,10 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 			entry->buffer = ginStepRight(entry->buffer,
 										 ginstate->index,
 										 GIN_SHARE);
+
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
+
 			page = BufferGetPage(entry->buffer);
 		}
 		stepright = true;
@@ -1038,8 +1092,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
 	 * lossy page even when none of the other entries match.
 	 *
 	 * Our strategy is to call the tri-state consistent function, with the
-	 * lossy-page entries set to MAYBE, and all the other entries FALSE. If it
-	 * returns FALSE, none of the lossy items alone are enough for a match, so
+	 * lossy-page entries set to MAYBE, and all the other entries false. If it
+	 * returns false, none of the lossy items alone are enough for a match, so
 	 * we don't need to return a lossy-page pointer. Otherwise, return a
 	 * lossy-page pointer to indicate that the whole heap page must be
 	 * checked.  (On subsequent calls, we'll do nothing until minItem is past
@@ -1733,6 +1787,13 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 		return;
 	}
 
+	/*
+	 * If fast update is disabled, but some items still exist in the pending
+	 * list, then a predicate lock on the entire relation is required.
+	 */
+	if (GinGetPendingListCleanupSize(scan->indexRelation))
+		PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
+
 	pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
 	LockBuffer(pos.pendingBuffer, GIN_SHARE);
 	pos.firstOffset = FirstOffsetNumber;
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 23f7285547..a670ae6c68 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -22,6 +22,7 @@
 #include "storage/bufmgr.h"
 #include "storage/smgr.h"
 #include "storage/indexfsm.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 
@@ -48,7 +49,7 @@ static IndexTuple
 addItemPointersToLeafTuple(GinState *ginstate,
 						   IndexTuple old,
 						   ItemPointerData *items, uint32 nitem,
-						   GinStatsData *buildStats)
+						   GinStatsData *buildStats, Buffer buffer)
 {
 	OffsetNumber attnum;
 	Datum		key;
@@ -99,7 +100,8 @@ addItemPointersToLeafTuple(GinState *ginstate,
 		postingRoot = createPostingTree(ginstate->index,
 										oldItems,
 										oldNPosting,
-										buildStats);
+										buildStats,
+										buffer);
 
 		/* Now insert the TIDs-to-be-added into the posting tree */
 		ginInsertItemPointers(ginstate->index, postingRoot,
@@ -127,7 +129,7 @@ static IndexTuple
 buildFreshLeafTuple(GinState *ginstate,
 					OffsetNumber attnum, Datum key, GinNullCategory category,
 					ItemPointerData *items, uint32 nitem,
-					GinStatsData *buildStats)
+					GinStatsData *buildStats, Buffer buffer)
 {
 	IndexTuple	res = NULL;
 	GinPostingList *compressedList;
@@ -157,7 +159,7 @@ buildFreshLeafTuple(GinState *ginstate,
 		 * Initialize a new posting tree with the TIDs.
 		 */
 		postingRoot = createPostingTree(ginstate->index, items, nitem,
-										buildStats);
+										buildStats, buffer);
 
 		/* And save the root link in the result tuple */
 		GinSetPostingTree(res, postingRoot);
@@ -217,17 +219,19 @@ ginEntryInsert(GinState *ginstate,
 			return;
 		}
 
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* modify an existing leaf entry */
 		itup = addItemPointersToLeafTuple(ginstate, itup,
-										  items, nitem, buildStats);
+										  items, nitem, buildStats, stack->buffer);
 
 		insertdata.isDelete = true;
 	}
 	else
 	{
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* no match, so construct a new leaf entry */
 		itup = buildFreshLeafTuple(ginstate, attnum, key, category,
-								   items, nitem, buildStats);
+								   items, nitem, buildStats, stack->buffer);
 	}
 
 	/* Insert the new or modified leaf tuple */
@@ -513,6 +517,12 @@ gininsert(Relation index, Datum *values, bool *isnull,
 
 		memset(&collector, 0, sizeof(GinTupleCollector));
 
+		/*
+		 * Do not use GinCheckForSerializableConflictIn() here, because
+		 * it would do nothing (it does actual work only with fastupdate off).
+		 */
+		CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
+
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleFastCollect(ginstate, &collector,
 									(OffsetNumber) (i + 1),
@@ -523,6 +533,14 @@ gininsert(Relation index, Datum *values, bool *isnull,
 	}
 	else
 	{
+		/*
+		 * It's possible that the pending list still exists: [auto]vacuum
+		 * should clean it up, but while it exists we need to check for
+		 * serializable conflicts in the whole index.
+		 */
+		if (GinGetPendingListCleanupSize(index))
+			CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
+
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleInsert(ginstate, (OffsetNumber) (i + 1),
 							   values[i], isnull[i],
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 7bac7a1252..5632cc5a77 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -23,6 +23,7 @@
 #include "miscadmin.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/builtins.h"
 #include "utils/index_selfuncs.h"
 #include "utils/typcache.h"
@@ -49,7 +50,7 @@ ginhandler(PG_FUNCTION_ARGS)
 	amroutine->amsearchnulls = false;
 	amroutine->amstorage = true;
 	amroutine->amclusterable = false;
-	amroutine->ampredlocks = false;
+	amroutine->ampredlocks = true;
 	amroutine->amcanparallel = false;
 	amroutine->amkeytype = InvalidOid;
 
@@ -716,3 +717,10 @@ ginUpdateStats(Relation index, const GinStatsData *stats)
 
 	END_CRIT_SECTION();
 }
+
+void
+GinCheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
+{
+	if (!GinGetUseFastUpdate(relation))
+		CheckForSerializableConflictIn(relation, tuple, buffer);
+}
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 398532d80b..6bce58942b 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -22,6 +22,7 @@
 #include "postmaster/autovacuum.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 
 struct GinVacuumState
@@ -153,11 +154,18 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
 
 	LockBuffer(lBuffer, GIN_EXCLUSIVE);
 
+	page = BufferGetPage(dBuffer);
+	rightlink = GinPageGetOpaque(page)->rightlink;
+
+	/*
+	 * Any insert which would have gone on the leaf block will now go to its
+	 * right sibling.
+	 */
+	PredicateLockPageCombine(gvs->index, deleteBlkno, rightlink);
+
 	START_CRIT_SECTION();
 
 	/* Unlink the page by changing left sibling's rightlink */
-	page = BufferGetPage(dBuffer);
-	rightlink = GinPageGetOpaque(page)->rightlink;
 
 	page = BufferGetPage(lBuffer);
 	GinPageGetOpaque(page)->rightlink = rightlink;
diff --git a/src/backend/storage/lmgr/README-SSI b/src/backend/storage/lmgr/README-SSI
index e221241f96..9e98af23c8 100644
--- a/src/backend/storage/lmgr/README-SSI
+++ b/src/backend/storage/lmgr/README-SSI
@@ -380,6 +380,15 @@ then be trusted to ripple up to all levels and locations where
 conflicting predicate locks may exist. In case there is a page split,
 we need to copy predicate lock from an original page to all new pages.
 
+    * GIN searches acquire predicate locks only on the leaf pages
+of the entry tree and posting tree. During a page split, predicate locks are
+copied from the original page to the new page. In the same way, predicate locks
+are copied from the entry tree leaf page to a freshly created posting tree
+root. However, when fast update is enabled, a predicate lock on the whole index
+relation is required. Fast update postpones the insertion of tuples into the
+index structure by temporarily storing them in the pending list. That makes us
+unable to detect r-w conflicts using page-level locks.
+
     * The effects of page splits, overflows, consolidations, and
 removals must be carefully reviewed to ensure that predicate locks
 aren't "lost" during those operations, or kept with pages which could
diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h
index a709596a7a..d1df3033a6 100644
--- a/src/include/access/gin_private.h
+++ b/src/include/access/gin_private.h
@@ -103,6 +103,8 @@ extern Datum *ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
 extern OffsetNumber gintuple_get_attrnum(GinState *ginstate, IndexTuple tuple);
 extern Datum gintuple_get_key(GinState *ginstate, IndexTuple tuple,
 				 GinNullCategory *category);
+extern void GinCheckForSerializableConflictIn(Relation relation,
+				 HeapTuple tuple, Buffer buffer);
 
 /* gininsert.c */
 extern IndexBuildResult *ginbuild(Relation heap, Relation index,
@@ -217,7 +219,7 @@ extern ItemPointer GinDataLeafPageGetItems(Page page, int *nitems, ItemPointerDa
 extern int	GinDataLeafPageGetItemsToTbm(Page page, TIDBitmap *tbm);
 extern BlockNumber createPostingTree(Relation index,
 				  ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats);
+				  GinStatsData *buildStats, Buffer entrybuffer);
 extern void GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset);
 extern void GinPageDeletePostingItem(Page page, OffsetNumber offset);
 extern void ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
diff --git a/src/test/isolation/expected/predicate-gin.out b/src/test/isolation/expected/predicate-gin.out
new file mode 100644
index 0000000000..4f5501f6f0
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin.out
@@ -0,0 +1,756 @@
+Parsed test spec with 2 sessions
+
+starting permutation: rxy1 wx1 c1 rxy2 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10050          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+
+starting permutation: rxy2 wy2 c2 rxy1 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10050          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+
+starting permutation: rxy3 wx3 c1 rxy4 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 wy4 c2 rxy3 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy1 wx1 rxy2 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 wx1 rxy2 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 c2 wx1 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 rxy1 wx1 c1 wy2 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wx1 wy2 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy3 wx3 rxy4 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wx3 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 c2 wx3 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wx3 c1 wy4 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy1 rxy2fu wx1 c1 wy2fu c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2fu: select count(*) from gin_tbl where p @> array[10000,10005];
+count          
+
+0              
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2fu: insert into gin_tbl select g, array[10000,10005] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+
+starting permutation: fu1 rxy1 rxy2fu wx1 c1 wy2fu c2
+step fu1: alter index ginidx set (fastupdate = on);
+			  commit;
+			  begin isolation level serializable; 
+  			  set enable_seqscan=off;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2fu: select count(*) from gin_tbl where p @> array[10000,10005];
+count          
+
+0              
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2fu: insert into gin_tbl select g, array[10000,10005] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index 53e1f192b0..d3965fe73f 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -67,3 +67,4 @@ test: vacuum-reltuples
 test: timeouts
 test: vacuum-concurrent-drop
 test: predicate-gist
+test: predicate-gin
diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec
new file mode 100644
index 0000000000..9f0cda8057
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin.spec
@@ -0,0 +1,134 @@
+# Test for page level predicate locking in gin index
+#
+# Test to verify serialization failures and to check reduced false positives
+#
+# To verify serialization failures, queries and permutations are written in such
+# a way that an index scan  (from one transaction) and an index insert (from
+# another transaction) will try to access the same part (sub-tree) of the index
+# whereas to check reduced false positives, they will try to access different
+# parts (sub-tree) of the index.
+
+
+setup
+{
+  create table gin_tbl(id int4, p int4[]);
+  insert into gin_tbl select g, array[g, g*2,g*3] from generate_series(1, 10000) g;
+  insert into gin_tbl select g, array[4,5,6] from generate_series(10001, 20000) g;
+  create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
+}
+
+teardown
+{
+  drop table gin_tbl;
+}
+
+session "s1"
+setup
+{
+  begin isolation level serializable;
+  set enable_seqscan=off;
+}
+
+# enable pending list for a small subset of tests
+step "fu1"	{ alter index ginidx set (fastupdate = on);
+			  commit;
+			  begin isolation level serializable; 
+  			  set enable_seqscan=off; }
+
+step "rxy1"	{ select count(*) from gin_tbl where p @> array[4,5]; }
+step "wx1"	{ insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g; }
+step "rxy3"	{ select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; }
+step "wx3"	{ insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g; }
+step "c1"  { commit; }
+
+session "s2"
+setup
+{
+  begin isolation level serializable;
+  set enable_seqscan=off;
+}
+
+step "rxy2"	{ select count(*) from gin_tbl where p @> array[5,6]; }
+step "rxy2fu"	{ select count(*) from gin_tbl where p @> array[10000,10005]; }
+step "wy2"	{ insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g; }
+step "wy2fu"	{ insert into gin_tbl select g, array[10000,10005] from
+              generate_series(20051, 20100) g; }
+step "rxy4"	{ select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000]; }
+step "wy4"	{ insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g; }
+step "c2"	{ commit; }
+
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index but one transaction commits before other
+# transaction begins so no r-w conflict.
+
+permutation "rxy1" "wx1" "c1" "rxy2" "wy2" "c2"
+permutation "rxy2" "wy2" "c2" "rxy1" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index and also one transaction commits before
+# other transaction begins, so no r-w conflict.
+
+permutation "rxy3" "wx3" "c1" "rxy4" "wy4" "c2"
+permutation "rxy4" "wy4" "c2" "rxy3" "wx3" "c1"
+
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index and one transaction begins before other
+# transaction commits so there is a r-w conflict.
+
+permutation "rxy1" "wx1" "rxy2" "c1" "wy2" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c1" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wx1" "c1" "wy2" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c1" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c1" "c2"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "rxy1" "wx1" "c1" "wy2" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c1" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c1" "c2"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c1" "c2"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c2" "c1"
+permutation "rxy2" "wy2" "rxy1" "c2" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index so no r-w conflict.
+
+permutation "rxy3" "wx3" "rxy4" "c1" "wy4" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c1" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wx3" "c1" "wy4" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c1" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c1" "c2"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "rxy3" "wx3" "c1" "wy4" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c1" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c1" "c2"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c1" "c2"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c2" "c1"
+permutation "rxy4" "wy4" "rxy3" "c2" "wx3" "c1"
+
+# Test fastupdate = on. The first test should pass because fastupdate is off
+# and the sessions touch different parts of the index; the second should fail
+# because with fastupdate on, the whole index is under a predicate lock.
+
+permutation       "rxy1" "rxy2fu" "wx1" "c1" "wy2fu" "c2"
+permutation "fu1" "rxy1" "rxy2fu" "wx1" "c1" "wy2fu" "c2"
+
#35Teodor Sigaev
teodor@sigaev.ru
In reply to: Alvaro Herrera (#33)
1 attachment(s)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

Alvaro Herrera wrote:

I don't quite understand the new call in gininsert -- I mean I see that
it wants to check for conflicts even when fastupdate is set, but why?

If fastupdate is set then we check for conflicts with the whole index, not
particular pages in it. A predicate lock on pending list pages would effectively
be a lock over the whole index, because every scan begins from the pending list
and each insert goes into it.

Maybe, just maybe, it would be better to add a new flag to the
GinCheckForSerializableConflictIn function, that's passed differently
for this one callsite, and then a comment next to it that indicates why
do we test for fastupdates in one case and not the other case.
If you don't like this idea, then I think more commentary on why
fastupdate is not considered in gininsert is warranted.

In startScanEntry, if you "goto restartScanEntry" in the fastupdate
case, are you trying to acquire the same lock again? Maybe the lock
acquire should occur before the goto target? (If this doesn't matter for
some reason, maybe add a comment about it)

Thank you for noticing that; I've completely reworked this part. Somehow I
misread the actual behavior of GinGetPendingListCleanupSize() :(.

See attached patch
--
Teodor Sigaev E-mail: teodor@sigaev.ru
WWW: http://www.sigaev.ru/

Attachments:

Predicate-Locking-in-gin-index_v12.patchtext/x-patch; name=Predicate-Locking-in-gin-index_v12.patchDownload
commit 699e84fab488145d8109f6466f664024855aca38
Author: Teodor Sigaev <teodor@sigaev.ru>
Date:   Thu Mar 29 20:05:35 2018 +0300

    Predicate locking in GIN index
    
    Predicate locks are used on a per-page basis only if fastupdate = off; in
    the opposite case a predicate lock on the pending list would effectively
    lock the whole index, so to reduce locking overhead we just lock the
    relation instead. Entry and posting trees are essentially B-trees, so
    locks are acquired on leaf pages only.
    
    Author: Shubham Barai with some editorization by me and Dmitry Ivanov
    Review by: Alexander Korotkov, Dmitry Ivanov, Fedor Sigaev
    Discussion: https://www.postgresql.org/message-id/flat/CALxAEPt5sWW+EwTaKUGFL5_XFcZ0MuGBcyJ70oqbWqr42YKR8Q@mail.gmail.com

diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 37070b3b72..095b1192cb 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -17,6 +17,7 @@
 #include "access/gin_private.h"
 #include "access/ginxlog.h"
 #include "access/xloginsert.h"
+#include "storage/predicate.h"
 #include "miscadmin.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
@@ -515,6 +516,19 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			btree->fillRoot(btree, newrootpg,
 							BufferGetBlockNumber(lbuffer), newlpage,
 							BufferGetBlockNumber(rbuffer), newrpage);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(lbuffer));
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
+
 		}
 		else
 		{
@@ -524,6 +538,14 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
 			GinPageGetOpaque(newrpage)->rightlink = savedRightLink;
 			GinPageGetOpaque(newlpage)->flags |= GIN_INCOMPLETE_SPLIT;
 			GinPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer);
+
+			if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
+			{
+
+				PredicateLockPageSplit(btree->index,
+						BufferGetBlockNumber(stack->buffer),
+						BufferGetBlockNumber(rbuffer));
+			}
 		}
 
 		/*
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index f9daaba52e..642ca1a2c7 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -19,6 +19,7 @@
 #include "access/xloginsert.h"
 #include "lib/ilist.h"
 #include "miscadmin.h"
+#include "storage/predicate.h"
 #include "utils/rel.h"
 
 /*
@@ -1759,7 +1760,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining)
  */
 BlockNumber
 createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats)
+				  GinStatsData *buildStats, Buffer entrybuffer)
 {
 	BlockNumber blkno;
 	Buffer		buffer;
@@ -1810,6 +1811,12 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 	page = BufferGetPage(buffer);
 	blkno = BufferGetBlockNumber(buffer);
 
+	/*
+	 * Copy a predicate lock from the entry tree leaf (containing the posting
+	 * list) to the root of the new posting tree.
+	 */
+	PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno);
+
 	START_CRIT_SECTION();
 
 	PageRestoreTempPage(tmppage, page);
@@ -1904,6 +1911,7 @@ ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
 		btree.itemptr = insertdata.items[insertdata.curitem];
 		stack = ginFindLeafPage(&btree, false, NULL);
 
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		ginInsertValue(&btree, stack, &insertdata, buildStats);
 	}
 }
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 6fe67f346d..4b2ae4a54f 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -17,8 +17,10 @@
 #include "access/gin_private.h"
 #include "access/relscan.h"
 #include "miscadmin.h"
+#include "storage/predicate.h"
 #include "utils/datum.h"
 #include "utils/memutils.h"
+#include "utils/rel.h"
 
 /* GUC parameter */
 int			GinFuzzySearchLimit = 0;
@@ -33,11 +35,25 @@ typedef struct pendingPosition
 } pendingPosition;
 
 
+/*
+ * Place predicate lock on GIN page if needed.
+ */
+static void
+GinPredicateLockPage(Relation index, BlockNumber blkno, Snapshot snapshot)
+{
+	/*
+	 * When fast update is on there is no need to lock individual pages,
+	 * because we need to lock the whole index anyway.
+	 */
+	if (!GinGetUseFastUpdate(index))
+			PredicateLockPage(index, blkno, snapshot);
+}
+
 /*
  * Goes to the next page if current offset is outside of bounds
  */
 static bool
-moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
+moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack, Snapshot snapshot)
 {
 	Page		page = BufferGetPage(stack->buffer);
 
@@ -52,6 +68,7 @@ moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
 		stack->buffer = ginStepRight(stack->buffer, btree->index, GIN_SHARE);
 		stack->blkno = BufferGetBlockNumber(stack->buffer);
 		stack->off = FirstOffsetNumber;
+		GinPredicateLockPage(btree->index, stack->blkno, snapshot);
 	}
 
 	return true;
@@ -73,6 +90,7 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	/* Descend to the leftmost leaf page */
 	stack = ginScanBeginPostingTree(&btree, index, rootPostingTree, snapshot);
 	buffer = stack->buffer;
+
 	IncrBufferRefCount(buffer); /* prevent unpin in freeGinBtreeStack */
 
 	freeGinBtreeStack(stack);
@@ -82,6 +100,11 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	 */
 	for (;;)
 	{
+		/*
+		 * Predicate lock each leaf page in posting tree
+		 */
+		GinPredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
+
 		page = BufferGetPage(buffer);
 		if ((GinPageGetOpaque(page)->flags & GIN_DELETED) == 0)
 		{
@@ -131,6 +154,12 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 	attnum = scanEntry->attnum;
 	attr = TupleDescAttr(btree->ginstate->origTupdesc, attnum - 1);
 
+	/*
+	 * Predicate lock entry leaf page, following pages will be locked by
+	 * moveRightIfItNeeded()
+	 */
+	GinPredicateLockPage(btree->index, stack->buffer, snapshot);
+
 	for (;;)
 	{
 		Page		page;
@@ -141,7 +170,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 		/*
 		 * stack->off points to the interested entry, buffer is already locked
 		 */
-		if (moveRightIfItNeeded(btree, stack) == false)
+		if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 			return true;
 
 		page = BufferGetPage(stack->buffer);
@@ -250,7 +279,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 				Datum		newDatum;
 				GinNullCategory newCategory;
 
-				if (moveRightIfItNeeded(btree, stack) == false)
+				if (moveRightIfItNeeded(btree, stack, snapshot) == false)
 					elog(ERROR, "lost saved point in index");	/* must not happen !!! */
 
 				page = BufferGetPage(stack->buffer);
@@ -323,6 +352,7 @@ restartScanEntry:
 						ginstate);
 	stackEntry = ginFindLeafPage(&btreeEntry, true, snapshot);
 	page = BufferGetPage(stackEntry->buffer);
+
 	/* ginFindLeafPage() will have already checked snapshot age. */
 	needUnlock = true;
 
@@ -370,6 +400,10 @@ restartScanEntry:
 	{
 		IndexTuple	itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, stackEntry->off));
 
+		/* Predicate lock visited entry leaf page */
+		GinPredicateLockPage(ginstate->index,
+							 BufferGetBlockNumber(stackEntry->buffer), snapshot);
+
 		if (GinIsPostingTree(itup))
 		{
 			BlockNumber rootPostingTree = GinGetPostingTree(itup);
@@ -391,6 +425,12 @@ restartScanEntry:
 											rootPostingTree, snapshot);
 			entry->buffer = stack->buffer;
 
+			/*
+			 * Predicate lock visited posting tree page, following pages
+			 * will be locked by moveRightIfItNeeded or entryLoadMoreItems
+			 */
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
 			/*
 			 * We keep buffer pinned because we need to prevent deletion of
 			 * page during scan. See GIN's vacuum implementation. RefCount is
@@ -493,7 +533,7 @@ startScanKey(GinState *ginstate, GinScanOpaque so, GinScanKey key)
 
 		for (i = 0; i < key->nentries - 1; i++)
 		{
-			/* Pass all entries <= i as FALSE, and the rest as MAYBE */
+			/* Pass all entries <= i as false, and the rest as MAYBE */
 			for (j = 0; j <= i; j++)
 				key->entryRes[entryIndexes[j]] = GIN_FALSE;
 			for (j = i + 1; j < key->nentries; j++)
@@ -633,6 +673,8 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 		entry->btree.fullScan = false;
 		stack = ginFindLeafPage(&entry->btree, true, snapshot);
 
+		GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(stack->buffer), snapshot);
+
 		/* we don't need the stack, just the buffer. */
 		entry->buffer = stack->buffer;
 		IncrBufferRefCount(entry->buffer);
@@ -677,6 +719,10 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 			entry->buffer = ginStepRight(entry->buffer,
 										 ginstate->index,
 										 GIN_SHARE);
+
+			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
+
+
 			page = BufferGetPage(entry->buffer);
 		}
 		stepright = true;
@@ -1038,8 +1084,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
 	 * lossy page even when none of the other entries match.
 	 *
 	 * Our strategy is to call the tri-state consistent function, with the
-	 * lossy-page entries set to MAYBE, and all the other entries FALSE. If it
-	 * returns FALSE, none of the lossy items alone are enough for a match, so
+	 * lossy-page entries set to MAYBE, and all the other entries false. If it
+	 * returns false, none of the lossy items alone are enough for a match, so
 	 * we don't need to return a lossy-page pointer. Otherwise, return a
 	 * lossy-page pointer to indicate that the whole heap page must be
 	 * checked.  (On subsequent calls, we'll do nothing until minItem is past
@@ -1730,9 +1776,24 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 	{
 		/* No pending list, so proceed with normal scan */
 		UnlockReleaseBuffer(metabuffer);
+
+		/*
+		 * If fast update is enabled, we acquire a predicate lock on the
+		 * entire relation, because fast update postpones the insertion of
+		 * tuples into the index structure, making rw conflicts undetectable
+		 * at the page level.
+		 */
+		if (GinGetUseFastUpdate(scan->indexRelation))
+			PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
+
 		return;
 	}
 
+	/*
+	 * The pending list is not empty, so we need to lock the whole index
+	 * regardless of the fastupdate state.
+	 */
+	PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
+
 	pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
 	LockBuffer(pos.pendingBuffer, GIN_SHARE);
 	pos.firstOffset = FirstOffsetNumber;
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 23f7285547..6c8009e626 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -22,6 +22,7 @@
 #include "storage/bufmgr.h"
 #include "storage/smgr.h"
 #include "storage/indexfsm.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 
@@ -48,7 +49,7 @@ static IndexTuple
 addItemPointersToLeafTuple(GinState *ginstate,
 						   IndexTuple old,
 						   ItemPointerData *items, uint32 nitem,
-						   GinStatsData *buildStats)
+						   GinStatsData *buildStats, Buffer buffer)
 {
 	OffsetNumber attnum;
 	Datum		key;
@@ -99,7 +100,8 @@ addItemPointersToLeafTuple(GinState *ginstate,
 		postingRoot = createPostingTree(ginstate->index,
 										oldItems,
 										oldNPosting,
-										buildStats);
+										buildStats,
+										buffer);
 
 		/* Now insert the TIDs-to-be-added into the posting tree */
 		ginInsertItemPointers(ginstate->index, postingRoot,
@@ -127,7 +129,7 @@ static IndexTuple
 buildFreshLeafTuple(GinState *ginstate,
 					OffsetNumber attnum, Datum key, GinNullCategory category,
 					ItemPointerData *items, uint32 nitem,
-					GinStatsData *buildStats)
+					GinStatsData *buildStats, Buffer buffer)
 {
 	IndexTuple	res = NULL;
 	GinPostingList *compressedList;
@@ -157,7 +159,7 @@ buildFreshLeafTuple(GinState *ginstate,
 		 * Initialize a new posting tree with the TIDs.
 		 */
 		postingRoot = createPostingTree(ginstate->index, items, nitem,
-										buildStats);
+										buildStats, buffer);
 
 		/* And save the root link in the result tuple */
 		GinSetPostingTree(res, postingRoot);
@@ -217,17 +219,19 @@ ginEntryInsert(GinState *ginstate,
 			return;
 		}
 
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* modify an existing leaf entry */
 		itup = addItemPointersToLeafTuple(ginstate, itup,
-										  items, nitem, buildStats);
+										  items, nitem, buildStats, stack->buffer);
 
 		insertdata.isDelete = true;
 	}
 	else
 	{
+		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		/* no match, so construct a new leaf entry */
 		itup = buildFreshLeafTuple(ginstate, attnum, key, category,
-								   items, nitem, buildStats);
+								   items, nitem, buildStats, stack->buffer);
 	}
 
 	/* Insert the new or modified leaf tuple */
@@ -513,6 +517,13 @@ gininsert(Relation index, Datum *values, bool *isnull,
 
 		memset(&collector, 0, sizeof(GinTupleCollector));
 
+		/*
+		 * Do not use GinCheckForSerializableConflictIn() here, because
+		 * it will do nothing (it does actual work only with fastupdate off).
+		 * Check for conflicts for entire index.
+		 */
+		CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
+
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleFastCollect(ginstate, &collector,
 									(OffsetNumber) (i + 1),
@@ -523,6 +534,16 @@ gininsert(Relation index, Datum *values, bool *isnull,
 	}
 	else
 	{
+		GinStatsData	stats;
+
+		/*
+		 * Fastupdate is off but if pending list isn't empty then we need to
+		 * check conflicts with PredicateLockRelation in scanPendingInsert().
+		 */
+		ginGetStats(index, &stats);
+		if (stats.nPendingPages > 0)
+			CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
+
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleInsert(ginstate, (OffsetNumber) (i + 1),
 							   values[i], isnull[i],
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 7bac7a1252..5632cc5a77 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -23,6 +23,7 @@
 #include "miscadmin.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/builtins.h"
 #include "utils/index_selfuncs.h"
 #include "utils/typcache.h"
@@ -49,7 +50,7 @@ ginhandler(PG_FUNCTION_ARGS)
 	amroutine->amsearchnulls = false;
 	amroutine->amstorage = true;
 	amroutine->amclusterable = false;
-	amroutine->ampredlocks = false;
+	amroutine->ampredlocks = true;
 	amroutine->amcanparallel = false;
 	amroutine->amkeytype = InvalidOid;
 
@@ -716,3 +717,10 @@ ginUpdateStats(Relation index, const GinStatsData *stats)
 
 	END_CRIT_SECTION();
 }
+
+void
+GinCheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
+{
+	if (!GinGetUseFastUpdate(relation))
+		CheckForSerializableConflictIn(relation, tuple, buffer);
+}
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 398532d80b..6bce58942b 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -22,6 +22,7 @@
 #include "postmaster/autovacuum.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/memutils.h"
 
 struct GinVacuumState
@@ -153,11 +154,18 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
 
 	LockBuffer(lBuffer, GIN_EXCLUSIVE);
 
+	page = BufferGetPage(dBuffer);
+	rightlink = GinPageGetOpaque(page)->rightlink;
+
+	/*
+	 * Any insert which would have gone on the leaf block will now go to its
+	 * right sibling.
+	 */
+	PredicateLockPageCombine(gvs->index, deleteBlkno, rightlink);
+
 	START_CRIT_SECTION();
 
 	/* Unlink the page by changing left sibling's rightlink */
-	page = BufferGetPage(dBuffer);
-	rightlink = GinPageGetOpaque(page)->rightlink;
 
 	page = BufferGetPage(lBuffer);
 	GinPageGetOpaque(page)->rightlink = rightlink;
diff --git a/src/backend/storage/lmgr/README-SSI b/src/backend/storage/lmgr/README-SSI
index e221241f96..9e98af23c8 100644
--- a/src/backend/storage/lmgr/README-SSI
+++ b/src/backend/storage/lmgr/README-SSI
@@ -380,6 +380,15 @@ then be trusted to ripple up to all levels and locations where
 conflicting predicate locks may exist. In case there is a page split,
 we need to copy predicate lock from an original page to all new pages.
 
+    * GIN searches acquire predicate locks only on the leaf pages
+of entry tree and posting tree. During a page split, predicate locks are
+copied from the original page to the new page. In the same way predicate locks
+are copied from entry tree leaf page to freshly created posting tree root.
+However, when fast update is enabled, a predicate lock on the whole index
+relation is required. Fast update postpones the insertion of tuples into index
+structure by temporarily storing them into pending list. That makes us unable
+to detect r-w conflicts using page-level locks.
+
     * The effects of page splits, overflows, consolidations, and
 removals must be carefully reviewed to ensure that predicate locks
 aren't "lost" during those operations, or kept with pages which could
diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h
index a709596a7a..d1df3033a6 100644
--- a/src/include/access/gin_private.h
+++ b/src/include/access/gin_private.h
@@ -103,6 +103,8 @@ extern Datum *ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
 extern OffsetNumber gintuple_get_attrnum(GinState *ginstate, IndexTuple tuple);
 extern Datum gintuple_get_key(GinState *ginstate, IndexTuple tuple,
 				 GinNullCategory *category);
+extern void GinCheckForSerializableConflictIn(Relation relation,
+				 HeapTuple tuple, Buffer buffer);
 
 /* gininsert.c */
 extern IndexBuildResult *ginbuild(Relation heap, Relation index,
@@ -217,7 +219,7 @@ extern ItemPointer GinDataLeafPageGetItems(Page page, int *nitems, ItemPointerDa
 extern int	GinDataLeafPageGetItemsToTbm(Page page, TIDBitmap *tbm);
 extern BlockNumber createPostingTree(Relation index,
 				  ItemPointerData *items, uint32 nitems,
-				  GinStatsData *buildStats);
+				  GinStatsData *buildStats, Buffer entrybuffer);
 extern void GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset);
 extern void GinPageDeletePostingItem(Page page, OffsetNumber offset);
 extern void ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
diff --git a/src/test/isolation/expected/predicate-gin.out b/src/test/isolation/expected/predicate-gin.out
new file mode 100644
index 0000000000..4f5501f6f0
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin.out
@@ -0,0 +1,756 @@
+Parsed test spec with 2 sessions
+
+starting permutation: rxy1 wx1 c1 rxy2 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10050          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+
+starting permutation: rxy2 wy2 c2 rxy1 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10050          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+
+starting permutation: rxy3 wx3 c1 rxy4 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 wy4 c2 rxy3 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy1 wx1 rxy2 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 wx1 rxy2 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 wx1 rxy2 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 c1 wy2 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy1 rxy2 wx1 wy2 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wx1 wy2 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c1 c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 wx1 c2 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy1 rxy2 wy2 c2 wx1 c1
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 rxy1 wx1 c1 wy2 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
+
+starting permutation: rxy2 rxy1 wx1 wy2 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wx1 wy2 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 rxy1 wy2 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy2 wy2 rxy1 wx1 c1 c2
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step c2: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 wx1 c2 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c2: commit;
+step c1: commit;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+
+starting permutation: rxy2 wy2 rxy1 c2 wx1 c1
+step rxy2: select count(*) from gin_tbl where p @> array[5,6];
+count          
+
+10000          
+step wy2: insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step c2: commit;
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c1: commit;
+
+starting permutation: rxy3 wx3 rxy4 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 wx3 rxy4 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wx3 c1 wy4 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wx3 wy4 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c1 c2
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy3 rxy4 wy4 wx3 c2 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy3 rxy4 wy4 c2 wx3 c1
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wx3 c1 wy4 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wx3 wy4 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 rxy3 wy4 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 rxy3 wy4 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c1 c2
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+step c2: commit;
+
+starting permutation: rxy4 wy4 rxy3 wx3 c2 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c2: commit;
+step c1: commit;
+
+starting permutation: rxy4 wy4 rxy3 c2 wx3 c1
+step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000];
+count          
+
+4              
+step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g;
+step rxy3: select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000];
+count          
+
+4              
+step c2: commit;
+step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g;
+step c1: commit;
+
+starting permutation: rxy1 rxy2fu wx1 c1 wy2fu c2
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2fu: select count(*) from gin_tbl where p @> array[10000,10005];
+count          
+
+0              
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2fu: insert into gin_tbl select g, array[10000,10005] from
+              generate_series(20051, 20100) g;
+step c2: commit;
+
+starting permutation: fu1 rxy1 rxy2fu wx1 c1 wy2fu c2
+step fu1: alter index ginidx set (fastupdate = on);
+			  commit;
+			  begin isolation level serializable; 
+  			  set enable_seqscan=off;
+step rxy1: select count(*) from gin_tbl where p @> array[4,5];
+count          
+
+10000          
+step rxy2fu: select count(*) from gin_tbl where p @> array[10000,10005];
+count          
+
+0              
+step wx1: insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g;
+step c1: commit;
+step wy2fu: insert into gin_tbl select g, array[10000,10005] from
+              generate_series(20051, 20100) g;
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: commit;
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index 53e1f192b0..d3965fe73f 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -67,3 +67,4 @@ test: vacuum-reltuples
 test: timeouts
 test: vacuum-concurrent-drop
 test: predicate-gist
+test: predicate-gin
diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec
new file mode 100644
index 0000000000..9f0cda8057
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin.spec
@@ -0,0 +1,134 @@
+# Test for page level predicate locking in gin index
+#
+# Test to verify serialization failures and to check reduced false positives
+#
+# To verify serialization failures, queries and permutations are written in such
+# a way that an index scan  (from one transaction) and an index insert (from
+# another transaction) will try to access the same part (sub-tree) of the index
+# whereas to check reduced false positives, they will try to access different
+# parts (sub-tree) of the index.
+
+
+setup
+{
+  create table gin_tbl(id int4, p int4[]);
+  insert into gin_tbl select g, array[g, g*2,g*3] from generate_series(1, 10000) g;
+  insert into gin_tbl select g, array[4,5,6] from generate_series(10001, 20000) g;
+  create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
+}
+
+teardown
+{
+  drop table gin_tbl;
+}
+
+session "s1"
+setup
+{
+  begin isolation level serializable;
+  set enable_seqscan=off;
+}
+
+# enable pending list for a small subset of tests
+step "fu1"	{ alter index ginidx set (fastupdate = on);
+			  commit;
+			  begin isolation level serializable; 
+  			  set enable_seqscan=off; }
+
+step "rxy1"	{ select count(*) from gin_tbl where p @> array[4,5]; }
+step "wx1"	{ insert into gin_tbl select g, array[5,6] from generate_series
+              (20001, 20050) g; }
+step "rxy3"	{ select count(*) from gin_tbl where p @> array[1,2] or
+              p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; }
+step "wx3"	{ insert into gin_tbl select g, array[g,g*2] from generate_series
+              (1, 50) g; }
+step "c1"  { commit; }
+
+session "s2"
+setup
+{
+  begin isolation level serializable;
+  set enable_seqscan=off;
+}
+
+step "rxy2"	{ select count(*) from gin_tbl where p @> array[5,6]; }
+step "rxy2fu"	{ select count(*) from gin_tbl where p @> array[10000,10005]; }
+step "wy2"	{ insert into gin_tbl select g, array[4,5] from
+              generate_series(20051, 20100) g; }
+step "wy2fu"	{ insert into gin_tbl select g, array[10000,10005] from
+              generate_series(20051, 20100) g; }
+step "rxy4"	{ select count(*) from gin_tbl where p @> array[4000,8000] or
+              p @> array[5000,10000] or p @> array[6000,12000] or
+              p @> array[8000,16000]; }
+step "wy4"	{ insert into gin_tbl select g, array[g,g*2] from generate_series
+              (10000, 10050) g; }
+step "c2"	{ commit; }
+
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index but one transaction commits before other
+# transaction begins so no r-w conflict.
+
+permutation "rxy1" "wx1" "c1" "rxy2" "wy2" "c2"
+permutation "rxy2" "wy2" "c2" "rxy1" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index and also one transaction commits before
+# other transaction begins, so no r-w conflict.
+
+permutation "rxy3" "wx3" "c1" "rxy4" "wy4" "c2"
+permutation "rxy4" "wy4" "c2" "rxy3" "wx3" "c1"
+
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access the same part of the index and one transaction begins before other
+# transaction commits so there is a r-w conflict.
+
+permutation "rxy1" "wx1" "rxy2" "c1" "wy2" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c1" "c2"
+permutation "rxy1" "wx1" "rxy2" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wx1" "c1" "wy2" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c1" "c2"
+permutation "rxy1" "rxy2" "wx1" "wy2" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c1" "c2"
+permutation "rxy1" "rxy2" "wy2" "wx1" "c2" "c1"
+permutation "rxy1" "rxy2" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "rxy1" "wx1" "c1" "wy2" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c1" "c2"
+permutation "rxy2" "rxy1" "wx1" "wy2" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c1" "c2"
+permutation "rxy2" "rxy1" "wy2" "wx1" "c2" "c1"
+permutation "rxy2" "rxy1" "wy2" "c2" "wx1" "c1"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c1" "c2"
+permutation "rxy2" "wy2" "rxy1" "wx1" "c2" "c1"
+permutation "rxy2" "wy2" "rxy1" "c2" "wx1" "c1"
+
+# An index scan (from one transaction) and an index insert (from another transaction)
+# try to access different parts of the index so no r-w conflict.
+
+permutation "rxy3" "wx3" "rxy4" "c1" "wy4" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c1" "c2"
+permutation "rxy3" "wx3" "rxy4" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wx3" "c1" "wy4" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c1" "c2"
+permutation "rxy3" "rxy4" "wx3" "wy4" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c1" "c2"
+permutation "rxy3" "rxy4" "wy4" "wx3" "c2" "c1"
+permutation "rxy3" "rxy4" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "rxy3" "wx3" "c1" "wy4" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c1" "c2"
+permutation "rxy4" "rxy3" "wx3" "wy4" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c1" "c2"
+permutation "rxy4" "rxy3" "wy4" "wx3" "c2" "c1"
+permutation "rxy4" "rxy3" "wy4" "c2" "wx3" "c1"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c1" "c2"
+permutation "rxy4" "wy4" "rxy3" "wx3" "c2" "c1"
+permutation "rxy4" "wy4" "rxy3" "c2" "wx3" "c1"
+
+# Test fastupdate = on. The first test should pass because fastupdate is off
+# and the sessions touch different parts of the index; the second should fail
+# because with fastupdate on, the whole index is under a predicate lock.
+
+permutation       "rxy1" "rxy2fu" "wx1" "c1" "wy2fu" "c2"
+permutation "fu1" "rxy1" "rxy2fu" "wx1" "c1" "wy2fu" "c2"
+
#36Alvaro Herrera
alvherre@alvh.no-ip.org
In reply to: Teodor Sigaev (#35)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

Teodor Sigaev wrote:

Alvaro Herrera wrote:

I don't quite understand the new call in gininsert -- I mean I see that
it wants to check for conflicts even when fastupdate is set, but why?

If fastupdate is set then we check conflict with whole index, not a
particular pages in it. Predicate lock on pending list pages will be
effectively a lock over index, because every scan will begin from pending
list and each insert will insert into it. I

Oh, right, that makes sense. I'm not sure that the comments explain
this sufficiently -- I think it'd be good to expand that.

Given this patch, it seems clear that serializable mode is much worse
with fastupdate than without it!

--
Álvaro Herrera https://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services

#37Teodor Sigaev
teodor@sigaev.ru
In reply to: Alvaro Herrera (#36)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

Thanks for everyone, pushed

Oh, right, that makes sense. I'm not sure that the comments explain
this sufficiently -- I think it'd be good to expand that.

I improved comments

Given this patch, it seems clear that serializable mode is much worse
with fastupdate than without it!

That's true. Any list-based approaches like pending list, brin or bloom indexes
could work only with locking entire relation.

--
Teodor Sigaev E-mail: teodor@sigaev.ru
WWW: http://www.sigaev.ru/

#38Tom Lane
tgl@sss.pgh.pa.us
In reply to: Teodor Sigaev (#37)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

Teodor Sigaev <teodor@sigaev.ru> writes:

Thanks for everyone, pushed

prion doesn't like this patch, which probably means that something is
trying to use the content of a relcache entry after having closed it.

regards, tom lane

#39Tom Lane
tgl@sss.pgh.pa.us
In reply to: Tom Lane (#38)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

I wrote:

prion doesn't like this patch, which probably means that something is
trying to use the content of a relcache entry after having closed it.

Oh, sorry, scratch that --- looking closer, it was failing before this.

regards, tom lane

#40Heikki Linnakangas
hlinnaka@iki.fi
In reply to: Alexander Korotkov (#25)
1 attachment(s)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

On 16/03/18 00:26, Alexander Korotkov wrote:

On Tue, Mar 13, 2018 at 3:26 PM, Andrey Borodin <x4mmm@yandex-team.ru> wrote:

On 13/03/18 14:02, Alexander Korotkov wrote:

And what happen if somebody concurrently set (fastupdate = on)?
Can we miss conflicts because of that?

No, AccessExclusiveLock will prevent this kind of problems with enabling
fastupdate.

True. I didn't notice that ALTER INDEX SET locks index in so high mode.
Thus, everything is fine from this perspective.

Nope, an AccessExclusiveLock is not good enough. Predicate locks stay
around after the transaction has committed and regular locks have been
released.

Attached is a test case that demonstrates a case where we miss a
serialization failure, when fastupdate is turned on concurrently. It
works on v10, but fails to throw a serialization error on v11.

- Heikki

Attachments:

predicate-gin-fastupdate-fail-demo.patchtext/x-patch; name=predicate-gin-fastupdate-fail-demo.patchDownload
diff --git a/src/test/isolation/expected/predicate-gin-fastupdate.out b/src/test/isolation/expected/predicate-gin-fastupdate.out
new file mode 100644
index 0000000000..7d4fa8e024
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin-fastupdate.out
@@ -0,0 +1,30 @@
+Parsed test spec with 3 sessions
+
+starting permutation: r1 r2 w1 c1 w2 c2
+step r1: SELECT count(*) FROM gin_tbl WHERE p @> array[1000];
+count          
+
+2              
+step r2: SELECT * FROM other_tbl;
+id             
+
+step w1: INSERT INTO other_tbl VALUES (42);
+step c1: COMMIT;
+step w2: INSERT INTO gin_tbl SELECT array[1000,19001];
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
+
+starting permutation: r1 r2 w1 c1 fastupdate_on w2 c2
+step r1: SELECT count(*) FROM gin_tbl WHERE p @> array[1000];
+count          
+
+2              
+step r2: SELECT * FROM other_tbl;
+id             
+
+step w1: INSERT INTO other_tbl VALUES (42);
+step c1: COMMIT;
+step fastupdate_on: ALTER INDEX ginidx SET (fastupdate = on);
+step w2: INSERT INTO gin_tbl SELECT array[1000,19001];
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
diff --git a/src/test/isolation/specs/predicate-gin-fastupdate.spec b/src/test/isolation/specs/predicate-gin-fastupdate.spec
new file mode 100644
index 0000000000..587158f16b
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin-fastupdate.spec
@@ -0,0 +1,43 @@
+#
+#
+# 0. fastupdate is off
+# 1. Session 's1' acquires predicate lock on page X
+# 2. fastupdate is turned on
+# 3. Session 's2' inserts a new tuple to the pending list
+#
+setup
+{
+  create table gin_tbl(p int4[]);
+  insert into gin_tbl select array[g, g*2,g*3] from generate_series(1, 10000) g;
+  insert into gin_tbl select array[4,5,6] from generate_series(10001, 20000) g;
+  create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
+
+  create table other_tbl (id int4);
+}
+
+teardown
+{
+  drop table gin_tbl;
+  drop table other_tbl;
+}
+
+session "s1"
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; }
+step "r1" { SELECT count(*) FROM gin_tbl WHERE p @> array[1000]; }
+step "w1" { INSERT INTO other_tbl VALUES (42); }
+step "c1" { COMMIT; }
+
+session "s2"
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; }
+step "r2" { SELECT * FROM other_tbl; }
+step "w2" { INSERT INTO gin_tbl SELECT array[1000,19001]; }
+step "c2" { COMMIT; }
+
+session "s3"
+step "fastupdate_on" { ALTER INDEX ginidx SET (fastupdate = on); }
+
+# This correctly throws serialization failure.
+permutation "r1" "r2" "w1" "c1" "w2" "c2"
+
+# But if fastupdate is turned on in the middle, we miss it.
+permutation "r1" "r2" "w1" "c1" "fastupdate_on" "w2" "c2"
#41Heikki Linnakangas
hlinnaka@iki.fi
In reply to: Teodor Sigaev (#31)
1 attachment(s)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

On 28/03/18 19:53, Teodor Sigaev wrote:

Hi!

I slightly modified test for clean demonstration of difference between
fastupdate on and off. Also I added CheckForSerializableConflictIn() to
fastupdate off codepath, but only in case of non-empty pending list.

The next question what I see: why do not we lock entry leaf pages in some cases?

Why should we?

As I understand, scan should lock any visited page, but now it's true only for
posting tree. Seems, it also should lock pages in entry tree because concurrent
procesess could add new entries which could be matched by partial search, for
example. BTW, partial search (see collectMatchBitmap()) locks correctly entry
tree, but regular startScanEntry() doesn't lock entry page in case of posting
tree, only in case of posting list.

I think this needs some high-level comments or README to explain how the
locking works. It seems pretty ad hoc at the moment. And incorrect.

1. Why do we lock all posting tree pages, even though they all represent
the same value? Isn't it enough to lock the root of the posting tree?

2. Why do we lock any posting tree pages at all, if we lock the entry
tree page anyway? Isn't the lock on the entry tree page sufficient to
cover the key value?

3. Why do we *not* lock the entry leaf page, if there is no match? We
still need a lock to remember that we probed for that value and there
was no match, so that we conflict with a tuple that might be inserted later.

At least #3 is a bug. The attached patch adds an isolation test that
demonstrates it. #1 and #2 are weird, and cause unnecessary locking, so
I think we should fix those too, even if they don't lead to incorrect
results.

Remember, the purpose of predicate locks is to lock key ranges, not
physical pages or tuples in the index. We use leaf pages as handy
shortcut for "any key value that would belong on this page", but it is
just an implementation detail.

I took a stab at fixing those issues, as well as the bug when fastupdate
is turned on concurrently. Does the attached patch look sane to you?

- Heikki

Attachments:

0001-Re-think-predicate-locking-on-GIN-indexes.patchtext/x-patch; name=0001-Re-think-predicate-locking-on-GIN-indexes.patchDownload
From b1ccb28fa8249d644382fd0b9c2a6ab94f6395e7 Mon Sep 17 00:00:00 2001
From: Heikki Linnakangas <heikki.linnakangas@iki.fi>
Date: Mon, 9 Apr 2018 13:31:42 +0300
Subject: [PATCH 1/1] Re-think predicate locking on GIN indexes.

The principle behind the locking was not very well thought-out, and not
documented. Add a section in the README to explain how it's supposed to
work, and change the code so that it actually works that way.

This fixes two bugs:

1. If fast update was turned on concurrently, subsequent inserts to the
   pending list would not conflict with predicate locks that were acquired
   earlier, on entry pages. The included 'predicate-gin-fastupdate' test
   demonstrates that. To fix, make all scans acquire a predicate lock on
   the metapage. That lock represents a scan of the pending list, whether
   or not there is a pending list at the moment. Forget about the
   optimization to skip locking/checking for locks, when fastupdate=off.
   Maybe some of that was safe, but I couldn't convince myself of it, so
   better to rip it out and keep things simple.

2. If a scan finds no match, it still needs to lock the entry page. The
   point of predicate locks is to lock the gaps between values, whether
   or not there is a match. The included 'predicate-gin-nomatch' test
   tests that case.

In addition to those two bug fixes, this removes some unnecessary locking,
following the principle laid out in the README. Because all items in
a posting tree have the same key value, a lock on the posting tree root is
enough to cover all the items. (With a very large posting tree, it would
possibly be better to lock the posting tree leaf pages instead, so that with a
"skip scan" query like "A & B", you could avoid unnecessary conflict
if a new tuple is inserted with A but !B. But let's keep this simple.)

Also, some spelling and whitespace fixes.
---
 src/backend/access/gin/README                      |  34 ++++++
 src/backend/access/gin/ginbtree.c                  |  13 ++-
 src/backend/access/gin/gindatapage.c               |  25 +++--
 src/backend/access/gin/ginfast.c                   |   8 ++
 src/backend/access/gin/ginget.c                    | 116 ++++++++++-----------
 src/backend/access/gin/gininsert.c                 |  34 ++----
 src/backend/access/gin/ginutil.c                   |   7 --
 src/backend/access/gin/ginvacuum.c                 |   1 -
 src/backend/access/gist/gist.c                     |   2 +-
 src/backend/storage/lmgr/README-SSI                |  22 ++--
 src/include/access/gin_private.h                   |   7 +-
 .../expected/predicate-gin-fastupdate.out          |  30 ++++++
 .../isolation/expected/predicate-gin-nomatch.out   |  15 +++
 src/test/isolation/expected/predicate-gin.out      |   4 +-
 src/test/isolation/isolation_schedule              |   2 +
 .../isolation/specs/predicate-gin-fastupdate.spec  |  49 +++++++++
 .../isolation/specs/predicate-gin-nomatch.spec     |  35 +++++++
 src/test/isolation/specs/predicate-gin.spec        |   4 +-
 18 files changed, 281 insertions(+), 127 deletions(-)
 create mode 100644 src/test/isolation/expected/predicate-gin-fastupdate.out
 create mode 100644 src/test/isolation/expected/predicate-gin-nomatch.out
 create mode 100644 src/test/isolation/specs/predicate-gin-fastupdate.spec
 create mode 100644 src/test/isolation/specs/predicate-gin-nomatch.spec

diff --git a/src/backend/access/gin/README b/src/backend/access/gin/README
index 990b5ffa58..cc434b1feb 100644
--- a/src/backend/access/gin/README
+++ b/src/backend/access/gin/README
@@ -331,6 +331,40 @@ page-deletions safe; it stamps the deleted pages with an XID and keeps the
 deleted pages around with the right-link intact until all concurrent scans
 have finished.)
 
+Predicate Locking
+-----------------
+
+GIN supports predicate locking, for serializable snapshot isolation.
+A predicate locks represent that a scan has scanned a range of values.  They
+are not concerned with physical pages as such, but the logical key values.
+A predicate lock on a page covers the key range that would belong on that
+page, whether or not there are any matching tuples there currently.  In other
+words, a predicate lock on an index page covers the "gaps" between the index
+tuples.  To minimize false positives, predicate locks are acquired at the
+finest level possible.
+
+* Like in the B-tree index, it is enough to lock only leaf pages, because all
+  insertions happen at the leaf level.
+
+* In an equality search (i.e. not a partial match search), if a key entry has
+  a posting tree, we lock the posting tree root page, to represent a lock on
+  just that key entry.  Otherwise, we lock the entry tree page.  We also lock
+  the entry tree page if no match is found, to lock the "gap" where the entry
+  would've been, had there been one.
+
+* In a partial match search, we lock all the entry leaf pages that we scan,
+  in addition to locks on posting tree roots, to represent the "gaps" between
+  values.
+
+* In addition to the locks on entry leaf pages and posting tree roots, all
+  scans grab a lock the metapage.  This is to interlock with insertions to
+  the fast update pending list.  An insertion to the pending list can really
+  belong anywhere in the tree, and the lock on the metapage represents that.
+
+The interlock for fastupdate pending lists means that with fastupdate=on,
+we effectively always grab a full-index lock, so you could get a lot of false
+positives.
+
 Compatibility
 -------------
 
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 095b1192cb..5bd0c7a560 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -80,10 +80,21 @@ ginFindLeafPage(GinBtree btree, bool searchMode, Snapshot snapshot)
 
 	stack = (GinBtreeStack *) palloc(sizeof(GinBtreeStack));
 	stack->blkno = btree->rootBlkno;
-	stack->buffer = ReadBuffer(btree->index, btree->rootBlkno);
 	stack->parent = NULL;
 	stack->predictNumber = 1;
 
+	/*
+	 * Start from the root page. If the caller had already pinned it, take
+	 * advantage of that.
+	 */
+	if (BufferIsValid(btree->rootBuffer))
+	{
+		IncrBufferRefCount(btree->rootBuffer);
+		stack->buffer = btree->rootBuffer;
+	}
+	else
+		stack->buffer = ReadBuffer(btree->index, btree->rootBlkno);
+
 	for (;;)
 	{
 		Page		page;
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index 642ca1a2c7..837da0720f 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -102,6 +102,8 @@ typedef struct
 	int			nitems;			/* # of items in 'items', if items != NULL */
 } leafSegmentInfo;
 
+static void ginPrepareDataScan(GinBtree btree, Relation index, BlockNumber rootBlkno,
+				   Buffer rootBuffer);
 static ItemPointer dataLeafPageGetUncompressed(Page page, int *nitems);
 static void dataSplitPageInternal(GinBtree btree, Buffer origbuf,
 					  GinBtreeStack *stack,
@@ -1812,8 +1814,8 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 	blkno = BufferGetBlockNumber(buffer);
 
 	/*
-	 * Copy a predicate lock from entry tree leaf (containing posting list)
-	 * to  posting tree.
+	 * Copy any predicate locks from the entry tree leaf (containing posting
+	 * list) to the posting tree.
 	 */
 	PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno);
 
@@ -1840,7 +1842,7 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 		PageSetLSN(page, recptr);
 	}
 
-	UnlockReleaseBuffer(buffer);
+	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 
 	END_CRIT_SECTION();
 
@@ -1855,22 +1857,26 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 	 */
 	if (nitems > nrootitems)
 	{
-		ginInsertItemPointers(index, blkno,
+		ginInsertItemPointers(index, blkno, buffer,
 							  items + nrootitems,
 							  nitems - nrootitems,
 							  buildStats);
 	}
 
+	ReleaseBuffer(buffer);
+
 	return blkno;
 }
 
-void
-ginPrepareDataScan(GinBtree btree, Relation index, BlockNumber rootBlkno)
+static void
+ginPrepareDataScan(GinBtree btree, Relation index, BlockNumber rootBlkno,
+				   Buffer rootBuffer)
 {
 	memset(btree, 0, sizeof(GinBtreeData));
 
 	btree->index = index;
 	btree->rootBlkno = rootBlkno;
+	btree->rootBuffer = rootBuffer;
 
 	btree->findChildPage = dataLocateItem;
 	btree->getLeftMostChild = dataGetLeftMostPage;
@@ -1891,7 +1897,7 @@ ginPrepareDataScan(GinBtree btree, Relation index, BlockNumber rootBlkno)
  * Inserts array of item pointers, may execute several tree scan (very rare)
  */
 void
-ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
+ginInsertItemPointers(Relation index, BlockNumber rootBlkno, Buffer rootBuffer,
 					  ItemPointerData *items, uint32 nitem,
 					  GinStatsData *buildStats)
 {
@@ -1899,7 +1905,7 @@ ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
 	GinBtreeDataLeafInsertData insertdata;
 	GinBtreeStack *stack;
 
-	ginPrepareDataScan(&btree, index, rootBlkno);
+	ginPrepareDataScan(&btree, index, rootBlkno, rootBuffer);
 	btree.isBuild = (buildStats != NULL);
 	insertdata.items = items;
 	insertdata.nitem = nitem;
@@ -1911,7 +1917,6 @@ ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
 		btree.itemptr = insertdata.items[insertdata.curitem];
 		stack = ginFindLeafPage(&btree, false, NULL);
 
-		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		ginInsertValue(&btree, stack, &insertdata, buildStats);
 	}
 }
@@ -1925,7 +1930,7 @@ ginScanBeginPostingTree(GinBtree btree, Relation index, BlockNumber rootBlkno,
 {
 	GinBtreeStack *stack;
 
-	ginPrepareDataScan(btree, index, rootBlkno);
+	ginPrepareDataScan(btree, index, rootBlkno, InvalidBuffer);
 
 	btree->fullScan = true;
 
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 615730b8e5..5f624cf6fa 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -31,6 +31,7 @@
 #include "postmaster/autovacuum.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/builtins.h"
 
 /* GUC parameter */
@@ -245,6 +246,13 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
 	metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
 	metapage = BufferGetPage(metabuffer);
 
+	/*
+	 * An insertion to the pending list could logically belong anywhere in
+	 * the tree, so it conflicts with all serializable scans.  All scans
+	 * acquire a predicate lock on the metabuffer to represent that.
+	 */
+	CheckForSerializableConflictIn(index, NULL, metabuffer);
+
 	if (collector->sumsize + collector->ntuples * sizeof(ItemIdData) > GinListPageSize)
 	{
 		/*
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 0e984166fa..ef3cd7dbe2 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -36,20 +36,6 @@ typedef struct pendingPosition
 
 
 /*
- * Place predicate lock on GIN page if needed.
- */
-static void
-GinPredicateLockPage(Relation index, BlockNumber blkno, Snapshot snapshot)
-{
-	/*
-	 * When fast update is on then no need in locking pages, because we
-	 * anyway need to lock the whole index.
-	 */
-	if (!GinGetUseFastUpdate(index))
-			PredicateLockPage(index, blkno, snapshot);
-}
-
-/*
  * Goes to the next page if current offset is outside of bounds
  */
 static bool
@@ -68,7 +54,7 @@ moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack, Snapshot snapshot
 		stack->buffer = ginStepRight(stack->buffer, btree->index, GIN_SHARE);
 		stack->blkno = BufferGetBlockNumber(stack->buffer);
 		stack->off = FirstOffsetNumber;
-		GinPredicateLockPage(btree->index, stack->blkno, snapshot);
+		PredicateLockPage(btree->index, stack->blkno, snapshot);
 	}
 
 	return true;
@@ -100,11 +86,6 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	 */
 	for (;;)
 	{
-		/*
-		 * Predicate lock each leaf page in posting tree
-		 */
-		GinPredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
-
 		page = BufferGetPage(buffer);
 		if ((GinPageGetOpaque(page)->flags & GIN_DELETED) == 0)
 		{
@@ -158,7 +139,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 	 * Predicate lock entry leaf page, following pages will be locked by
 	 * moveRightIfItNeeded()
 	 */
-	GinPredicateLockPage(btree->index, stack->buffer, snapshot);
+	PredicateLockPage(btree->index, stack->buffer, snapshot);
 
 	for (;;)
 	{
@@ -253,6 +234,13 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 
 			LockBuffer(stack->buffer, GIN_UNLOCK);
 
+			/*
+			 * Acquire predicate lock on the posting tree.  We already hold
+			 * a lock on the entry page, but insertions to the posting tree
+			 * don't check for conflicts on that level.
+			 */
+			PredicateLockPage(btree->index, rootPostingTree, snapshot);
+
 			/* Collect all the TIDs in this entry's posting tree */
 			scanPostingTree(btree->index, scanEntry, rootPostingTree,
 							snapshot);
@@ -400,10 +388,6 @@ restartScanEntry:
 	{
 		IndexTuple	itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, stackEntry->off));
 
-		/* Predicate lock visited entry leaf page */
-		GinPredicateLockPage(ginstate->index,
-							 BufferGetBlockNumber(stackEntry->buffer), snapshot);
-
 		if (GinIsPostingTree(itup))
 		{
 			BlockNumber rootPostingTree = GinGetPostingTree(itup);
@@ -412,6 +396,13 @@ restartScanEntry:
 			ItemPointerData minItem;
 
 			/*
+			 * This is an equality scan, so lock the root of the posting tree.
+			 * It represents a lock on the exact key value, and covers all the
+			 * items in the posting tree.
+			 */
+			PredicateLockPage(ginstate->index, rootPostingTree, snapshot);
+
+			/*
 			 * We should unlock entry page before touching posting tree to
 			 * prevent deadlocks with vacuum processes. Because entry is never
 			 * deleted from page and posting tree is never reduced to the
@@ -426,12 +417,6 @@ restartScanEntry:
 			entry->buffer = stack->buffer;
 
 			/*
-			 * Predicate lock visited posting tree page, following pages
-			 * will be locked by moveRightIfItNeeded or entryLoadMoreItems
-			 */
-			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
-
-			/*
 			 * We keep buffer pinned because we need to prevent deletion of
 			 * page during scan. See GIN's vacuum implementation. RefCount is
 			 * increased to keep buffer pinned after freeGinBtreeStack() call.
@@ -452,15 +437,38 @@ restartScanEntry:
 			freeGinBtreeStack(stack);
 			entry->isFinished = false;
 		}
-		else if (GinGetNPosting(itup) > 0)
+		else
 		{
-			entry->list = ginReadTuple(ginstate, entry->attnum, itup,
-									   &entry->nlist);
-			entry->predictNumberResult = entry->nlist;
+			/*
+			 * Lock the entry leaf page.  This is more coarse-grained than
+			 * necessary, because it will conflict with any insertions that
+			 * land on the same leaf page, not only the exacty key we searched
+			 * for.  But locking an individual tuple would require updating
+			 * that lock whenever it moves because of insertions or vacuums,
+			 * which seems too complicated.
+			 */
+			PredicateLockPage(ginstate->index,
+							  BufferGetBlockNumber(stackEntry->buffer),
+							  snapshot);
+			if (GinGetNPosting(itup) > 0)
+			{
+				entry->list = ginReadTuple(ginstate, entry->attnum, itup,
+										   &entry->nlist);
+				entry->predictNumberResult = entry->nlist;
 
-			entry->isFinished = false;
+				entry->isFinished = false;
+			}
 		}
 	}
+	else
+	{
+		/*
+		 * No entry found.  Predicate lock the leaf page, to lock the place
+		 * where the entry would've been, had there been one.
+		 */
+		PredicateLockPage(ginstate->index,
+						  BufferGetBlockNumber(stackEntry->buffer), snapshot);
+	}
 
 	if (needUnlock)
 		LockBuffer(stackEntry->buffer, GIN_UNLOCK);
@@ -533,7 +541,7 @@ startScanKey(GinState *ginstate, GinScanOpaque so, GinScanKey key)
 
 		for (i = 0; i < key->nentries - 1; i++)
 		{
-			/* Pass all entries <= i as false, and the rest as MAYBE */
+			/* Pass all entries <= i as FALSE, and the rest as MAYBE */
 			for (j = 0; j <= i; j++)
 				key->entryRes[entryIndexes[j]] = GIN_FALSE;
 			for (j = i + 1; j < key->nentries; j++)
@@ -673,8 +681,6 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 		entry->btree.fullScan = false;
 		stack = ginFindLeafPage(&entry->btree, true, snapshot);
 
-		GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(stack->buffer), snapshot);
-
 		/* we don't need the stack, just the buffer. */
 		entry->buffer = stack->buffer;
 		IncrBufferRefCount(entry->buffer);
@@ -719,10 +725,6 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 			entry->buffer = ginStepRight(entry->buffer,
 										 ginstate->index,
 										 GIN_SHARE);
-
-			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
-
-
 			page = BufferGetPage(entry->buffer);
 		}
 		stepright = true;
@@ -1084,8 +1086,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
 	 * lossy page even when none of the other entries match.
 	 *
 	 * Our strategy is to call the tri-state consistent function, with the
-	 * lossy-page entries set to MAYBE, and all the other entries false. If it
-	 * returns false, none of the lossy items alone are enough for a match, so
+	 * lossy-page entries set to MAYBE, and all the other entries FALSE. If it
+	 * returns FALSE, none of the lossy items alone are enough for a match, so
 	 * we don't need to return a lossy-page pointer. Otherwise, return a
 	 * lossy-page pointer to indicate that the whole heap page must be
 	 * checked.  (On subsequent calls, we'll do nothing until minItem is past
@@ -1746,8 +1748,7 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos)
 }
 
 /*
- * Collect all matched rows from pending list into bitmap. Also function
- * takes PendingLockRelation if it's needed.
+ * Collect all matched rows from pending list into bitmap.
  */
 static void
 scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
@@ -1764,6 +1765,12 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 
 	*ntids = 0;
 
+	/*
+	 * Acquire predicate lock on the metapage, to conflict with any
+	 * fastupdate insertions.
+	 */
+	PredicateLockPage(scan->indexRelation, GIN_METAPAGE_BLKNO, scan->xs_snapshot);
+
 	LockBuffer(metabuffer, GIN_SHARE);
 	page = BufferGetPage(metabuffer);
 	TestForOldSnapshot(scan->xs_snapshot, scan->indexRelation, page);
@@ -1777,24 +1784,9 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 	{
 		/* No pending list, so proceed with normal scan */
 		UnlockReleaseBuffer(metabuffer);
-
-		/*
-		 * If fast update is enabled, we acquire a predicate lock on the entire
-		 * relation as fast update postpones the insertion of tuples into index
-		 * structure due to which we can't detect rw conflicts.
-		 */
-		if (GinGetUseFastUpdate(scan->indexRelation))
-			PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
-
 		return;
 	}
 
-	/*
-	 * Pending list is not empty, we need to lock the index doesn't despite on
-	 * fastupdate state
-	 */
-	PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
-
 	pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
 	LockBuffer(pos.pendingBuffer, GIN_SHARE);
 	pos.firstOffset = FirstOffsetNumber;
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index ec5eebb848..92c77015a1 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -104,7 +104,7 @@ addItemPointersToLeafTuple(GinState *ginstate,
 										buffer);
 
 		/* Now insert the TIDs-to-be-added into the posting tree */
-		ginInsertItemPointers(ginstate->index, postingRoot,
+		ginInsertItemPointers(ginstate->index, postingRoot, InvalidBuffer,
 							  items, nitem,
 							  buildStats);
 
@@ -207,19 +207,23 @@ ginEntryInsert(GinState *ginstate,
 		{
 			/* add entries to existing posting tree */
 			BlockNumber rootPostingTree = GinGetPostingTree(itup);
+			Buffer		rootBuffer;
 
 			/* release all stack */
 			LockBuffer(stack->buffer, GIN_UNLOCK);
 			freeGinBtreeStack(stack);
 
 			/* insert into posting tree */
-			ginInsertItemPointers(ginstate->index, rootPostingTree,
+			rootBuffer = ReadBuffer(ginstate->index, rootPostingTree);
+			CheckForSerializableConflictIn(ginstate->index, NULL, rootBuffer);
+			ginInsertItemPointers(ginstate->index, rootPostingTree, rootBuffer,
 								  items, nitem,
 								  buildStats);
+			ReleaseBuffer(rootBuffer);
 			return;
 		}
 
-		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
+		CheckForSerializableConflictIn(ginstate->index, NULL, stack->buffer);
 		/* modify an existing leaf entry */
 		itup = addItemPointersToLeafTuple(ginstate, itup,
 										  items, nitem, buildStats, stack->buffer);
@@ -228,7 +232,7 @@ ginEntryInsert(GinState *ginstate,
 	}
 	else
 	{
-		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
+		CheckForSerializableConflictIn(ginstate->index, NULL, stack->buffer);
 		/* no match, so construct a new leaf entry */
 		itup = buildFreshLeafTuple(ginstate, attnum, key, category,
 								   items, nitem, buildStats, stack->buffer);
@@ -517,18 +521,6 @@ gininsert(Relation index, Datum *values, bool *isnull,
 
 		memset(&collector, 0, sizeof(GinTupleCollector));
 
-		/*
-		 * With fastupdate on each scan and each insert begin with access to
-		 * pending list, so it effectively lock entire index. In this case
-		 * we aquire predicate lock and check for conflicts over index relation,
-		 * and hope that it will reduce locking overhead.
-		 *
-		 * Do not use GinCheckForSerializableConflictIn() here, because
-		 * it will do nothing (it does actual work only with fastupdate off).
-		 * Check for conflicts for entire index.
-		 */
-		CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
-
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleFastCollect(ginstate, &collector,
 									(OffsetNumber) (i + 1),
@@ -539,16 +531,6 @@ gininsert(Relation index, Datum *values, bool *isnull,
 	}
 	else
 	{
-		GinStatsData	stats;
-
-		/*
-		 * Fastupdate is off but if pending list isn't empty then we need to
-		 * check conflicts with PredicateLockRelation in scanPendingInsert().
-		 */
-		ginGetStats(index, &stats);
-		if (stats.nPendingPages > 0)
-			CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
-
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleInsert(ginstate, (OffsetNumber) (i + 1),
 							   values[i], isnull[i],
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 4367523dd9..0a32182dd7 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -718,10 +718,3 @@ ginUpdateStats(Relation index, const GinStatsData *stats)
 
 	END_CRIT_SECTION();
 }
-
-void
-GinCheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
-{
-	if (!GinGetUseFastUpdate(relation))
-		CheckForSerializableConflictIn(relation, tuple, buffer);
-}
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index dd8e31b872..3104bc12b6 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -166,7 +166,6 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
 	START_CRIT_SECTION();
 
 	/* Unlink the page by changing left sibling's rightlink */
-
 	page = BufferGetPage(lBuffer);
 	GinPageGetOpaque(page)->rightlink = rightlink;
 
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 9007d65ad2..048966924d 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -1220,7 +1220,7 @@ gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
 	bool		is_split;
 
 	/*
-	 * Check for any rw conflicts (in serialisation isolation level)
+	 * Check for any rw conflicts (in serializable isolation level)
 	 * just before we intend to modify the page
 	 */
 	CheckForSerializableConflictIn(state->r, NULL, stack->buffer);
diff --git a/src/backend/storage/lmgr/README-SSI b/src/backend/storage/lmgr/README-SSI
index f2b099d1c9..50d2ecca9d 100644
--- a/src/backend/storage/lmgr/README-SSI
+++ b/src/backend/storage/lmgr/README-SSI
@@ -373,21 +373,22 @@ index *leaf* pages needed to lock the appropriate index range. If,
 however, a search discovers that no root page has yet been created, a
 predicate lock on the index relation is required.
 
+    * Like a B-tree, GIN searches acquire predicate locks only on the
+leaf pages of entry tree. When performing an equality scan, and an
+entry has a posting tree, the posting tree root is locked instead, to
+lock only that key value. However, fastupdate=on postpones the
+insertion of tuples into index structure by temporarily storing them
+into pending list. That makes us unable to detect r-w conflicts using
+page-level locks. To cope with that, insertions to the pending list
+conflict with all scans.
+
     * GiST searches can determine that there are no matches at any
 level of the index, so we acquire predicate lock at each index
 level during a GiST search. An index insert at the leaf level can
 then be trusted to ripple up to all levels and locations where
 conflicting predicate locks may exist. In case there is a page split,
-we need to copy predicate lock from an original page to all new pages.
-
-    * GIN searches acquire predicate locks only on the leaf pages
-of entry tree and posting tree. During a page split, a predicate locks are
-copied from the original page to the new page. In the same way predicate locks
-are copied from entry tree leaf page to freshly created posting tree root.
-However, when fast update is enabled, a predicate lock on the whole index
-relation is required. Fast update postpones the insertion of tuples into index
-structure by temporarily storing them into pending list. That makes us unable
-to detect r-w conflicts using page-level locks.
+we need to copy predicate lock from the original page to all the new
+pages.
 
     * Hash index searches acquire predicate locks on the primary
 page of a bucket. It acquires a lock on both the old and new buckets
@@ -395,7 +396,6 @@ for scans that happen concurrently with page splits. During a bucket
 split, a predicate lock is copied from the primary page of an old
 bucket to the primary page of a new bucket.
 
-
     * The effects of page splits, overflows, consolidations, and
 removals must be carefully reviewed to ensure that predicate locks
 aren't "lost" during those operations, or kept with pages which could
diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h
index d1df3033a6..1e2c9dde8b 100644
--- a/src/include/access/gin_private.h
+++ b/src/include/access/gin_private.h
@@ -103,8 +103,6 @@ extern Datum *ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
 extern OffsetNumber gintuple_get_attrnum(GinState *ginstate, IndexTuple tuple);
 extern Datum gintuple_get_key(GinState *ginstate, IndexTuple tuple,
 				 GinNullCategory *category);
-extern void GinCheckForSerializableConflictIn(Relation relation,
-				 HeapTuple tuple, Buffer buffer);
 
 /* gininsert.c */
 extern IndexBuildResult *ginbuild(Relation heap, Relation index,
@@ -161,6 +159,7 @@ typedef struct GinBtreeData
 
 	Relation	index;
 	BlockNumber rootBlkno;
+	Buffer		rootBuffer;
 	GinState   *ginstate;		/* not valid in a data scan */
 	bool		fullScan;
 	bool		isBuild;
@@ -222,12 +221,12 @@ extern BlockNumber createPostingTree(Relation index,
 				  GinStatsData *buildStats, Buffer entrybuffer);
 extern void GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset);
 extern void GinPageDeletePostingItem(Page page, OffsetNumber offset);
-extern void ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
+extern void ginInsertItemPointers(Relation index,
+					  BlockNumber rootBlkno, Buffer rootBuffer,
 					  ItemPointerData *items, uint32 nitem,
 					  GinStatsData *buildStats);
 extern GinBtreeStack *ginScanBeginPostingTree(GinBtree btree, Relation index, BlockNumber rootBlkno, Snapshot snapshot);
 extern void ginDataFillRoot(GinBtree btree, Page root, BlockNumber lblkno, Page lpage, BlockNumber rblkno, Page rpage);
-extern void ginPrepareDataScan(GinBtree btree, Relation index, BlockNumber rootBlkno);
 
 /*
  * This is declared in ginvacuum.c, but is passed between ginVacuumItemPointers
diff --git a/src/test/isolation/expected/predicate-gin-fastupdate.out b/src/test/isolation/expected/predicate-gin-fastupdate.out
new file mode 100644
index 0000000000..7d4fa8e024
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin-fastupdate.out
@@ -0,0 +1,30 @@
+Parsed test spec with 3 sessions
+
+starting permutation: r1 r2 w1 c1 w2 c2
+step r1: SELECT count(*) FROM gin_tbl WHERE p @> array[1000];
+count          
+
+2              
+step r2: SELECT * FROM other_tbl;
+id             
+
+step w1: INSERT INTO other_tbl VALUES (42);
+step c1: COMMIT;
+step w2: INSERT INTO gin_tbl SELECT array[1000,19001];
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
+
+starting permutation: r1 r2 w1 c1 fastupdate_on w2 c2
+step r1: SELECT count(*) FROM gin_tbl WHERE p @> array[1000];
+count          
+
+2              
+step r2: SELECT * FROM other_tbl;
+id             
+
+step w1: INSERT INTO other_tbl VALUES (42);
+step c1: COMMIT;
+step fastupdate_on: ALTER INDEX ginidx SET (fastupdate = on);
+step w2: INSERT INTO gin_tbl SELECT array[1000,19001];
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
diff --git a/src/test/isolation/expected/predicate-gin-nomatch.out b/src/test/isolation/expected/predicate-gin-nomatch.out
new file mode 100644
index 0000000000..5e733262a4
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin-nomatch.out
@@ -0,0 +1,15 @@
+Parsed test spec with 2 sessions
+
+starting permutation: r1 r2 w1 c1 w2 c2
+step r1: SELECT count(*) FROM gin_tbl WHERE p @> array[-1];
+count          
+
+0              
+step r2: SELECT * FROM other_tbl;
+id             
+
+step w1: INSERT INTO other_tbl VALUES (42);
+step c1: COMMIT;
+step w2: INSERT INTO gin_tbl SELECT array[-1];
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
diff --git a/src/test/isolation/expected/predicate-gin.out b/src/test/isolation/expected/predicate-gin.out
index 4f5501f6f0..bdf8911923 100644
--- a/src/test/isolation/expected/predicate-gin.out
+++ b/src/test/isolation/expected/predicate-gin.out
@@ -737,8 +737,8 @@ step c2: commit;
 starting permutation: fu1 rxy1 rxy2fu wx1 c1 wy2fu c2
 step fu1: alter index ginidx set (fastupdate = on);
 			  commit;
-			  begin isolation level serializable; 
-  			  set enable_seqscan=off;
+			  begin isolation level serializable;
+			  set enable_seqscan=off;
 step rxy1: select count(*) from gin_tbl where p @> array[4,5];
 count          
 
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index 6cb3d07240..5203ad582b 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -73,6 +73,8 @@ test: vacuum-concurrent-drop
 test: predicate-hash
 test: predicate-gist
 test: predicate-gin
+test: predicate-gin-fastupdate
+test: predicate-gin-nomatch
 test: partition-key-update-1
 test: partition-key-update-2
 test: partition-key-update-3
diff --git a/src/test/isolation/specs/predicate-gin-fastupdate.spec b/src/test/isolation/specs/predicate-gin-fastupdate.spec
new file mode 100644
index 0000000000..04b8036fc5
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin-fastupdate.spec
@@ -0,0 +1,49 @@
+#
+# Test that predicate locking on a GIN index works correctly, even if
+# fastupdate is turned on concurrently.
+#
+# 0. fastupdate is off
+# 1. Session 's1' acquires predicate lock on page X
+# 2. fastupdate is turned on
+# 3. Session 's2' inserts a new tuple to the pending list
+#
+# This test tests that if the lock acquired in step 1 would conflict with
+# the scan in step 1, we detect that conflict correctly, even if fastupdate
+# was turned on in-between.
+#
+setup
+{
+  create table gin_tbl(p int4[]);
+  insert into gin_tbl select array[g, g*2,g*3] from generate_series(1, 10000) g;
+  insert into gin_tbl select array[4,5,6] from generate_series(10001, 20000) g;
+  create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
+
+  create table other_tbl (id int4);
+}
+
+teardown
+{
+  drop table gin_tbl;
+  drop table other_tbl;
+}
+
+session "s1"
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; }
+step "r1" { SELECT count(*) FROM gin_tbl WHERE p @> array[1000]; }
+step "w1" { INSERT INTO other_tbl VALUES (42); }
+step "c1" { COMMIT; }
+
+session "s2"
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; }
+step "r2" { SELECT * FROM other_tbl; }
+step "w2" { INSERT INTO gin_tbl SELECT array[1000,19001]; }
+step "c2" { COMMIT; }
+
+session "s3"
+step "fastupdate_on" { ALTER INDEX ginidx SET (fastupdate = on); }
+
+# This correctly throws serialization failure.
+permutation "r1" "r2" "w1" "c1" "w2" "c2"
+
+# But if fastupdate is turned on in the middle, we miss it.
+permutation "r1" "r2" "w1" "c1" "fastupdate_on" "w2" "c2"
diff --git a/src/test/isolation/specs/predicate-gin-nomatch.spec b/src/test/isolation/specs/predicate-gin-nomatch.spec
new file mode 100644
index 0000000000..0ad456cb14
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin-nomatch.spec
@@ -0,0 +1,35 @@
+#
+# Check that GIN index grabs an appropriate lock, even if there is no match.
+#
+setup
+{
+  create table gin_tbl(p int4[]);
+  insert into gin_tbl select array[g, g*2,g*3] from generate_series(1, 10000) g;
+  insert into gin_tbl select array[4,5,6] from generate_series(10001, 20000) g;
+  create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
+
+  create table other_tbl (id int4);
+}
+
+teardown
+{
+  drop table gin_tbl;
+  drop table other_tbl;
+}
+
+session "s1"
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; }
+# Scan with no match.
+step "r1" { SELECT count(*) FROM gin_tbl WHERE p @> array[-1]; }
+step "w1" { INSERT INTO other_tbl VALUES (42); }
+step "c1" { COMMIT; }
+
+session "s2"
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; }
+step "r2" { SELECT * FROM other_tbl; }
+# Insert row that would've matched in step "r1"
+step "w2" { INSERT INTO gin_tbl SELECT array[-1]; }
+step "c2" { COMMIT; }
+
+# This should throw serialization failure.
+permutation "r1" "r2" "w1" "c1" "w2" "c2"
diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec
index 9f0cda8057..a967695867 100644
--- a/src/test/isolation/specs/predicate-gin.spec
+++ b/src/test/isolation/specs/predicate-gin.spec
@@ -32,8 +32,8 @@ setup
 # enable pending list for a small subset of tests
 step "fu1"	{ alter index ginidx set (fastupdate = on);
 			  commit;
-			  begin isolation level serializable; 
-  			  set enable_seqscan=off; }
+			  begin isolation level serializable;
+			  set enable_seqscan=off; }
 
 step "rxy1"	{ select count(*) from gin_tbl where p @> array[4,5]; }
 step "wx1"	{ insert into gin_tbl select g, array[5,6] from generate_series
-- 
2.11.0

#42Teodor Sigaev
teodor@sigaev.ru
In reply to: Heikki Linnakangas (#40)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

Attached is a test case that demonstrates a case where we miss a serialization
failure, when fastupdate is turned on concurrently. It works on v10, but fails
to throw a serialization error on v11.

Thank you for researching!

Proof of concept:
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 43b2fce2c5..b8291f96e2 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -10745,6 +10745,7 @@ ATExecSetRelOptions(Relation rel, List *defList, 
AlterTableType operation,
         case RELKIND_INDEX:
         case RELKIND_PARTITIONED_INDEX:
             (void) index_reloptions(rel->rd_amroutine->amoptions, newOptions, 
true);
+           TransferPredicateLocksToHeapRelation(rel);
             break;
         default:
             ereport(ERROR,

it fixes the pointed-out bug, but will give false positives. The right place for that is in the
ginoptions function, but ginoptions doesn't have access to the relation structure,
and I don't see a reason why it should.

--
Teodor Sigaev E-mail: teodor@sigaev.ru
WWW: http://www.sigaev.ru/

#43Teodor Sigaev
teodor@sigaev.ru
In reply to: Teodor Sigaev (#42)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

Ugh, I missed your last email where you proposed another locking protocol. Reading.

Teodor Sigaev wrote:

Attached is a test case that demonstrates a case where we miss a serialization
failure, when fastupdate is turned on concurrently. It works on v10, but fails
to throw a serialization error on v11.

Thank you for researching!

Proof of concept:
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 43b2fce2c5..b8291f96e2 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -10745,6 +10745,7 @@ ATExecSetRelOptions(Relation rel, List *defList, 
AlterTableType operation,
        case RELKIND_INDEX:
        case RELKIND_PARTITIONED_INDEX:
            (void) index_reloptions(rel->rd_amroutine->amoptions, newOptions,
true);
+           TransferPredicateLocksToHeapRelation(rel);
            break;
        default:
            ereport(ERROR,

it fixes the pointed-out bug, but will give false positives. The right place for that is in the
ginoptions function, but ginoptions doesn't have access to the relation structure,
and I don't see a reason why it should.

--
Teodor Sigaev E-mail: teodor@sigaev.ru
WWW: http://www.sigaev.ru/

#44Teodor Sigaev
teodor@sigaev.ru
In reply to: Heikki Linnakangas (#41)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

Hi!

1. Why do we lock all posting tree pages, even though they all represent the
same value? Isn't it enough to lock the root of the posting tree?
2. Why do we lock any posting tree pages at all, if we lock the entry tree page
anyway? Isn't the lock on the entry tree page sufficient to cover the key value?
3. Why do we *not* lock the entry leaf page, if there is no match? We still need
a lock to remember that we probed for that value and there was no match, so that
we conflict with a tuple that might be inserted later.

At least #3 is a bug. The attached patch adds an isolation test that
demonstrates it. #1 and #2 are weird, and cause unnecessary locking, so I think
we should fix those too, even if they don't lead to incorrect results.

I can't find a hole here. Agree.

I took a stab at fixing those issues, as well as the bug when fastupdate is
turned on concurrently. Does the attached patch look sane to you?

I like an idea use metapage locking, thank you. Patch seems good, will you push it?

--
Teodor Sigaev E-mail: teodor@sigaev.ru
WWW: http://www.sigaev.ru/

#45Alexander Korotkov
a.korotkov@postgrespro.ru
In reply to: Heikki Linnakangas (#41)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

Hi!

Thank you for taking a look at this patch. I really appreciate your
attention over complex subjects like this.

On Mon, Apr 9, 2018 at 1:33 PM, Heikki Linnakangas <hlinnaka@iki.fi> wrote:

On 28/03/18 19:53, Teodor Sigaev wrote:

As I understand, scan should lock any visited page, but now it's true
only for

posting tree. Seems, it also should lock pages in entry tree because

concurrent
procesess could add new entries which could be matched by partial search,
for
example. BTW, partial search (see collectMatchBitmap()) locks correctly
entry
tree, but regular startScanEntry() doesn't lock entry page in case of
posting
tree, only in case of posting list.

I think this needs some high-level comments or README to explain how the
locking works. It seems pretty ad hoc at the moment. And incorrect.

I agree that the explanation in the README is insufficient.

1. Why do we lock all posting tree pages, even though they all represent

the same value? Isn't it enough to lock the root of the posting tree?

2. Why do we lock any posting tree pages at all, if we lock the entry tree
page anyway? Isn't the lock on the entry tree page sufficient to cover the
key value?

I already had similar concerns in [1]. The idea of locking posting tree
leaves was to
get more granular locks. I think you've correctly described it in the
commit message
here:

With a very large posting tree, it would
possibly be better to lock the posting tree leaf pages instead, so that a
"skip scan" with a query like "A & B", you could avoid unnecessary conflict
if a new tuple is inserted with A but !B. But let's keep this simple.

However, it's very complex and error prone. So, +1 for simplify it for v11.

3. Why do we *not* lock the entry leaf page, if there is no match? We
still need a lock to remember that we probed for that value and there was
no match, so that we conflict with a tuple that might be inserted later.

+1

At least #3 is a bug. The attached patch adds an isolation test that

demonstrates it. #1 and #2 are weird, and cause unnecessary locking, so I
think we should fix those too, even if they don't lead to incorrect results.

Remember, the purpose of predicate locks is to lock key ranges, not
physical pages or tuples in the index. We use leaf pages as handy shortcut
for "any key value that would belong on this page", but it is just an
implementation detail.

I took a stab at fixing those issues, as well as the bug when fastupdate
is turned on concurrently. Does the attached patch look sane to you?

Teodor has already answered you, and I'd like to mention that your patch
looks good for me too.

1. /messages/by-id/CAPpHfdvED_-7KbJp-e
i4zRZu1brLgkJt4CA-uxF0iRO9WX2Sqw%40mail.gmail.com

------
Alexander Korotkov
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company

#46Alvaro Herrera
alvherre@alvh.no-ip.org
In reply to: Heikki Linnakangas (#41)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

Heikki Linnakangas wrote:

Remember, the purpose of predicate locks is to lock key ranges, not physical
pages or tuples in the index. We use leaf pages as handy shortcut for "any
key value that would belong on this page", but it is just an implementation
detail.

Hmm ... so, thinking about pending list locking, would it work to
acquire locks on the posting tree's root of each item in the pending
list, when the item is put in the pending list? (even if we insert the
item in the pending list instead of its posting tree).

--
Álvaro Herrera https://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services

#47Andrey Borodin
x4mmm@yandex-team.ru
In reply to: Teodor Sigaev (#44)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

9 апр. 2018 г., в 19:50, Teodor Sigaev <teodor@sigaev.ru> написал(а):

3. Why do we *not* lock the entry leaf page, if there is no match? We still need a lock to remember that we probed for that value and there was no match, so that we conflict with a tuple that might be inserted later.
At least #3 is a bug. The attached patch adds an isolation test that demonstrates it. #1 and #2 are weird, and cause unnecessary locking, so I think we should fix those too, even if they don't lead to incorrect results.

I can't find a hole here. Agree.

Please correct me if I'm wrong.
Let's say we have posting trees for word A and word B.
We are looking for a document that contains both.
We will read through all posting tree of A, but only through some segments of B.
If we will not find anything in B, we have to lock only segments where we actually were looking, not all the posting tree of B.

BTW I do not think that we lock ranges. We lock possibility of appearance of tuples that we might find. Ranges are shortcuts for places where we were looking.. That's how I understand, chances are I'm missing something.

Best regards, Andrey Borodin.

#48Teodor Sigaev
teodor@sigaev.ru
In reply to: Alvaro Herrera (#46)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

Alvaro Herrera wrote:

Heikki Linnakangas wrote:

Remember, the purpose of predicate locks is to lock key ranges, not physical
pages or tuples in the index. We use leaf pages as handy shortcut for "any
key value that would belong on this page", but it is just an implementation
detail.

Hmm ... so, thinking about pending list locking, would it work to
acquire locks on the posting tree's root of each item in the pending
list, when the item is put in the pending list? (even if we insert the
item in the pending list instead of its posting tree).

Items in the pending list don't use a posting tree or list; the pending list is just a
list of pairs (ItemPointer to heap, entry) represented as IndexTuples. There is no
order in the pending list, so Heikki suggests always locking the metapage instead of
locking the whole relation when fastupdate=on. If fastupdate is off, the insertion
process will not change the metapage.

--
Teodor Sigaev E-mail: teodor@sigaev.ru
WWW: http://www.sigaev.ru/

#49Heikki Linnakangas
hlinnaka@iki.fi
In reply to: Alvaro Herrera (#46)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

On 09/04/18 18:04, Alvaro Herrera wrote:

Heikki Linnakangas wrote:

Remember, the purpose of predicate locks is to lock key ranges, not physical
pages or tuples in the index. We use leaf pages as handy shortcut for "any
key value that would belong on this page", but it is just an implementation
detail.

Hmm ... so, thinking about pending list locking, would it work to
acquire locks on the posting tree's root of each item in the pending
list, when the item is put in the pending list? (even if we insert the
item in the pending list instead of its posting tree).

Hmm, you mean, when inserting a new tuple? Yes, that would be correct. I
don't think it would perform very well, though. If you have to traverse
down to the posting trees, anyway, then you might as well insert the new
tuples there directly, and forget about the pending list.

- Heikki

#50Heikki Linnakangas
hlinnaka@iki.fi
In reply to: Andrey Borodin (#47)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

On 09/04/18 18:21, Andrey Borodin wrote:

9 апр. 2018 г., в 19:50, Teodor Sigaev <teodor@sigaev.ru>
написал(а):

3. Why do we *not* lock the entry leaf page, if there is no
match? We still need a lock to remember that we probed for that
value and there was no match, so that we conflict with a tuple
that might be inserted later. At least #3 is a bug. The attached
patch adds an isolation test that demonstrates it. #1 and #2 are
weird, and cause unnecessary locking, so I think we should fix
those too, even if they don't lead to incorrect results.

I can't find a hole here. Agree.

Please correct me if I'm wrong. Let's say we have posting trees for
word A and word B. We are looking for a document that contains both.
We will read through all posting tree of A, but only through some
segments of B. If we will not find anything in B, we have to lock
only segments where we actually were looking, not all the posting
tree of B.

True, that works. It was not clear from the code or comments that that
was intended. I'm not sure if that's worthwhile, compared to locking
just the posting tree root block. I'll let Teodor decide..

BTW I do not think that we lock ranges. We lock possibility of
appearance of tuples that we might find. Ranges are shortcuts for
places where we were looking.. That's how I understand, chances are
I'm missing something.

Yeah, that's one way of thinking about it.

- Heikki

#51Andrey Borodin
x4mmm@yandex-team.ru
In reply to: Heikki Linnakangas (#50)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

9 апр. 2018 г., в 23:04, Heikki Linnakangas <hlinnaka@iki.fi> написал(а):

On 09/04/18 18:21, Andrey Borodin wrote:

9 апр. 2018 г., в 19:50, Teodor Sigaev <teodor@sigaev.ru>
написал(а):

3. Why do we *not* lock the entry leaf page, if there is no
match? We still need a lock to remember that we probed for that
value and there was no match, so that we conflict with a tuple
that might be inserted later. At least #3 is a bug. The attached
patch adds an isolation test that demonstrates it. #1 and #2 are
weird, and cause unnecessary locking, so I think we should fix
those too, even if they don't lead to incorrect results.

I can't find a hole here. Agree.

Please correct me if I'm wrong. Let's say we have posting trees for
word A and word B. We are looking for a document that contains both. We will read through all posting tree of A, but only through some
segments of B. If we will not find anything in B, we have to lock
only segments where we actually were looking, not all the posting
tree of B.

True, that works. It was not clear from the code or comments that that was intended. I'm not sure if that's worthwhile, compared to locking just the posting tree root block.

From the text search POV this is kind of bulky granularity: if you have frequent words like "the", "a", "in", conflicts are inevitable.
I'm not sure we have means for picking optimal granularity: should it be ranges of postings, ranges of pages of posting trees, entries, pages of entries or whole index.
Technically, [time for locking] should be less than [time of transaction retry]*[probability of conflict]. Holding this constraint we should minimize [time for locking] + [time of transaction retry]*[probability of conflict].
I suspect that [time for locking] is some orders of magnitude less than time of transaction. So, efforts should be skewed towards smaller granularity to reduce [probability of conflict].
But all this is not real math and have no strength of science.

I'll let Teodor decide..

+1. I belive this is very close to optimal solution :)

Best regards, Andrey Borodin.

#52Teodor Sigaev
teodor@sigaev.ru
In reply to: Heikki Linnakangas (#41)
1 attachment(s)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

I took a stab at fixing those issues, as well as the bug when fastupdate is
turned on concurrently. Does the attached patch look sane to you?

That looks sane, and I believe it should be applied, but I see an issue in
your patch:

I'm not very happy with rootBuffer being added everywhere. ginInsertItemPointers() and
ginPrepareDataScan() now take both args, rootBlkno and rootBuffer, the second of which
could be invalid. As I can see, you did it to call
CheckForSerializableConflictIn() in ginEntryInsert(). It seems it could be
reverted and CheckForSerializableConflictIn() should be added to
ginFindLeafPage() with searchMode = false.

Rebased patch is attached.

--
Teodor Sigaev E-mail: teodor@sigaev.ru
WWW: http://www.sigaev.ru/

Attachments:

0001-Re-think-predicate-locking-on-GIN-indexes-v2.patchtext/x-patch; name=0001-Re-think-predicate-locking-on-GIN-indexes-v2.patchDownload
commit 42f73743d9ddf576d2dd9ece3979b407cd70cbfe
Author: Teodor Sigaev <teodor@sigaev.ru>
Date:   Fri Apr 27 13:14:57 2018 +0300

    Re-think predicate locking on GIN indexes.
    
    The principle behind the locking was not very well thought-out, and not
    documented. Add a section in the README to explain how it's supposed to
    work, and change the code so that it actually works that way.
    
    This fixes two bugs:
    
    1. If fast update was turned on concurrently, subsequent inserts to the
       pending list would not conflict with predicate locks that were acquired
       earlier, on entry pages. The included 'predicate-gin-fastupdate' test
       demonstrates that. To fix, make all scans acquire a predicate lock on
       the metapage. That lock represents a scan of the pending list, whether
       or not there is a pending list at the moment. Forget about the
       optimization to skip locking/checking for locks, when fastupdate=off.
       Maybe some of that was safe, but I couldn't convince myself of it, so
       better to rip it out and keep things simple.
    
    2. If a scan finds no match, it still needs to lock the entry page. The
       point of predicate locks is to lock the gabs between values, whether
       or not there is a match. The included 'predicate-gin-nomatch' test
       tests that case.
    
    In addition to those two bug fixes, this removes some unnecessary locking,
    following the principle laid out in the README. Because all items in
    a posting tree have the same key value, a lock on the posting tree root is
    enough to cover all the items. (With a very large posting tree, it would
    possibly be better to lock the posting tree leaf pages instead, so that a
    "skip scan" with a query like "A & B", you could avoid unnecessary conflict
    if a new tuple is inserted with A but !B. But let's keep this simple.)
    
    Also, some spelling  fixes.

diff --git a/src/backend/access/gin/README b/src/backend/access/gin/README
index 990b5ffa58..cc434b1feb 100644
--- a/src/backend/access/gin/README
+++ b/src/backend/access/gin/README
@@ -331,6 +331,40 @@ page-deletions safe; it stamps the deleted pages with an XID and keeps the
 deleted pages around with the right-link intact until all concurrent scans
 have finished.)
 
+Predicate Locking
+-----------------
+
+GIN supports predicate locking, for serializable snapshot isolation.
+A predicate locks represent that a scan has scanned a range of values.  They
+are not concerned with physical pages as such, but the logical key values.
+A predicate lock on a page covers the key range that would belong on that
+page, whether or not there are any matching tuples there currently.  In other
+words, a predicate lock on an index page covers the "gaps" between the index
+tuples.  To minimize false positives, predicate locks are acquired at the
+finest level possible.
+
+* Like in the B-tree index, it is enough to lock only leaf pages, because all
+  insertions happen at the leaf level.
+
+* In an equality search (i.e. not a partial match search), if a key entry has
+  a posting tree, we lock the posting tree root page, to represent a lock on
+  just that key entry.  Otherwise, we lock the entry tree page.  We also lock
+  the entry tree page if no match is found, to lock the "gap" where the entry
+  would've been, had there been one.
+
+* In a partial match search, we lock all the entry leaf pages that we scan,
+  in addition to locks on posting tree roots, to represent the "gaps" between
+  values.
+
+* In addition to the locks on entry leaf pages and posting tree roots, all
+  scans grab a lock the metapage.  This is to interlock with insertions to
+  the fast update pending list.  An insertion to the pending list can really
+  belong anywhere in the tree, and the lock on the metapage represents that.
+
+The interlock for fastupdate pending lists means that with fastupdate=on,
+we effectively always grab a full-index lock, so you could get a lot of false
+positives.
+
 Compatibility
 -------------
 
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 828c7074b7..69e483ab5f 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -80,10 +80,21 @@ ginFindLeafPage(GinBtree btree, bool searchMode, Snapshot snapshot)
 
 	stack = (GinBtreeStack *) palloc(sizeof(GinBtreeStack));
 	stack->blkno = btree->rootBlkno;
-	stack->buffer = ReadBuffer(btree->index, btree->rootBlkno);
 	stack->parent = NULL;
 	stack->predictNumber = 1;
 
+	/*
+	 * Start from the root page. If the caller had already pinned it, take
+	 * advantage of that.
+	 */
+	if (BufferIsValid(btree->rootBuffer))
+	{
+		IncrBufferRefCount(btree->rootBuffer);
+		stack->buffer = btree->rootBuffer;
+	}
+	else
+		stack->buffer = ReadBuffer(btree->index, btree->rootBlkno);
+
 	for (;;)
 	{
 		Page		page;
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index 59bf21744f..837da0720f 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -102,6 +102,8 @@ typedef struct
 	int			nitems;			/* # of items in 'items', if items != NULL */
 } leafSegmentInfo;
 
+static void ginPrepareDataScan(GinBtree btree, Relation index, BlockNumber rootBlkno,
+				   Buffer rootBuffer);
 static ItemPointer dataLeafPageGetUncompressed(Page page, int *nitems);
 static void dataSplitPageInternal(GinBtree btree, Buffer origbuf,
 					  GinBtreeStack *stack,
@@ -1812,8 +1814,8 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 	blkno = BufferGetBlockNumber(buffer);
 
 	/*
-	 * Copy a predicate lock from entry tree leaf (containing posting list) to
-	 * posting tree.
+	 * Copy any predicate locks from the entry tree leaf (containing posting
+	 * list) to the posting tree.
 	 */
 	PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno);
 
@@ -1840,7 +1842,7 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 		PageSetLSN(page, recptr);
 	}
 
-	UnlockReleaseBuffer(buffer);
+	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 
 	END_CRIT_SECTION();
 
@@ -1855,22 +1857,26 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 	 */
 	if (nitems > nrootitems)
 	{
-		ginInsertItemPointers(index, blkno,
+		ginInsertItemPointers(index, blkno, buffer,
 							  items + nrootitems,
 							  nitems - nrootitems,
 							  buildStats);
 	}
 
+	ReleaseBuffer(buffer);
+
 	return blkno;
 }
 
-void
-ginPrepareDataScan(GinBtree btree, Relation index, BlockNumber rootBlkno)
+static void
+ginPrepareDataScan(GinBtree btree, Relation index, BlockNumber rootBlkno,
+				   Buffer rootBuffer)
 {
 	memset(btree, 0, sizeof(GinBtreeData));
 
 	btree->index = index;
 	btree->rootBlkno = rootBlkno;
+	btree->rootBuffer = rootBuffer;
 
 	btree->findChildPage = dataLocateItem;
 	btree->getLeftMostChild = dataGetLeftMostPage;
@@ -1891,7 +1897,7 @@ ginPrepareDataScan(GinBtree btree, Relation index, BlockNumber rootBlkno)
  * Inserts array of item pointers, may execute several tree scan (very rare)
  */
 void
-ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
+ginInsertItemPointers(Relation index, BlockNumber rootBlkno, Buffer rootBuffer,
 					  ItemPointerData *items, uint32 nitem,
 					  GinStatsData *buildStats)
 {
@@ -1899,7 +1905,7 @@ ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
 	GinBtreeDataLeafInsertData insertdata;
 	GinBtreeStack *stack;
 
-	ginPrepareDataScan(&btree, index, rootBlkno);
+	ginPrepareDataScan(&btree, index, rootBlkno, rootBuffer);
 	btree.isBuild = (buildStats != NULL);
 	insertdata.items = items;
 	insertdata.nitem = nitem;
@@ -1911,7 +1917,6 @@ ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
 		btree.itemptr = insertdata.items[insertdata.curitem];
 		stack = ginFindLeafPage(&btree, false, NULL);
 
-		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		ginInsertValue(&btree, stack, &insertdata, buildStats);
 	}
 }
@@ -1925,7 +1930,7 @@ ginScanBeginPostingTree(GinBtree btree, Relation index, BlockNumber rootBlkno,
 {
 	GinBtreeStack *stack;
 
-	ginPrepareDataScan(btree, index, rootBlkno);
+	ginPrepareDataScan(btree, index, rootBlkno, InvalidBuffer);
 
 	btree->fullScan = true;
 
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 615730b8e5..5f624cf6fa 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -31,6 +31,7 @@
 #include "postmaster/autovacuum.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/builtins.h"
 
 /* GUC parameter */
@@ -245,6 +246,13 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
 	metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
 	metapage = BufferGetPage(metabuffer);
 
+	/*
+	 * An insertion to the pending list could logically belong anywhere in
+	 * the tree, so it conflicts with all serializable scans.  All scans
+	 * acquire a predicate lock on the metabuffer to represent that.
+	 */
+	CheckForSerializableConflictIn(index, NULL, metabuffer);
+
 	if (collector->sumsize + collector->ntuples * sizeof(ItemIdData) > GinListPageSize)
 	{
 		/*
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index f3db7cc640..ef3cd7dbe2 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -35,20 +35,6 @@ typedef struct pendingPosition
 } pendingPosition;
 
 
-/*
- * Place predicate lock on GIN page if needed.
- */
-static void
-GinPredicateLockPage(Relation index, BlockNumber blkno, Snapshot snapshot)
-{
-	/*
-	 * When fast update is on then no need in locking pages, because we anyway
-	 * need to lock the whole index.
-	 */
-	if (!GinGetUseFastUpdate(index))
-		PredicateLockPage(index, blkno, snapshot);
-}
-
 /*
  * Goes to the next page if current offset is outside of bounds
  */
@@ -68,7 +54,7 @@ moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack, Snapshot snapshot
 		stack->buffer = ginStepRight(stack->buffer, btree->index, GIN_SHARE);
 		stack->blkno = BufferGetBlockNumber(stack->buffer);
 		stack->off = FirstOffsetNumber;
-		GinPredicateLockPage(btree->index, stack->blkno, snapshot);
+		PredicateLockPage(btree->index, stack->blkno, snapshot);
 	}
 
 	return true;
@@ -100,11 +86,6 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	 */
 	for (;;)
 	{
-		/*
-		 * Predicate lock each leaf page in posting tree
-		 */
-		GinPredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
-
 		page = BufferGetPage(buffer);
 		if ((GinPageGetOpaque(page)->flags & GIN_DELETED) == 0)
 		{
@@ -158,7 +139,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 	 * Predicate lock entry leaf page, following pages will be locked by
 	 * moveRightIfItNeeded()
 	 */
-	GinPredicateLockPage(btree->index, stack->buffer, snapshot);
+	PredicateLockPage(btree->index, stack->buffer, snapshot);
 
 	for (;;)
 	{
@@ -253,6 +234,13 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 
 			LockBuffer(stack->buffer, GIN_UNLOCK);
 
+			/*
+			 * Acquire predicate lock on the posting tree.  We already hold
+			 * a lock on the entry page, but insertions to the posting tree
+			 * don't check for conflicts on that level.
+			 */
+			PredicateLockPage(btree->index, rootPostingTree, snapshot);
+
 			/* Collect all the TIDs in this entry's posting tree */
 			scanPostingTree(btree->index, scanEntry, rootPostingTree,
 							snapshot);
@@ -400,10 +388,6 @@ restartScanEntry:
 	{
 		IndexTuple	itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, stackEntry->off));
 
-		/* Predicate lock visited entry leaf page */
-		GinPredicateLockPage(ginstate->index,
-							 BufferGetBlockNumber(stackEntry->buffer), snapshot);
-
 		if (GinIsPostingTree(itup))
 		{
 			BlockNumber rootPostingTree = GinGetPostingTree(itup);
@@ -411,6 +395,13 @@ restartScanEntry:
 			Page		page;
 			ItemPointerData minItem;
 
+			/*
+			 * This is an equality scan, so lock the root of the posting tree.
+			 * It represents a lock on the exact key value, and covers all the
+			 * items in the posting tree.
+			 */
+			PredicateLockPage(ginstate->index, rootPostingTree, snapshot);
+
 			/*
 			 * We should unlock entry page before touching posting tree to
 			 * prevent deadlocks with vacuum processes. Because entry is never
@@ -425,12 +416,6 @@ restartScanEntry:
 											rootPostingTree, snapshot);
 			entry->buffer = stack->buffer;
 
-			/*
-			 * Predicate lock visited posting tree page, following pages will
-			 * be locked by moveRightIfItNeeded or entryLoadMoreItems
-			 */
-			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
-
 			/*
 			 * We keep buffer pinned because we need to prevent deletion of
 			 * page during scan. See GIN's vacuum implementation. RefCount is
@@ -452,15 +437,38 @@ restartScanEntry:
 			freeGinBtreeStack(stack);
 			entry->isFinished = false;
 		}
-		else if (GinGetNPosting(itup) > 0)
+		else
 		{
-			entry->list = ginReadTuple(ginstate, entry->attnum, itup,
-									   &entry->nlist);
-			entry->predictNumberResult = entry->nlist;
+			/*
+			 * Lock the entry leaf page.  This is more coarse-grained than
+			 * necessary, because it will conflict with any insertions that
+			 * land on the same leaf page, not only the exact key we searched
+			 * for.  But locking an individual tuple would require updating
+			 * that lock whenever it moves because of insertions or vacuums,
+			 * which seems too complicated.
+			 */
+			PredicateLockPage(ginstate->index,
+							  BufferGetBlockNumber(stackEntry->buffer),
+							  snapshot);
+			if (GinGetNPosting(itup) > 0)
+			{
+				entry->list = ginReadTuple(ginstate, entry->attnum, itup,
+										   &entry->nlist);
+				entry->predictNumberResult = entry->nlist;
 
-			entry->isFinished = false;
+				entry->isFinished = false;
+			}
 		}
 	}
+	else
+	{
+		/*
+		 * No entry found.  Predicate lock the leaf page, to lock the place
+		 * where the entry would've been, had there been one.
+		 */
+		PredicateLockPage(ginstate->index,
+						  BufferGetBlockNumber(stackEntry->buffer), snapshot);
+	}
 
 	if (needUnlock)
 		LockBuffer(stackEntry->buffer, GIN_UNLOCK);
@@ -533,7 +541,7 @@ startScanKey(GinState *ginstate, GinScanOpaque so, GinScanKey key)
 
 		for (i = 0; i < key->nentries - 1; i++)
 		{
-			/* Pass all entries <= i as false, and the rest as MAYBE */
+			/* Pass all entries <= i as FALSE, and the rest as MAYBE */
 			for (j = 0; j <= i; j++)
 				key->entryRes[entryIndexes[j]] = GIN_FALSE;
 			for (j = i + 1; j < key->nentries; j++)
@@ -673,8 +681,6 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 		entry->btree.fullScan = false;
 		stack = ginFindLeafPage(&entry->btree, true, snapshot);
 
-		GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(stack->buffer), snapshot);
-
 		/* we don't need the stack, just the buffer. */
 		entry->buffer = stack->buffer;
 		IncrBufferRefCount(entry->buffer);
@@ -719,10 +725,6 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 			entry->buffer = ginStepRight(entry->buffer,
 										 ginstate->index,
 										 GIN_SHARE);
-
-			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
-
-
 			page = BufferGetPage(entry->buffer);
 		}
 		stepright = true;
@@ -1084,8 +1086,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
 	 * lossy page even when none of the other entries match.
 	 *
 	 * Our strategy is to call the tri-state consistent function, with the
-	 * lossy-page entries set to MAYBE, and all the other entries false. If it
-	 * returns false, none of the lossy items alone are enough for a match, so
+	 * lossy-page entries set to MAYBE, and all the other entries FALSE. If it
+	 * returns FALSE, none of the lossy items alone are enough for a match, so
 	 * we don't need to return a lossy-page pointer. Otherwise, return a
 	 * lossy-page pointer to indicate that the whole heap page must be
 	 * checked.  (On subsequent calls, we'll do nothing until minItem is past
@@ -1746,8 +1748,7 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos)
 }
 
 /*
- * Collect all matched rows from pending list into bitmap. Also function
- * takes PendingLockRelation if it's needed.
+ * Collect all matched rows from pending list into bitmap.
  */
 static void
 scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
@@ -1764,6 +1765,12 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 
 	*ntids = 0;
 
+	/*
+	 * Acquire predicate lock on the metapage, to conflict with any
+	 * fastupdate insertions.
+	 */
+	PredicateLockPage(scan->indexRelation, GIN_METAPAGE_BLKNO, scan->xs_snapshot);
+
 	LockBuffer(metabuffer, GIN_SHARE);
 	page = BufferGetPage(metabuffer);
 	TestForOldSnapshot(scan->xs_snapshot, scan->indexRelation, page);
@@ -1777,24 +1784,9 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 	{
 		/* No pending list, so proceed with normal scan */
 		UnlockReleaseBuffer(metabuffer);
-
-		/*
-		 * If fast update is enabled, we acquire a predicate lock on the
-		 * entire relation as fast update postpones the insertion of tuples
-		 * into index structure due to which we can't detect rw conflicts.
-		 */
-		if (GinGetUseFastUpdate(scan->indexRelation))
-			PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
-
 		return;
 	}
 
-	/*
-	 * Pending list is not empty, we need to lock the index doesn't despite on
-	 * fastupdate state
-	 */
-	PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
-
 	pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
 	LockBuffer(pos.pendingBuffer, GIN_SHARE);
 	pos.firstOffset = FirstOffsetNumber;
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index cf218dd75d..92c77015a1 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -104,7 +104,7 @@ addItemPointersToLeafTuple(GinState *ginstate,
 										buffer);
 
 		/* Now insert the TIDs-to-be-added into the posting tree */
-		ginInsertItemPointers(ginstate->index, postingRoot,
+		ginInsertItemPointers(ginstate->index, postingRoot, InvalidBuffer,
 							  items, nitem,
 							  buildStats);
 
@@ -207,19 +207,23 @@ ginEntryInsert(GinState *ginstate,
 		{
 			/* add entries to existing posting tree */
 			BlockNumber rootPostingTree = GinGetPostingTree(itup);
+			Buffer		rootBuffer;
 
 			/* release all stack */
 			LockBuffer(stack->buffer, GIN_UNLOCK);
 			freeGinBtreeStack(stack);
 
 			/* insert into posting tree */
-			ginInsertItemPointers(ginstate->index, rootPostingTree,
+			rootBuffer = ReadBuffer(ginstate->index, rootPostingTree);
+			CheckForSerializableConflictIn(ginstate->index, NULL, rootBuffer);
+			ginInsertItemPointers(ginstate->index, rootPostingTree, rootBuffer,
 								  items, nitem,
 								  buildStats);
+			ReleaseBuffer(rootBuffer);
 			return;
 		}
 
-		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
+		CheckForSerializableConflictIn(ginstate->index, NULL, stack->buffer);
 		/* modify an existing leaf entry */
 		itup = addItemPointersToLeafTuple(ginstate, itup,
 										  items, nitem, buildStats, stack->buffer);
@@ -228,7 +232,7 @@ ginEntryInsert(GinState *ginstate,
 	}
 	else
 	{
-		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
+		CheckForSerializableConflictIn(ginstate->index, NULL, stack->buffer);
 		/* no match, so construct a new leaf entry */
 		itup = buildFreshLeafTuple(ginstate, attnum, key, category,
 								   items, nitem, buildStats, stack->buffer);
@@ -517,18 +521,6 @@ gininsert(Relation index, Datum *values, bool *isnull,
 
 		memset(&collector, 0, sizeof(GinTupleCollector));
 
-		/*
-		 * With fastupdate on each scan and each insert begin with access to
-		 * pending list, so it effectively lock entire index. In this case we
-		 * aquire predicate lock and check for conflicts over index relation,
-		 * and hope that it will reduce locking overhead.
-		 *
-		 * Do not use GinCheckForSerializableConflictIn() here, because it
-		 * will do nothing (it does actual work only with fastupdate off).
-		 * Check for conflicts for entire index.
-		 */
-		CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
-
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleFastCollect(ginstate, &collector,
 									(OffsetNumber) (i + 1),
@@ -539,16 +531,6 @@ gininsert(Relation index, Datum *values, bool *isnull,
 	}
 	else
 	{
-		GinStatsData stats;
-
-		/*
-		 * Fastupdate is off but if pending list isn't empty then we need to
-		 * check conflicts with PredicateLockRelation in scanPendingInsert().
-		 */
-		ginGetStats(index, &stats);
-		if (stats.nPendingPages > 0)
-			CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
-
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleInsert(ginstate, (OffsetNumber) (i + 1),
 							   values[i], isnull[i],
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 4367523dd9..0a32182dd7 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -718,10 +718,3 @@ ginUpdateStats(Relation index, const GinStatsData *stats)
 
 	END_CRIT_SECTION();
 }
-
-void
-GinCheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
-{
-	if (!GinGetUseFastUpdate(relation))
-		CheckForSerializableConflictIn(relation, tuple, buffer);
-}
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index dd8e31b872..3104bc12b6 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -166,7 +166,6 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
 	START_CRIT_SECTION();
 
 	/* Unlink the page by changing left sibling's rightlink */
-
 	page = BufferGetPage(lBuffer);
 	GinPageGetOpaque(page)->rightlink = rightlink;
 
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index f7a9168925..8a42effdf7 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -1220,7 +1220,7 @@ gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
 	bool		is_split;
 
 	/*
-	 * Check for any rw conflicts (in serialisation isolation level) just
+	 * Check for any rw conflicts (in serializable isolation level) just
 	 * before we intend to modify the page
 	 */
 	CheckForSerializableConflictIn(state->r, NULL, stack->buffer);
diff --git a/src/backend/storage/lmgr/README-SSI b/src/backend/storage/lmgr/README-SSI
index f2b099d1c9..50d2ecca9d 100644
--- a/src/backend/storage/lmgr/README-SSI
+++ b/src/backend/storage/lmgr/README-SSI
@@ -373,21 +373,22 @@ index *leaf* pages needed to lock the appropriate index range. If,
 however, a search discovers that no root page has yet been created, a
 predicate lock on the index relation is required.
 
+    * Like a B-tree, GIN searches acquire predicate locks only on the
+leaf pages of entry tree. When performing an equality scan, and an
+entry has a posting tree, the posting tree root is locked instead, to
+lock only that key value. However, fastupdate=on postpones the
+insertion of tuples into the index structure by temporarily storing them
+in the pending list. That makes us unable to detect r-w conflicts using
+page-level locks. To cope with that, insertions to the pending list
+conflict with all scans.
+
     * GiST searches can determine that there are no matches at any
 level of the index, so we acquire predicate lock at each index
 level during a GiST search. An index insert at the leaf level can
 then be trusted to ripple up to all levels and locations where
 conflicting predicate locks may exist. In case there is a page split,
-we need to copy predicate lock from an original page to all new pages.
-
-    * GIN searches acquire predicate locks only on the leaf pages
-of entry tree and posting tree. During a page split, a predicate locks are
-copied from the original page to the new page. In the same way predicate locks
-are copied from entry tree leaf page to freshly created posting tree root.
-However, when fast update is enabled, a predicate lock on the whole index
-relation is required. Fast update postpones the insertion of tuples into index
-structure by temporarily storing them into pending list. That makes us unable
-to detect r-w conflicts using page-level locks.
+we need to copy predicate lock from the original page to all the new
+pages.
 
     * Hash index searches acquire predicate locks on the primary
 page of a bucket. It acquires a lock on both the old and new buckets
@@ -395,7 +396,6 @@ for scans that happen concurrently with page splits. During a bucket
 split, a predicate lock is copied from the primary page of an old
 bucket to the primary page of a new bucket.
 
-
     * The effects of page splits, overflows, consolidations, and
 removals must be carefully reviewed to ensure that predicate locks
 aren't "lost" during those operations, or kept with pages which could
diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h
index c013d60371..1e2c9dde8b 100644
--- a/src/include/access/gin_private.h
+++ b/src/include/access/gin_private.h
@@ -103,8 +103,6 @@ extern Datum *ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
 extern OffsetNumber gintuple_get_attrnum(GinState *ginstate, IndexTuple tuple);
 extern Datum gintuple_get_key(GinState *ginstate, IndexTuple tuple,
 				 GinNullCategory *category);
-extern void GinCheckForSerializableConflictIn(Relation relation,
-								  HeapTuple tuple, Buffer buffer);
 
 /* gininsert.c */
 extern IndexBuildResult *ginbuild(Relation heap, Relation index,
@@ -161,6 +159,7 @@ typedef struct GinBtreeData
 
 	Relation	index;
 	BlockNumber rootBlkno;
+	Buffer		rootBuffer;
 	GinState   *ginstate;		/* not valid in a data scan */
 	bool		fullScan;
 	bool		isBuild;
@@ -222,12 +221,12 @@ extern BlockNumber createPostingTree(Relation index,
 				  GinStatsData *buildStats, Buffer entrybuffer);
 extern void GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset);
 extern void GinPageDeletePostingItem(Page page, OffsetNumber offset);
-extern void ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
+extern void ginInsertItemPointers(Relation index,
+					  BlockNumber rootBlkno, Buffer rootBuffer,
 					  ItemPointerData *items, uint32 nitem,
 					  GinStatsData *buildStats);
 extern GinBtreeStack *ginScanBeginPostingTree(GinBtree btree, Relation index, BlockNumber rootBlkno, Snapshot snapshot);
 extern void ginDataFillRoot(GinBtree btree, Page root, BlockNumber lblkno, Page lpage, BlockNumber rblkno, Page rpage);
-extern void ginPrepareDataScan(GinBtree btree, Relation index, BlockNumber rootBlkno);
 
 /*
  * This is declared in ginvacuum.c, but is passed between ginVacuumItemPointers
diff --git a/src/test/isolation/expected/predicate-gin-fastupdate.out b/src/test/isolation/expected/predicate-gin-fastupdate.out
new file mode 100644
index 0000000000..7d4fa8e024
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin-fastupdate.out
@@ -0,0 +1,30 @@
+Parsed test spec with 3 sessions
+
+starting permutation: r1 r2 w1 c1 w2 c2
+step r1: SELECT count(*) FROM gin_tbl WHERE p @> array[1000];
+count          
+
+2              
+step r2: SELECT * FROM other_tbl;
+id             
+
+step w1: INSERT INTO other_tbl VALUES (42);
+step c1: COMMIT;
+step w2: INSERT INTO gin_tbl SELECT array[1000,19001];
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
+
+starting permutation: r1 r2 w1 c1 fastupdate_on w2 c2
+step r1: SELECT count(*) FROM gin_tbl WHERE p @> array[1000];
+count          
+
+2              
+step r2: SELECT * FROM other_tbl;
+id             
+
+step w1: INSERT INTO other_tbl VALUES (42);
+step c1: COMMIT;
+step fastupdate_on: ALTER INDEX ginidx SET (fastupdate = on);
+step w2: INSERT INTO gin_tbl SELECT array[1000,19001];
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
diff --git a/src/test/isolation/expected/predicate-gin-nomatch.out b/src/test/isolation/expected/predicate-gin-nomatch.out
new file mode 100644
index 0000000000..5e733262a4
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin-nomatch.out
@@ -0,0 +1,15 @@
+Parsed test spec with 2 sessions
+
+starting permutation: r1 r2 w1 c1 w2 c2
+step r1: SELECT count(*) FROM gin_tbl WHERE p @> array[-1];
+count          
+
+0              
+step r2: SELECT * FROM other_tbl;
+id             
+
+step w1: INSERT INTO other_tbl VALUES (42);
+step c1: COMMIT;
+step w2: INSERT INTO gin_tbl SELECT array[-1];
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
diff --git a/src/test/isolation/expected/predicate-gin.out b/src/test/isolation/expected/predicate-gin.out
index 4f5501f6f0..bdf8911923 100644
--- a/src/test/isolation/expected/predicate-gin.out
+++ b/src/test/isolation/expected/predicate-gin.out
@@ -737,8 +737,8 @@ step c2: commit;
 starting permutation: fu1 rxy1 rxy2fu wx1 c1 wy2fu c2
 step fu1: alter index ginidx set (fastupdate = on);
 			  commit;
-			  begin isolation level serializable; 
-  			  set enable_seqscan=off;
+			  begin isolation level serializable;
+			  set enable_seqscan=off;
 step rxy1: select count(*) from gin_tbl where p @> array[4,5];
 count          
 
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index b3a34a8688..b650e467a6 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -69,6 +69,8 @@ test: vacuum-concurrent-drop
 test: predicate-hash
 test: predicate-gist
 test: predicate-gin
+test: predicate-gin-fastupdate
+test: predicate-gin-nomatch
 test: partition-key-update-1
 test: partition-key-update-2
 test: partition-key-update-3
diff --git a/src/test/isolation/specs/predicate-gin-fastupdate.spec b/src/test/isolation/specs/predicate-gin-fastupdate.spec
new file mode 100644
index 0000000000..04b8036fc5
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin-fastupdate.spec
@@ -0,0 +1,49 @@
+#
+# Test that predicate locking on a GIN index works correctly, even if
+# fastupdate is turned on concurrently.
+#
+# 0. fastupdate is off
+# 1. Session 's1' acquires predicate lock on page X
+# 2. fastupdate is turned on
+# 3. Session 's2' inserts a new tuple to the pending list
+#
+# This test checks that the insert in step 3 conflicts with the lock
+# acquired in step 1, and that we detect that conflict correctly, even if
+# fastupdate was turned on in-between.
+#
+setup
+{
+  create table gin_tbl(p int4[]);
+  insert into gin_tbl select array[g, g*2,g*3] from generate_series(1, 10000) g;
+  insert into gin_tbl select array[4,5,6] from generate_series(10001, 20000) g;
+  create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
+
+  create table other_tbl (id int4);
+}
+
+teardown
+{
+  drop table gin_tbl;
+  drop table other_tbl;
+}
+
+session "s1"
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; }
+step "r1" { SELECT count(*) FROM gin_tbl WHERE p @> array[1000]; }
+step "w1" { INSERT INTO other_tbl VALUES (42); }
+step "c1" { COMMIT; }
+
+session "s2"
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; }
+step "r2" { SELECT * FROM other_tbl; }
+step "w2" { INSERT INTO gin_tbl SELECT array[1000,19001]; }
+step "c2" { COMMIT; }
+
+session "s3"
+step "fastupdate_on" { ALTER INDEX ginidx SET (fastupdate = on); }
+
+# This correctly throws serialization failure.
+permutation "r1" "r2" "w1" "c1" "w2" "c2"
+
+# This also throws a serialization failure, even though fastupdate was turned on in the middle.
+permutation "r1" "r2" "w1" "c1" "fastupdate_on" "w2" "c2"
diff --git a/src/test/isolation/specs/predicate-gin-nomatch.spec b/src/test/isolation/specs/predicate-gin-nomatch.spec
new file mode 100644
index 0000000000..0ad456cb14
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin-nomatch.spec
@@ -0,0 +1,35 @@
+#
+# Check that GIN index grabs an appropriate lock, even if there is no match.
+#
+setup
+{
+  create table gin_tbl(p int4[]);
+  insert into gin_tbl select array[g, g*2,g*3] from generate_series(1, 10000) g;
+  insert into gin_tbl select array[4,5,6] from generate_series(10001, 20000) g;
+  create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
+
+  create table other_tbl (id int4);
+}
+
+teardown
+{
+  drop table gin_tbl;
+  drop table other_tbl;
+}
+
+session "s1"
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; }
+# Scan with no match.
+step "r1" { SELECT count(*) FROM gin_tbl WHERE p @> array[-1]; }
+step "w1" { INSERT INTO other_tbl VALUES (42); }
+step "c1" { COMMIT; }
+
+session "s2"
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; }
+step "r2" { SELECT * FROM other_tbl; }
+# Insert row that would've matched in step "r1"
+step "w2" { INSERT INTO gin_tbl SELECT array[-1]; }
+step "c2" { COMMIT; }
+
+# This should throw serialization failure.
+permutation "r1" "r2" "w1" "c1" "w2" "c2"
diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec
index 9f0cda8057..a967695867 100644
--- a/src/test/isolation/specs/predicate-gin.spec
+++ b/src/test/isolation/specs/predicate-gin.spec
@@ -32,8 +32,8 @@ setup
 # enable pending list for a small subset of tests
 step "fu1"	{ alter index ginidx set (fastupdate = on);
 			  commit;
-			  begin isolation level serializable; 
-  			  set enable_seqscan=off; }
+			  begin isolation level serializable;
+			  set enable_seqscan=off; }
 
 step "rxy1"	{ select count(*) from gin_tbl where p @> array[4,5]; }
 step "wx1"	{ insert into gin_tbl select g, array[5,6] from generate_series
#53Teodor Sigaev
teodor@sigaev.ru
In reply to: Teodor Sigaev (#52)
1 attachment(s)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

I'm not very happy with rootBuffer added everywhere. ginInsertItemPointers() and
ginPrepareDataScan() now take both args, rootBlkno and rootBuffer; the second
could be invalid. As I can see, you did it to call
CheckForSerializableConflictIn() in ginEntryInsert(). Seems, it could be
reverted and CheckForSerializableConflictIn() should be added to
ginFindLeafPage() with searchMode = false.

Implemented, v3 is attached.

--
Teodor Sigaev E-mail: teodor@sigaev.ru
WWW: http://www.sigaev.ru/

Attachments:

0001-Re-think-predicate-locking-on-GIN-indexes-v3.patchtext/x-patch; name=0001-Re-think-predicate-locking-on-GIN-indexes-v3.patchDownload
commit 816a9d9cc81a193a10fb49cf26ff2b0214e0ff9b
Author: Teodor Sigaev <teodor@sigaev.ru>
Date:   Sat Apr 28 20:17:46 2018 +0300

    Re-think predicate locking on GIN indexes.
    
    The principle behind the locking was not very well thought-out, and not
    documented. Add a section in the README to explain how it's supposed to
    work, and change the code so that it actually works that way.
    
    This fixes two bugs:
    
    1. If fast update was turned on concurrently, subsequent inserts to the
       pending list would not conflict with predicate locks that were acquired
       earlier, on entry pages. The included 'predicate-gin-fastupdate' test
       demonstrates that. To fix, make all scans acquire a predicate lock on
       the metapage. That lock represents a scan of the pending list, whether
       or not there is a pending list at the moment. Forget about the
       optimization to skip locking/checking for locks, when fastupdate=off.
       Maybe some of that was safe, but I couldn't convince myself of it, so
       better to rip it out and keep things simple.
    2. If a scan finds no match, it still needs to lock the entry page. The
       point of predicate locks is to lock the gaps between values, whether
       or not there is a match. The included 'predicate-gin-nomatch' test
       tests that case.
    
    In addition to those two bug fixes, this removes some unnecessary locking,
    following the principle laid out in the README. Because all items in
    a posting tree have the same key value, a lock on the posting tree root is
    enough to cover all the items. (With a very large posting tree, it would
    possibly be better to lock the posting tree leaf pages instead, so that with
    a "skip scan" of a query like "A & B", you could avoid an unnecessary conflict
    if a new tuple is inserted with A but !B. But let's keep this simple.)
    
    Also, some spelling fixes.

diff --git a/src/backend/access/gin/README b/src/backend/access/gin/README
index 990b5ffa58..cc434b1feb 100644
--- a/src/backend/access/gin/README
+++ b/src/backend/access/gin/README
@@ -331,6 +331,40 @@ page-deletions safe; it stamps the deleted pages with an XID and keeps the
 deleted pages around with the right-link intact until all concurrent scans
 have finished.)
 
+Predicate Locking
+-----------------
+
+GIN supports predicate locking, for serializable snapshot isolation.
+Predicate locks represent that a scan has scanned a range of values.  They
+are not concerned with physical pages as such, but the logical key values.
+A predicate lock on a page covers the key range that would belong on that
+page, whether or not there are any matching tuples there currently.  In other
+words, a predicate lock on an index page covers the "gaps" between the index
+tuples.  To minimize false positives, predicate locks are acquired at the
+finest level possible.
+
+* Like in the B-tree index, it is enough to lock only leaf pages, because all
+  insertions happen at the leaf level.
+
+* In an equality search (i.e. not a partial match search), if a key entry has
+  a posting tree, we lock the posting tree root page, to represent a lock on
+  just that key entry.  Otherwise, we lock the entry tree page.  We also lock
+  the entry tree page if no match is found, to lock the "gap" where the entry
+  would've been, had there been one.
+
+* In a partial match search, we lock all the entry leaf pages that we scan,
+  in addition to locks on posting tree roots, to represent the "gaps" between
+  values.
+
+* In addition to the locks on entry leaf pages and posting tree roots, all
+  scans grab a lock on the metapage.  This is to interlock with insertions to
+  the fast update pending list.  An insertion to the pending list can really
+  belong anywhere in the tree, and the lock on the metapage represents that.
+
+The interlock for fastupdate pending lists means that with fastupdate=on,
+we effectively always grab a full-index lock, so you could get a lot of false
+positives.
+
 Compatibility
 -------------
 
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 828c7074b7..030d0f4418 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -84,6 +84,9 @@ ginFindLeafPage(GinBtree btree, bool searchMode, Snapshot snapshot)
 	stack->parent = NULL;
 	stack->predictNumber = 1;
 
+	if (!searchMode)
+		CheckForSerializableConflictIn(btree->index, NULL, stack->buffer);
+
 	for (;;)
 	{
 		Page		page;
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index 59bf21744f..aeaf8adab0 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -1812,8 +1812,8 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 	blkno = BufferGetBlockNumber(buffer);
 
 	/*
-	 * Copy a predicate lock from entry tree leaf (containing posting list) to
-	 * posting tree.
+	 * Copy any predicate locks from the entry tree leaf (containing posting
+	 * list) to the posting tree.
 	 */
 	PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno);
 
@@ -1864,7 +1864,7 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
 	return blkno;
 }
 
-void
+static void
 ginPrepareDataScan(GinBtree btree, Relation index, BlockNumber rootBlkno)
 {
 	memset(btree, 0, sizeof(GinBtreeData));
@@ -1911,7 +1911,6 @@ ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
 		btree.itemptr = insertdata.items[insertdata.curitem];
 		stack = ginFindLeafPage(&btree, false, NULL);
 
-		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
 		ginInsertValue(&btree, stack, &insertdata, buildStats);
 	}
 }
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 615730b8e5..5f624cf6fa 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -31,6 +31,7 @@
 #include "postmaster/autovacuum.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
+#include "storage/predicate.h"
 #include "utils/builtins.h"
 
 /* GUC parameter */
@@ -245,6 +246,13 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
 	metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
 	metapage = BufferGetPage(metabuffer);
 
+	/*
+	 * An insertion to the pending list could logically belong anywhere in
+	 * the tree, so it conflicts with all serializable scans.  All scans
+	 * acquire a predicate lock on the metabuffer to represent that.
+	 */
+	CheckForSerializableConflictIn(index, NULL, metabuffer);
+
 	if (collector->sumsize + collector->ntuples * sizeof(ItemIdData) > GinListPageSize)
 	{
 		/*
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index f3db7cc640..ef3cd7dbe2 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -35,20 +35,6 @@ typedef struct pendingPosition
 } pendingPosition;
 
 
-/*
- * Place predicate lock on GIN page if needed.
- */
-static void
-GinPredicateLockPage(Relation index, BlockNumber blkno, Snapshot snapshot)
-{
-	/*
-	 * When fast update is on then no need in locking pages, because we anyway
-	 * need to lock the whole index.
-	 */
-	if (!GinGetUseFastUpdate(index))
-		PredicateLockPage(index, blkno, snapshot);
-}
-
 /*
  * Goes to the next page if current offset is outside of bounds
  */
@@ -68,7 +54,7 @@ moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack, Snapshot snapshot
 		stack->buffer = ginStepRight(stack->buffer, btree->index, GIN_SHARE);
 		stack->blkno = BufferGetBlockNumber(stack->buffer);
 		stack->off = FirstOffsetNumber;
-		GinPredicateLockPage(btree->index, stack->blkno, snapshot);
+		PredicateLockPage(btree->index, stack->blkno, snapshot);
 	}
 
 	return true;
@@ -100,11 +86,6 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 	 */
 	for (;;)
 	{
-		/*
-		 * Predicate lock each leaf page in posting tree
-		 */
-		GinPredicateLockPage(index, BufferGetBlockNumber(buffer), snapshot);
-
 		page = BufferGetPage(buffer);
 		if ((GinPageGetOpaque(page)->flags & GIN_DELETED) == 0)
 		{
@@ -158,7 +139,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 	 * Predicate lock entry leaf page, following pages will be locked by
 	 * moveRightIfItNeeded()
 	 */
-	GinPredicateLockPage(btree->index, stack->buffer, snapshot);
+	PredicateLockPage(btree->index, stack->buffer, snapshot);
 
 	for (;;)
 	{
@@ -253,6 +234,13 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 
 			LockBuffer(stack->buffer, GIN_UNLOCK);
 
+			/*
+			 * Acquire predicate lock on the posting tree.  We already hold
+			 * a lock on the entry page, but insertions to the posting tree
+			 * don't check for conflicts on that level.
+			 */
+			PredicateLockPage(btree->index, rootPostingTree, snapshot);
+
 			/* Collect all the TIDs in this entry's posting tree */
 			scanPostingTree(btree->index, scanEntry, rootPostingTree,
 							snapshot);
@@ -400,10 +388,6 @@ restartScanEntry:
 	{
 		IndexTuple	itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, stackEntry->off));
 
-		/* Predicate lock visited entry leaf page */
-		GinPredicateLockPage(ginstate->index,
-							 BufferGetBlockNumber(stackEntry->buffer), snapshot);
-
 		if (GinIsPostingTree(itup))
 		{
 			BlockNumber rootPostingTree = GinGetPostingTree(itup);
@@ -411,6 +395,13 @@ restartScanEntry:
 			Page		page;
 			ItemPointerData minItem;
 
+			/*
+			 * This is an equality scan, so lock the root of the posting tree.
+			 * It represents a lock on the exact key value, and covers all the
+			 * items in the posting tree.
+			 */
+			PredicateLockPage(ginstate->index, rootPostingTree, snapshot);
+
 			/*
 			 * We should unlock entry page before touching posting tree to
 			 * prevent deadlocks with vacuum processes. Because entry is never
@@ -425,12 +416,6 @@ restartScanEntry:
 											rootPostingTree, snapshot);
 			entry->buffer = stack->buffer;
 
-			/*
-			 * Predicate lock visited posting tree page, following pages will
-			 * be locked by moveRightIfItNeeded or entryLoadMoreItems
-			 */
-			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
-
 			/*
 			 * We keep buffer pinned because we need to prevent deletion of
 			 * page during scan. See GIN's vacuum implementation. RefCount is
@@ -452,15 +437,38 @@ restartScanEntry:
 			freeGinBtreeStack(stack);
 			entry->isFinished = false;
 		}
-		else if (GinGetNPosting(itup) > 0)
+		else
 		{
-			entry->list = ginReadTuple(ginstate, entry->attnum, itup,
-									   &entry->nlist);
-			entry->predictNumberResult = entry->nlist;
+			/*
+			 * Lock the entry leaf page.  This is more coarse-grained than
+			 * necessary, because it will conflict with any insertions that
+			 * land on the same leaf page, not only the exact key we searched
+			 * for.  But locking an individual tuple would require updating
+			 * that lock whenever it moves because of insertions or vacuums,
+			 * which seems too complicated.
+			 */
+			PredicateLockPage(ginstate->index,
+							  BufferGetBlockNumber(stackEntry->buffer),
+							  snapshot);
+			if (GinGetNPosting(itup) > 0)
+			{
+				entry->list = ginReadTuple(ginstate, entry->attnum, itup,
+										   &entry->nlist);
+				entry->predictNumberResult = entry->nlist;
 
-			entry->isFinished = false;
+				entry->isFinished = false;
+			}
 		}
 	}
+	else
+	{
+		/*
+		 * No entry found.  Predicate lock the leaf page, to lock the place
+		 * where the entry would've been, had there been one.
+		 */
+		PredicateLockPage(ginstate->index,
+						  BufferGetBlockNumber(stackEntry->buffer), snapshot);
+	}
 
 	if (needUnlock)
 		LockBuffer(stackEntry->buffer, GIN_UNLOCK);
@@ -533,7 +541,7 @@ startScanKey(GinState *ginstate, GinScanOpaque so, GinScanKey key)
 
 		for (i = 0; i < key->nentries - 1; i++)
 		{
-			/* Pass all entries <= i as false, and the rest as MAYBE */
+			/* Pass all entries <= i as FALSE, and the rest as MAYBE */
 			for (j = 0; j <= i; j++)
 				key->entryRes[entryIndexes[j]] = GIN_FALSE;
 			for (j = i + 1; j < key->nentries; j++)
@@ -673,8 +681,6 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 		entry->btree.fullScan = false;
 		stack = ginFindLeafPage(&entry->btree, true, snapshot);
 
-		GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(stack->buffer), snapshot);
-
 		/* we don't need the stack, just the buffer. */
 		entry->buffer = stack->buffer;
 		IncrBufferRefCount(entry->buffer);
@@ -719,10 +725,6 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
 			entry->buffer = ginStepRight(entry->buffer,
 										 ginstate->index,
 										 GIN_SHARE);
-
-			GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
-
-
 			page = BufferGetPage(entry->buffer);
 		}
 		stepright = true;
@@ -1084,8 +1086,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
 	 * lossy page even when none of the other entries match.
 	 *
 	 * Our strategy is to call the tri-state consistent function, with the
-	 * lossy-page entries set to MAYBE, and all the other entries false. If it
-	 * returns false, none of the lossy items alone are enough for a match, so
+	 * lossy-page entries set to MAYBE, and all the other entries FALSE. If it
+	 * returns FALSE, none of the lossy items alone are enough for a match, so
 	 * we don't need to return a lossy-page pointer. Otherwise, return a
 	 * lossy-page pointer to indicate that the whole heap page must be
 	 * checked.  (On subsequent calls, we'll do nothing until minItem is past
@@ -1746,8 +1748,7 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos)
 }
 
 /*
- * Collect all matched rows from pending list into bitmap. Also function
- * takes PendingLockRelation if it's needed.
+ * Collect all matched rows from pending list into bitmap.
  */
 static void
 scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
@@ -1764,6 +1765,12 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 
 	*ntids = 0;
 
+	/*
+	 * Acquire predicate lock on the metapage, to conflict with any
+	 * fastupdate insertions.
+	 */
+	PredicateLockPage(scan->indexRelation, GIN_METAPAGE_BLKNO, scan->xs_snapshot);
+
 	LockBuffer(metabuffer, GIN_SHARE);
 	page = BufferGetPage(metabuffer);
 	TestForOldSnapshot(scan->xs_snapshot, scan->indexRelation, page);
@@ -1777,24 +1784,9 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 	{
 		/* No pending list, so proceed with normal scan */
 		UnlockReleaseBuffer(metabuffer);
-
-		/*
-		 * If fast update is enabled, we acquire a predicate lock on the
-		 * entire relation as fast update postpones the insertion of tuples
-		 * into index structure due to which we can't detect rw conflicts.
-		 */
-		if (GinGetUseFastUpdate(scan->indexRelation))
-			PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
-
 		return;
 	}
 
-	/*
-	 * Pending list is not empty, we need to lock the index doesn't despite on
-	 * fastupdate state
-	 */
-	PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
-
 	pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
 	LockBuffer(pos.pendingBuffer, GIN_SHARE);
 	pos.firstOffset = FirstOffsetNumber;
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index cf218dd75d..5281eb6823 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -219,7 +219,7 @@ ginEntryInsert(GinState *ginstate,
 			return;
 		}
 
-		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
+		CheckForSerializableConflictIn(ginstate->index, NULL, stack->buffer);
 		/* modify an existing leaf entry */
 		itup = addItemPointersToLeafTuple(ginstate, itup,
 										  items, nitem, buildStats, stack->buffer);
@@ -228,7 +228,7 @@ ginEntryInsert(GinState *ginstate,
 	}
 	else
 	{
-		GinCheckForSerializableConflictIn(btree.index, NULL, stack->buffer);
+		CheckForSerializableConflictIn(ginstate->index, NULL, stack->buffer);
 		/* no match, so construct a new leaf entry */
 		itup = buildFreshLeafTuple(ginstate, attnum, key, category,
 								   items, nitem, buildStats, stack->buffer);
@@ -517,18 +517,6 @@ gininsert(Relation index, Datum *values, bool *isnull,
 
 		memset(&collector, 0, sizeof(GinTupleCollector));
 
-		/*
-		 * With fastupdate on each scan and each insert begin with access to
-		 * pending list, so it effectively lock entire index. In this case we
-		 * aquire predicate lock and check for conflicts over index relation,
-		 * and hope that it will reduce locking overhead.
-		 *
-		 * Do not use GinCheckForSerializableConflictIn() here, because it
-		 * will do nothing (it does actual work only with fastupdate off).
-		 * Check for conflicts for entire index.
-		 */
-		CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
-
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleFastCollect(ginstate, &collector,
 									(OffsetNumber) (i + 1),
@@ -539,16 +527,6 @@ gininsert(Relation index, Datum *values, bool *isnull,
 	}
 	else
 	{
-		GinStatsData stats;
-
-		/*
-		 * Fastupdate is off but if pending list isn't empty then we need to
-		 * check conflicts with PredicateLockRelation in scanPendingInsert().
-		 */
-		ginGetStats(index, &stats);
-		if (stats.nPendingPages > 0)
-			CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
-
 		for (i = 0; i < ginstate->origTupdesc->natts; i++)
 			ginHeapTupleInsert(ginstate, (OffsetNumber) (i + 1),
 							   values[i], isnull[i],
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 4367523dd9..0a32182dd7 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -718,10 +718,3 @@ ginUpdateStats(Relation index, const GinStatsData *stats)
 
 	END_CRIT_SECTION();
 }
-
-void
-GinCheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
-{
-	if (!GinGetUseFastUpdate(relation))
-		CheckForSerializableConflictIn(relation, tuple, buffer);
-}
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index dd8e31b872..3104bc12b6 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -166,7 +166,6 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
 	START_CRIT_SECTION();
 
 	/* Unlink the page by changing left sibling's rightlink */
-
 	page = BufferGetPage(lBuffer);
 	GinPageGetOpaque(page)->rightlink = rightlink;
 
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index f7a9168925..8a42effdf7 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -1220,7 +1220,7 @@ gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
 	bool		is_split;
 
 	/*
-	 * Check for any rw conflicts (in serialisation isolation level) just
+	 * Check for any rw conflicts (in serializable isolation level) just
 	 * before we intend to modify the page
 	 */
 	CheckForSerializableConflictIn(state->r, NULL, stack->buffer);
diff --git a/src/backend/storage/lmgr/README-SSI b/src/backend/storage/lmgr/README-SSI
index f2b099d1c9..50d2ecca9d 100644
--- a/src/backend/storage/lmgr/README-SSI
+++ b/src/backend/storage/lmgr/README-SSI
@@ -373,21 +373,22 @@ index *leaf* pages needed to lock the appropriate index range. If,
 however, a search discovers that no root page has yet been created, a
 predicate lock on the index relation is required.
 
+    * Like a B-tree, GIN searches acquire predicate locks only on the
+leaf pages of entry tree. When performing an equality scan, and an
+entry has a posting tree, the posting tree root is locked instead, to
+lock only that key value. However, fastupdate=on postpones the
+insertion of tuples into the index structure by temporarily storing
+them in the pending list. That makes us unable to detect r-w conflicts
+using page-level locks. To cope with that, insertions to the pending
+list conflict with all scans.
+
     * GiST searches can determine that there are no matches at any
 level of the index, so we acquire predicate lock at each index
 level during a GiST search. An index insert at the leaf level can
 then be trusted to ripple up to all levels and locations where
 conflicting predicate locks may exist. In case there is a page split,
-we need to copy predicate lock from an original page to all new pages.
-
-    * GIN searches acquire predicate locks only on the leaf pages
-of entry tree and posting tree. During a page split, a predicate locks are
-copied from the original page to the new page. In the same way predicate locks
-are copied from entry tree leaf page to freshly created posting tree root.
-However, when fast update is enabled, a predicate lock on the whole index
-relation is required. Fast update postpones the insertion of tuples into index
-structure by temporarily storing them into pending list. That makes us unable
-to detect r-w conflicts using page-level locks.
+we need to copy predicate lock from the original page to all the new
+pages.
 
     * Hash index searches acquire predicate locks on the primary
 page of a bucket. It acquires a lock on both the old and new buckets
@@ -395,7 +396,6 @@ for scans that happen concurrently with page splits. During a bucket
 split, a predicate lock is copied from the primary page of an old
 bucket to the primary page of a new bucket.
 
-
     * The effects of page splits, overflows, consolidations, and
 removals must be carefully reviewed to ensure that predicate locks
 aren't "lost" during those operations, or kept with pages which could
diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h
index c013d60371..f0baac6586 100644
--- a/src/include/access/gin_private.h
+++ b/src/include/access/gin_private.h
@@ -103,8 +103,6 @@ extern Datum *ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
 extern OffsetNumber gintuple_get_attrnum(GinState *ginstate, IndexTuple tuple);
 extern Datum gintuple_get_key(GinState *ginstate, IndexTuple tuple,
 				 GinNullCategory *category);
-extern void GinCheckForSerializableConflictIn(Relation relation,
-								  HeapTuple tuple, Buffer buffer);
 
 /* gininsert.c */
 extern IndexBuildResult *ginbuild(Relation heap, Relation index,
@@ -227,7 +225,6 @@ extern void ginInsertItemPointers(Relation index, BlockNumber rootBlkno,
 					  GinStatsData *buildStats);
 extern GinBtreeStack *ginScanBeginPostingTree(GinBtree btree, Relation index, BlockNumber rootBlkno, Snapshot snapshot);
 extern void ginDataFillRoot(GinBtree btree, Page root, BlockNumber lblkno, Page lpage, BlockNumber rblkno, Page rpage);
-extern void ginPrepareDataScan(GinBtree btree, Relation index, BlockNumber rootBlkno);
 
 /*
  * This is declared in ginvacuum.c, but is passed between ginVacuumItemPointers
diff --git a/src/test/isolation/expected/predicate-gin-fastupdate.out b/src/test/isolation/expected/predicate-gin-fastupdate.out
new file mode 100644
index 0000000000..7d4fa8e024
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin-fastupdate.out
@@ -0,0 +1,30 @@
+Parsed test spec with 3 sessions
+
+starting permutation: r1 r2 w1 c1 w2 c2
+step r1: SELECT count(*) FROM gin_tbl WHERE p @> array[1000];
+count          
+
+2              
+step r2: SELECT * FROM other_tbl;
+id             
+
+step w1: INSERT INTO other_tbl VALUES (42);
+step c1: COMMIT;
+step w2: INSERT INTO gin_tbl SELECT array[1000,19001];
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
+
+starting permutation: r1 r2 w1 c1 fastupdate_on w2 c2
+step r1: SELECT count(*) FROM gin_tbl WHERE p @> array[1000];
+count          
+
+2              
+step r2: SELECT * FROM other_tbl;
+id             
+
+step w1: INSERT INTO other_tbl VALUES (42);
+step c1: COMMIT;
+step fastupdate_on: ALTER INDEX ginidx SET (fastupdate = on);
+step w2: INSERT INTO gin_tbl SELECT array[1000,19001];
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
diff --git a/src/test/isolation/expected/predicate-gin-nomatch.out b/src/test/isolation/expected/predicate-gin-nomatch.out
new file mode 100644
index 0000000000..5e733262a4
--- /dev/null
+++ b/src/test/isolation/expected/predicate-gin-nomatch.out
@@ -0,0 +1,15 @@
+Parsed test spec with 2 sessions
+
+starting permutation: r1 r2 w1 c1 w2 c2
+step r1: SELECT count(*) FROM gin_tbl WHERE p @> array[-1];
+count          
+
+0              
+step r2: SELECT * FROM other_tbl;
+id             
+
+step w1: INSERT INTO other_tbl VALUES (42);
+step c1: COMMIT;
+step w2: INSERT INTO gin_tbl SELECT array[-1];
+ERROR:  could not serialize access due to read/write dependencies among transactions
+step c2: COMMIT;
diff --git a/src/test/isolation/expected/predicate-gin.out b/src/test/isolation/expected/predicate-gin.out
index 4f5501f6f0..bdf8911923 100644
--- a/src/test/isolation/expected/predicate-gin.out
+++ b/src/test/isolation/expected/predicate-gin.out
@@ -737,8 +737,8 @@ step c2: commit;
 starting permutation: fu1 rxy1 rxy2fu wx1 c1 wy2fu c2
 step fu1: alter index ginidx set (fastupdate = on);
 			  commit;
-			  begin isolation level serializable; 
-  			  set enable_seqscan=off;
+			  begin isolation level serializable;
+			  set enable_seqscan=off;
 step rxy1: select count(*) from gin_tbl where p @> array[4,5];
 count          
 
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index b3a34a8688..b650e467a6 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -69,6 +69,8 @@ test: vacuum-concurrent-drop
 test: predicate-hash
 test: predicate-gist
 test: predicate-gin
+test: predicate-gin-fastupdate
+test: predicate-gin-nomatch
 test: partition-key-update-1
 test: partition-key-update-2
 test: partition-key-update-3
diff --git a/src/test/isolation/specs/predicate-gin-fastupdate.spec b/src/test/isolation/specs/predicate-gin-fastupdate.spec
new file mode 100644
index 0000000000..04b8036fc5
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin-fastupdate.spec
@@ -0,0 +1,49 @@
+#
+# Test that predicate locking on a GIN index works correctly, even if
+# fastupdate is turned on concurrently.
+#
+# 0. fastupdate is off
+# 1. Session 's1' acquires predicate lock on page X
+# 2. fastupdate is turned on
+# 3. Session 's2' inserts a new tuple to the pending list
+#
+# This test checks that the lock acquired by the scan in step 1 conflicts
+# with the insert in step 3, and that the conflict is detected correctly,
+# even if fastupdate was turned on in-between.
+#
+setup
+{
+  create table gin_tbl(p int4[]);
+  insert into gin_tbl select array[g, g*2,g*3] from generate_series(1, 10000) g;
+  insert into gin_tbl select array[4,5,6] from generate_series(10001, 20000) g;
+  create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
+
+  create table other_tbl (id int4);
+}
+
+teardown
+{
+  drop table gin_tbl;
+  drop table other_tbl;
+}
+
+session "s1"
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; }
+step "r1" { SELECT count(*) FROM gin_tbl WHERE p @> array[1000]; }
+step "w1" { INSERT INTO other_tbl VALUES (42); }
+step "c1" { COMMIT; }
+
+session "s2"
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; }
+step "r2" { SELECT * FROM other_tbl; }
+step "w2" { INSERT INTO gin_tbl SELECT array[1000,19001]; }
+step "c2" { COMMIT; }
+
+session "s3"
+step "fastupdate_on" { ALTER INDEX ginidx SET (fastupdate = on); }
+
+# This correctly throws serialization failure.
+permutation "r1" "r2" "w1" "c1" "w2" "c2"
+
+# But if fastupdate is turned on in the middle, we miss it.
+permutation "r1" "r2" "w1" "c1" "fastupdate_on" "w2" "c2"
diff --git a/src/test/isolation/specs/predicate-gin-nomatch.spec b/src/test/isolation/specs/predicate-gin-nomatch.spec
new file mode 100644
index 0000000000..0ad456cb14
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin-nomatch.spec
@@ -0,0 +1,35 @@
+#
+# Check that GIN index grabs an appropriate lock, even if there is no match.
+#
+setup
+{
+  create table gin_tbl(p int4[]);
+  insert into gin_tbl select array[g, g*2,g*3] from generate_series(1, 10000) g;
+  insert into gin_tbl select array[4,5,6] from generate_series(10001, 20000) g;
+  create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
+
+  create table other_tbl (id int4);
+}
+
+teardown
+{
+  drop table gin_tbl;
+  drop table other_tbl;
+}
+
+session "s1"
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; }
+# Scan with no match.
+step "r1" { SELECT count(*) FROM gin_tbl WHERE p @> array[-1]; }
+step "w1" { INSERT INTO other_tbl VALUES (42); }
+step "c1" { COMMIT; }
+
+session "s2"
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; }
+step "r2" { SELECT * FROM other_tbl; }
+# Insert row that would've matched in step "r1"
+step "w2" { INSERT INTO gin_tbl SELECT array[-1]; }
+step "c2" { COMMIT; }
+
+# This should throw serialization failure.
+permutation "r1" "r2" "w1" "c1" "w2" "c2"
diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec
index 9f0cda8057..a967695867 100644
--- a/src/test/isolation/specs/predicate-gin.spec
+++ b/src/test/isolation/specs/predicate-gin.spec
@@ -32,8 +32,8 @@ setup
 # enable pending list for a small subset of tests
 step "fu1"	{ alter index ginidx set (fastupdate = on);
 			  commit;
-			  begin isolation level serializable; 
-  			  set enable_seqscan=off; }
+			  begin isolation level serializable;
+			  set enable_seqscan=off; }
 
 step "rxy1"	{ select count(*) from gin_tbl where p @> array[4,5]; }
 step "wx1"	{ insert into gin_tbl select g, array[5,6] from generate_series
#54Teodor Sigaev
teodor@sigaev.ru
In reply to: Teodor Sigaev (#53)
Re: [HACKERS] GSoC 2017: weekly progress reports (week 6)

Thanks to everyone, v3 is pushed.

Teodor Sigaev wrote:

I'm not very happy with rootBuffer added everywhere. ginInsertItemPointers()
and ginPrepareDataScan() now take both args, rootBlkno and rootBuffer,
second could be invalid. As I can see, you did it to call
CheckForSerializableConflictIn() in ginEntryInsert(). Seems, it could be
reverted and CheckForSerializableConflictIn() should be added to
ginFindLeafPage() with searchMode = false.

Implemented, v3 is attached.

--
Teodor Sigaev E-mail: teodor@sigaev.ru
WWW: http://www.sigaev.ru/