diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index ec6240fbae..77441d19d1 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -191,18 +191,18 @@ static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
 #define FAST_PATH_LOCKNUMBER_OFFSET		1
 #define FAST_PATH_MASK					((1 << FAST_PATH_BITS_PER_SLOT) - 1)
 #define FAST_PATH_GET_BITS(proc, n) \
-	(((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)
+	((proc)->fpInfo[n].bits & FAST_PATH_MASK)
 #define FAST_PATH_BIT_POSITION(n, l) \
 	(AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
 	 AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
 	 AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
-	 ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n)))
+	 ((l) - FAST_PATH_LOCKNUMBER_OFFSET))
 #define FAST_PATH_SET_LOCKMODE(proc, n, l) \
-	 (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
+	 (proc)->fpInfo[n].bits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
 #define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
-	 (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
+	 (proc)->fpInfo[n].bits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
 #define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
-	 ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
+	 ((proc)->fpInfo[n].bits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
 
 /*
  * The fast-path lock mechanism is concerned only with relation locks on
@@ -2596,14 +2596,14 @@ static bool
 FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
 {
 	uint32		f;
-	uint32		unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
+	uint32		unused_slot = MyProc->fpNextSlot;
 
 	/* Scan for existing entry for this relid, remembering empty slot. */
-	for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
+	for (f = 0; f < MyProc->fpNextSlot; f++)
 	{
 		if (FAST_PATH_GET_BITS(MyProc, f) == 0)
-			unused_slot = f;
-		else if (MyProc->fpRelId[f] == relid)
+			unused_slot = Min(unused_slot, f);
+		else if (MyProc->fpInfo[f].relid == relid)
 		{
 			Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
 			FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
@@ -2614,7 +2614,8 @@ FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
 	/* If no existing entry, use any empty slot. */
 	if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
 	{
-		MyProc->fpRelId[unused_slot] = relid;
+		MyProc->fpNextSlot = Max(MyProc->fpNextSlot, unused_slot + 1);
+		MyProc->fpInfo[unused_slot].relid = relid;
 		FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
 		++FastPathLocalUseCount;
 		return true;
@@ -2634,21 +2635,30 @@ FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
 {
 	uint32		f;
 	bool		result = false;
+	int			lastSlot = -1;
 
 	FastPathLocalUseCount = 0;
-	for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
+	for (f = 0; f < MyProc->fpNextSlot; f++)
 	{
-		if (MyProc->fpRelId[f] == relid
+		if (MyProc->fpInfo[f].relid == relid
 			&& FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
 		{
 			Assert(!result);
 			FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
 			result = true;
 			/* we continue iterating so as to update FastPathLocalUseCount */
+
+			/* No early exit: FastPathLocalUseCount and lastSlot (fpNextSlot) must be recomputed from all remaining slots. */
 		}
 		if (FAST_PATH_GET_BITS(MyProc, f) != 0)
+		{
 			++FastPathLocalUseCount;
+			lastSlot = f;
+		}
 	}
+
+	MyProc->fpNextSlot = (lastSlot + 1);
+
 	return result;
 }
 
@@ -2701,12 +2711,12 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 			continue;
 		}
 
-		for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
+		for (f = 0; f < proc->fpNextSlot; f++)
 		{
 			uint32		lockmode;
 
 			/* Look for an allocated slot matching the given relid. */
-			if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
+			if (relid != proc->fpInfo[f].relid || FAST_PATH_GET_BITS(proc, f) == 0)
 				continue;
 
 			/* Find or create lock object. */
@@ -2759,12 +2769,12 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
 
 	LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
 
-	for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
+	for (f = 0; f < MyProc->fpNextSlot; f++)
 	{
 		uint32		lockmode;
 
 		/* Look for an allocated slot matching the given relid. */
-		if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
+		if (relid != MyProc->fpInfo[f].relid || FAST_PATH_GET_BITS(MyProc, f) == 0)
 			continue;
 
 		/* If we don't have a lock of the given mode, forget it! */
@@ -2941,12 +2951,12 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 				continue;
 			}
 
-			for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
+			for (f = 0; f < proc->fpNextSlot; f++)
 			{
 				uint32		lockmask;
 
 				/* Look for an allocated slot matching the given relid. */
-				if (relid != proc->fpRelId[f])
+				if (relid != proc->fpInfo[f].relid)
 					continue;
 				lockmask = FAST_PATH_GET_BITS(proc, f);
 				if (!lockmask)
@@ -3604,7 +3614,7 @@ GetLockStatusData(void)
 
 		LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
 
-		for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
+		for (f = 0; f < proc->fpNextSlot; ++f)
 		{
 			LockInstanceData *instance;
 			uint32		lockbits = FAST_PATH_GET_BITS(proc, f);
@@ -3622,7 +3632,7 @@ GetLockStatusData(void)
 
 			instance = &data->locks[el];
 			SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
-								 proc->fpRelId[f]);
+								 proc->fpInfo[f].relid);
 			instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
 			instance->waitLockMode = NoLock;
 			instance->backend = proc->backendId;
diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h
index ef74f32693..8e1552dd3b 100644
--- a/src/include/storage/proc.h
+++ b/src/include/storage/proc.h
@@ -82,7 +82,13 @@ struct XidCache
  * rather than the main lock table.  This eases contention on the lock
  * manager LWLocks.  See storage/lmgr/README for additional details.
  */
-#define		FP_LOCK_SLOTS_PER_BACKEND 16
+#define		FP_LOCK_SLOTS_PER_BACKEND 64
+
+typedef struct FPLockSlot
+{
+	uint8		bits;
+	Oid			relid;
+} FPLockSlot;
 
 /*
  * An invalid pgprocno.  Must be larger than the maximum number of PGPROC
@@ -288,8 +294,8 @@ struct PGPROC
 
 	/* Lock manager data, recording fast-path locks taken by this backend. */
 	LWLock		fpInfoLock;		/* protects per-backend fast-path state */
-	uint64		fpLockBits;		/* lock modes held for each fast-path slot */
-	Oid			fpRelId[FP_LOCK_SLOTS_PER_BACKEND]; /* slots for rel oids */
+	int			fpNextSlot;		/* one past the highest used fast-path slot */
+	FPLockSlot	fpInfo[FP_LOCK_SLOTS_PER_BACKEND]; /* slots: rel OID + lock-mode bits */
 	bool		fpVXIDLock;		/* are we holding a fast-path VXID lock? */
 	LocalTransactionId fpLocalTransactionId;	/* lxid for fast-path VXID
 												 * lock */
