diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index ec6240fbae..cc872a9990 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -191,18 +191,18 @@ static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
 #define FAST_PATH_LOCKNUMBER_OFFSET		1
 #define FAST_PATH_MASK					((1 << FAST_PATH_BITS_PER_SLOT) - 1)
 #define FAST_PATH_GET_BITS(proc, n) \
-	(((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)
+	((proc)->fpInfo[n].bits & FAST_PATH_MASK)
 #define FAST_PATH_BIT_POSITION(n, l) \
 	(AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
 	 AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
 	 AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
-	 ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n)))
+	 ((l) - FAST_PATH_LOCKNUMBER_OFFSET))
 #define FAST_PATH_SET_LOCKMODE(proc, n, l) \
-	 (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
+	 (proc)->fpInfo[n].bits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
 #define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
-	 (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
+	 (proc)->fpInfo[n].bits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
 #define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
-	 ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
+	 ((proc)->fpInfo[n].bits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
 
 /*
  * The fast-path lock mechanism is concerned only with relation locks on
@@ -2603,7 +2603,7 @@ FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
 	{
 		if (FAST_PATH_GET_BITS(MyProc, f) == 0)
 			unused_slot = f;
-		else if (MyProc->fpRelId[f] == relid)
+		else if (MyProc->fpInfo[f].relid == relid)
 		{
 			Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
 			FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
@@ -2614,7 +2614,7 @@ FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
 	/* If no existing entry, use any empty slot. */
 	if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
 	{
-		MyProc->fpRelId[unused_slot] = relid;
+		MyProc->fpInfo[unused_slot].relid = relid;
 		FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
 		++FastPathLocalUseCount;
 		return true;
@@ -2638,7 +2638,7 @@ FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
 	FastPathLocalUseCount = 0;
 	for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
 	{
-		if (MyProc->fpRelId[f] == relid
+		if (MyProc->fpInfo[f].relid == relid
 			&& FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
 		{
 			Assert(!result);
@@ -2706,7 +2706,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 			uint32		lockmode;
 
 			/* Look for an allocated slot matching the given relid. */
-			if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
+			if (relid != proc->fpInfo[f].relid || FAST_PATH_GET_BITS(proc, f) == 0)
 				continue;
 
 			/* Find or create lock object. */
@@ -2764,7 +2764,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
 		uint32		lockmode;
 
 		/* Look for an allocated slot matching the given relid. */
-		if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
+		if (relid != MyProc->fpInfo[f].relid || FAST_PATH_GET_BITS(MyProc, f) == 0)
 			continue;
 
 		/* If we don't have a lock of the given mode, forget it! */
@@ -2946,7 +2946,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 				uint32		lockmask;
 
 				/* Look for an allocated slot matching the given relid. */
-				if (relid != proc->fpRelId[f])
+				if (relid != proc->fpInfo[f].relid)
 					continue;
 				lockmask = FAST_PATH_GET_BITS(proc, f);
 				if (!lockmask)
@@ -3622,7 +3622,7 @@ GetLockStatusData(void)
 
 			instance = &data->locks[el];
 			SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
-								 proc->fpRelId[f]);
+								 proc->fpInfo[f].relid);
 			instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
 			instance->waitLockMode = NoLock;
 			instance->backend = proc->backendId;
diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h
index ef74f32693..e22259e657 100644
--- a/src/include/storage/proc.h
+++ b/src/include/storage/proc.h
@@ -82,7 +82,13 @@ struct XidCache
  * rather than the main lock table.  This eases contention on the lock
  * manager LWLocks.  See storage/lmgr/README for additional details.
  */
-#define		FP_LOCK_SLOTS_PER_BACKEND 16
+#define		FP_LOCK_SLOTS_PER_BACKEND 64
+
+typedef struct FPLockSlot
+{
+	uint8		bits;		/* lock modes held (3 bits used) */
+	Oid			relid;		/* OID of the locked relation */
+} FPLockSlot;
 
 /*
  * An invalid pgprocno.  Must be larger than the maximum number of PGPROC
@@ -288,8 +294,7 @@ struct PGPROC
 
 	/* Lock manager data, recording fast-path locks taken by this backend. */
 	LWLock		fpInfoLock;		/* protects per-backend fast-path state */
-	uint64		fpLockBits;		/* lock modes held for each fast-path slot */
-	Oid			fpRelId[FP_LOCK_SLOTS_PER_BACKEND]; /* slots for rel oids */
+	FPLockSlot	fpInfo[FP_LOCK_SLOTS_PER_BACKEND]; /* per-slot mode bits + rel oid */
 	bool		fpVXIDLock;		/* are we holding a fast-path VXID lock? */
 	LocalTransactionId fpLocalTransactionId;	/* lxid for fast-path VXID
 												 * lock */
