ProcArrayLock contention
I've been playing with the attached patch, which adds an additional
light-weight lock mode, LW_SHARED2. LW_SHARED2 conflicts with
LW_SHARED and LW_EXCLUSIVE, but not with itself. The patch changes
ProcArrayEndTransaction() to use this new mode. IOW, multiple
processes can commit at the same time, and multiple processes can take
snapshots at the same time, but nobody can take a snapshot while
someone else is committing.
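To spell the conflict rules out, the acquire-side test boils down to this
(a condensed restatement of the logic in the attached lwshared2.patch,
using its shared1/shared2 counters, not a separate implementation):

/* LW_EXCLUSIVE conflicts with every other holder */
if (mode == LW_EXCLUSIVE)
    mustwait = (lock->exclusive != 0 || lock->shared1 != 0 || lock->shared2 != 0);
/* LW_SHARED conflicts with LW_EXCLUSIVE and LW_SHARED2, but not with itself */
else if (mode == LW_SHARED)
    mustwait = (lock->exclusive != 0 || lock->shared2 != 0);
/* LW_SHARED2 conflicts with LW_EXCLUSIVE and LW_SHARED, but not with itself */
else
    mustwait = (lock->exclusive != 0 || lock->shared1 != 0);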
Needless to say, I don't think we'd really want to apply this, because
adding a LW_SHARED2 mode that's probably only useful for ProcArrayLock
would be a pretty ugly wart. But the results are interesting.
pgbench, scale factor 100, unlogged tables, Nate Boley's 32-core AMD
box, shared_buffers = 8GB, maintenance_work_mem = 1GB,
synchronous_commit = off, checkpoint_segments = 300,
checkpoint_timeout = 15min, checkpoint_completion_target = 0.9,
wal_writer_delay = 20ms, results are median of three five-minute runs:
#clients      tps(master)     tps(lwshared2)
       1       657.984859         683.251582
       8      4748.906750        4946.069238
      32     10695.160555       17530.390578
      80      7727.563437       16099.549506
That's a pretty impressive speedup, but there's trouble in paradise.
With 80 clients (but not 32 or fewer), I occasionally get the
following error:
ERROR: t_xmin is uncommitted in tuple to be updated
So it seems that there's some way in which this locking is actually
incorrect, though I'm not seeing what it is at the moment. Either
that, or there's some bug in the existing code that happens to be
exposed by this change.
The patch also produces a (much smaller) speedup with regular tables,
but it's hard to know how seriously to take that until the locking
issue is debugged.
Any ideas?
--
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company
Attachments:
lwshared2.patch (application/octet-stream)
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 477982d..a555c91 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -319,7 +319,7 @@ MarkAsPreparing(TransactionId xid, const char *gid,
gxact->proc.inCommit = false;
gxact->proc.vacuumFlags = 0;
gxact->proc.lwWaiting = false;
- gxact->proc.lwExclusive = false;
+ gxact->proc.lwMode = LW_SHARED;
gxact->proc.lwWaitLink = NULL;
gxact->proc.waitLock = NULL;
gxact->proc.waitProcLock = NULL;
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 1a48485..5b60029 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -359,7 +359,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
*/
Assert(TransactionIdIsValid(proc->xid));
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+ LWLockAcquire(ProcArrayLock, LW_SHARED2);
proc->xid = InvalidTransactionId;
proc->lxid = InvalidLocalTransactionId;
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 079eb29..8658c83 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -32,6 +32,7 @@
#include "storage/proc.h"
#include "storage/spin.h"
+#define LWLOCK_STATS 1
/* We use the ShmemLock spinlock to protect LWLockAssign */
extern slock_t *ShmemLock;
@@ -42,7 +43,8 @@ typedef struct LWLock
slock_t mutex; /* Protects LWLock and queue of PGPROCs */
bool releaseOK; /* T if ok to release waiters */
char exclusive; /* # of exclusive holders (0 or 1) */
- int shared; /* # of shared holders (0..MaxBackends) */
+ int shared1; /* # of shared1 holders (0..MaxBackends) */
+ int shared2; /* # of shared2 holders (0..MaxBackends) */
PGPROC *head; /* head of list of waiting PGPROCs */
PGPROC *tail; /* tail of list of waiting PGPROCs */
/* tail is undefined when head is NULL */
@@ -92,7 +94,8 @@ static bool lock_addin_request_allowed = true;
#ifdef LWLOCK_STATS
static int counts_for_pid = 0;
-static int *sh_acquire_counts;
+static int *sh1_acquire_counts;
+static int *sh2_acquire_counts;
static int *ex_acquire_counts;
static int *block_counts;
#endif
@@ -104,9 +107,9 @@ inline static void
PRINT_LWDEBUG(const char *where, LWLockId lockid, const volatile LWLock *lock)
{
if (Trace_lwlocks)
- elog(LOG, "%s(%d): excl %d shared %d head %p rOK %d",
+ elog(LOG, "%s(%d): excl %d shared1 %d shared2 %d head %p rOK %d",
where, (int) lockid,
- (int) lock->exclusive, lock->shared, lock->head,
+ (int) lock->exclusive, lock->shared1, lock->shared2, lock->head,
(int) lock->releaseOK);
}
@@ -135,9 +138,9 @@ print_lwlock_stats(int code, Datum arg)
for (i = 0; i < numLocks; i++)
{
- if (sh_acquire_counts[i] || ex_acquire_counts[i] || block_counts[i])
- fprintf(stderr, "PID %d lwlock %d: shacq %u exacq %u blk %u\n",
- MyProcPid, i, sh_acquire_counts[i], ex_acquire_counts[i],
+ if (sh1_acquire_counts[i] || sh2_acquire_counts[i] || ex_acquire_counts[i] || block_counts[i])
+ fprintf(stderr, "PID %d lwlock %d: shacq1 %u shacq2 %u exacq %u blk %u\n",
+ MyProcPid, i, sh1_acquire_counts[i], sh2_acquire_counts[i], ex_acquire_counts[i],
block_counts[i]);
}
@@ -268,7 +271,8 @@ CreateLWLocks(void)
SpinLockInit(&lock->lock.mutex);
lock->lock.releaseOK = true;
lock->lock.exclusive = 0;
- lock->lock.shared = 0;
+ lock->lock.shared1 = 0;
+ lock->lock.shared2 = 0;
lock->lock.head = NULL;
lock->lock.tail = NULL;
}
@@ -336,7 +340,8 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
int *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
int numLocks = LWLockCounter[1];
- sh_acquire_counts = calloc(numLocks, sizeof(int));
+ sh1_acquire_counts = calloc(numLocks, sizeof(int));
+ sh2_acquire_counts = calloc(numLocks, sizeof(int));
ex_acquire_counts = calloc(numLocks, sizeof(int));
block_counts = calloc(numLocks, sizeof(int));
counts_for_pid = MyProcPid;
@@ -345,8 +350,10 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
/* Count lock acquisition attempts */
if (mode == LW_EXCLUSIVE)
ex_acquire_counts[lockid]++;
+ else if (mode == LW_SHARED)
+ sh1_acquire_counts[lockid]++;
else
- sh_acquire_counts[lockid]++;
+ sh2_acquire_counts[lockid]++;
#endif /* LWLOCK_STATS */
/*
@@ -397,7 +404,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
/* If I can get the lock, do so quickly. */
if (mode == LW_EXCLUSIVE)
{
- if (lock->exclusive == 0 && lock->shared == 0)
+ if (lock->exclusive == 0 && lock->shared1 == 0 && lock->shared2 == 0)
{
lock->exclusive++;
mustwait = false;
@@ -405,11 +412,21 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
else
mustwait = true;
}
+ else if (mode == LW_SHARED)
+ {
+ if (lock->exclusive == 0 && lock->shared2 == 0)
+ {
+ lock->shared1++;
+ mustwait = false;
+ }
+ else
+ mustwait = true;
+ }
else
{
- if (lock->exclusive == 0)
+ if (lock->exclusive == 0 && lock->shared1 == 0)
{
- lock->shared++;
+ lock->shared2++;
mustwait = false;
}
else
@@ -430,7 +447,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
elog(PANIC, "cannot wait without a PGPROC structure");
proc->lwWaiting = true;
- proc->lwExclusive = (mode == LW_EXCLUSIVE);
+ proc->lwMode = mode;
proc->lwWaitLink = NULL;
if (lock->head == NULL)
lock->head = proc;
@@ -525,7 +542,7 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
/* If I can get the lock, do so quickly. */
if (mode == LW_EXCLUSIVE)
{
- if (lock->exclusive == 0 && lock->shared == 0)
+ if (lock->exclusive == 0 && lock->shared1 == 0 && lock->shared2 == 0)
{
lock->exclusive++;
mustwait = false;
@@ -533,11 +550,21 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
else
mustwait = true;
}
+ else if (mode == LW_SHARED)
+ {
+ if (lock->exclusive == 0 && lock->shared2 == 0)
+ {
+ lock->shared1++;
+ mustwait = false;
+ }
+ else
+ mustwait = true;
+ }
else
{
- if (lock->exclusive == 0)
+ if (lock->exclusive == 0 && lock->shared1 == 0)
{
- lock->shared++;
+ lock->shared2++;
mustwait = false;
}
else
@@ -598,10 +625,12 @@ LWLockRelease(LWLockId lockid)
/* Release my hold on lock */
if (lock->exclusive > 0)
lock->exclusive--;
+ else if (lock->shared1 > 0)
+ lock->shared1--;
else
{
- Assert(lock->shared > 0);
- lock->shared--;
+ Assert(lock->shared2 > 0);
+ lock->shared2--;
}
/*
@@ -613,7 +642,8 @@ LWLockRelease(LWLockId lockid)
head = lock->head;
if (head != NULL)
{
- if (lock->exclusive == 0 && lock->shared == 0 && lock->releaseOK)
+ if (lock->exclusive == 0 && lock->shared1 == 0 && lock->shared2 == 0
+ && lock->releaseOK)
{
/*
* Remove the to-be-awakened PGPROCs from the queue. If the front
@@ -621,10 +651,12 @@ LWLockRelease(LWLockId lockid)
* as many waiters as want shared access.
*/
proc = head;
- if (!proc->lwExclusive)
+ if (proc->lwMode != LW_EXCLUSIVE)
{
+ LWLockMode wake_mode = proc->lwMode;
+
while (proc->lwWaitLink != NULL &&
- !proc->lwWaitLink->lwExclusive)
+ proc->lwWaitLink->lwMode == wake_mode)
proc = proc->lwWaitLink;
}
/* proc is now the last PGPROC to be released */
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index eda3a98..be48a47 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -326,7 +326,7 @@ InitProcess(void)
if (IsAutoVacuumWorkerProcess())
MyProc->vacuumFlags |= PROC_IS_AUTOVACUUM;
MyProc->lwWaiting = false;
- MyProc->lwExclusive = false;
+ MyProc->lwMode = LW_SHARED;
MyProc->lwWaitLink = NULL;
MyProc->waitLock = NULL;
MyProc->waitProcLock = NULL;
@@ -480,7 +480,7 @@ InitAuxiliaryProcess(void)
MyProc->inCommit = false;
MyProc->vacuumFlags = 0;
MyProc->lwWaiting = false;
- MyProc->lwExclusive = false;
+ MyProc->lwMode = LW_SHARED;
MyProc->lwWaitLink = NULL;
MyProc->waitLock = NULL;
MyProc->waitProcLock = NULL;
diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h
index 438a48d..3b1c1c0 100644
--- a/src/include/storage/lwlock.h
+++ b/src/include/storage/lwlock.h
@@ -94,7 +94,8 @@ typedef enum LWLockId
typedef enum LWLockMode
{
LW_EXCLUSIVE,
- LW_SHARED
+ LW_SHARED,
+ LW_SHARED2
} LWLockMode;
diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h
index 6e798b1..d4220a7 100644
--- a/src/include/storage/proc.h
+++ b/src/include/storage/proc.h
@@ -116,7 +116,7 @@ struct PGPROC
/* Info about LWLock the process is currently waiting for, if any. */
bool lwWaiting; /* true if waiting for an LW lock */
- bool lwExclusive; /* true if waiting for exclusive access */
+ LWLockMode lwMode; /* mode we're waiting for */
struct PGPROC *lwWaitLink; /* next waiter for same LW lock */
/* Info about lock the process is currently waiting for, if any. */
hi,
latestCompletedXid got backward due to concurrent updates
and it fooled TransactionIdIsInProgress?
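i.e. the check-then-store that advances latestCompletedXid is no longer
atomic once two committers can be inside ProcArrayEndTransaction() at
the same time (roughly, from memory):

/* in ProcArrayEndTransaction(), roughly */
if (TransactionIdPrecedes(ShmemVariableCache->latestCompletedXid,
                          latestXid))
    ShmemVariableCache->latestCompletedXid = latestXid;

/*
 * With two LW_SHARED2 holders, A (latestXid = 100) and B (latestXid = 101)
 * can both read the old value and both pass the check; if B stores 101
 * first and A stores 100 afterwards, latestCompletedXid moves backward,
 * which can fool TransactionIdIsInProgress / snapshots into treating
 * xid 101 as not yet completed.
 */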
YAMAMOTO Takashi
On Tue, Nov 8, 2011 at 4:52 AM, Robert Haas <robertmhaas@gmail.com> wrote:
With 80 clients (but not 32 or fewer), I occasionally get the
following error:
ERROR: t_xmin is uncommitted in tuple to be updated
So it seems that there's some way in which this locking is actually
incorrect, though I'm not seeing what it is at the moment. Either
that, or there's some bug in the existing code that happens to be
exposed by this change.
The semantics of shared locks are that they jump the existing queue, so
this patch allows the lock to be held in sequences that never occurred
when this path took only exclusive locks.
For me, the second kind of lock should queue up normally, but then be
released en masse when possible. So it should queue like an exclusive,
but wake like a shared. I vaguely remember shared_queued.v1.patch taking
that approach.
That can then produce flip-flop lock parties. A slight problem there
is that when shared locks queue they don't all queue together, a
problem which the other attached patch, written long ago, addresses.
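For reference, the acquire-side rule in shared_queued.v1.patch is just
this (excerpting the relevant branch from the attachment below):

/* LW_SHARED_QUEUED: take the lock immediately only if nobody is waiting */
if (lock->exclusive == 0 && lock->head == NULL)
{
    lock->shared++;
    mustwait = false;
}
else
    mustwait = true;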
--
Simon Riggs http://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Training & Services
Attachments:
shared_lwlock.v1.patch (application/octet-stream)
Index: src/backend/storage/lmgr/lwlock.c
===================================================================
RCS file: /home/sriggs/pg/REPOSITORY/pgsql/src/backend/storage/lmgr/lwlock.c,v
retrieving revision 1.49
diff -c -r1.49 lwlock.c
*** src/backend/storage/lmgr/lwlock.c 15 Nov 2007 21:14:38 -0000 1.49
--- src/backend/storage/lmgr/lwlock.c 4 Dec 2007 11:20:20 -0000
***************
*** 42,47 ****
--- 42,48 ----
int shared; /* # of shared holders (0..MaxBackends) */
PGPROC *head; /* head of list of waiting PGPROCs */
PGPROC *tail; /* tail of list of waiting PGPROCs */
+ PGPROC *shared_tail; /* tail of list of shared waiters */
/* tail is undefined when head is NULL */
} LWLock;
***************
*** 419,430 ****
proc->lwWaiting = true;
proc->lwExclusive = (mode == LW_EXCLUSIVE);
! proc->lwWaitLink = NULL;
! if (lock->head == NULL)
! lock->head = proc;
else
! lock->tail->lwWaitLink = proc;
! lock->tail = proc;
/* Can release the mutex now */
SpinLockRelease(&lock->mutex);
--- 420,443 ----
proc->lwWaiting = true;
proc->lwExclusive = (mode == LW_EXCLUSIVE);
! proc->lwWaitLink = NULL; /* back of the main queue */
!
! if (mode == LW_EXCLUSIVE || lock->shared_tail == NULL)
! {
! if (lock->head == NULL)
! lock->head = proc;
! else
! lock->tail->lwWaitLink = proc;
! lock->tail = proc;
! }
else
! lock->shared_tail->lwSWaitLink = proc;
!
! if (mode == LW_SHARED)
! {
! lock->shared_tail = proc;
! proc->lwSWaitLink = NULL; /* back of the shared queue */
! }
/* Can release the mutex now */
SpinLockRelease(&lock->mutex);
***************
*** 604,620 ****
if (lock->exclusive == 0 && lock->shared == 0 && lock->releaseOK)
{
/*
! * Remove the to-be-awakened PGPROCs from the queue. If the front
! * waiter wants exclusive lock, awaken him only. Otherwise awaken
! * as many waiters as want shared access.
*/
proc = head;
if (!proc->lwExclusive)
! {
! while (proc->lwWaitLink != NULL &&
! !proc->lwWaitLink->lwExclusive)
! proc = proc->lwWaitLink;
! }
/* proc is now the last PGPROC to be released */
lock->head = proc->lwWaitLink;
proc->lwWaitLink = NULL;
--- 617,628 ----
if (lock->exclusive == 0 && lock->shared == 0 && lock->releaseOK)
{
/*
! * Remove the next PGPROC from the queue. There is only ever one
! * to remove whether it is X or S
*/
proc = head;
if (!proc->lwExclusive)
! lock->shared_tail = NULL;
/* proc is now the last PGPROC to be released */
lock->head = proc->lwWaitLink;
proc->lwWaitLink = NULL;
***************
*** 640,647 ****
{
LOG_LWDEBUG("LWLockRelease", lockid, "release waiter");
proc = head;
! head = proc->lwWaitLink;
! proc->lwWaitLink = NULL;
proc->lwWaiting = false;
PGSemaphoreUnlock(&proc->sem);
}
--- 648,663 ----
{
LOG_LWDEBUG("LWLockRelease", lockid, "release waiter");
proc = head;
! if (proc->lwExclusive)
! {
! head = proc->lwWaitLink;
! proc->lwWaitLink = NULL;
! }
! else
! {
! head = proc->lwSWaitLink;
! proc->lwSWaitLink = NULL;
! }
proc->lwWaiting = false;
PGSemaphoreUnlock(&proc->sem);
}
Index: src/backend/storage/lmgr/proc.c
===================================================================
RCS file: /home/sriggs/pg/REPOSITORY/pgsql/src/backend/storage/lmgr/proc.c,v
retrieving revision 1.197
diff -c -r1.197 proc.c
*** src/backend/storage/lmgr/proc.c 15 Nov 2007 21:14:38 -0000 1.197
--- src/backend/storage/lmgr/proc.c 4 Dec 2007 11:18:30 -0000
***************
*** 297,302 ****
--- 297,303 ----
MyProc->lwWaiting = false;
MyProc->lwExclusive = false;
MyProc->lwWaitLink = NULL;
+ MyProc->lwSWaitLink = NULL;
MyProc->waitLock = NULL;
MyProc->waitProcLock = NULL;
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
Index: src/include/storage/proc.h
===================================================================
RCS file: /home/sriggs/pg/REPOSITORY/pgsql/src/include/storage/proc.h,v
retrieving revision 1.102
diff -c -r1.102 proc.h
*** src/include/storage/proc.h 15 Nov 2007 21:14:44 -0000 1.102
--- src/include/storage/proc.h 4 Dec 2007 11:18:15 -0000
***************
*** 97,102 ****
--- 97,103 ----
bool lwWaiting; /* true if waiting for an LW lock */
bool lwExclusive; /* true if waiting for exclusive access */
struct PGPROC *lwWaitLink; /* next waiter for same LW lock */
+ struct PGPROC *lwSWaitLink; /* next waiter for same LW lock */
/* Info about lock the process is currently waiting for, if any. */
/* waitLock and waitProcLock are NULL if not currently waiting. */
shared_queued.v1.patch (application/octet-stream)
Index: src/backend/access/transam/xact.c
===================================================================
RCS file: /projects/cvsroot/pgsql/src/backend/access/transam/xact.c,v
retrieving revision 1.221
diff -c -r1.221 xact.c
*** src/backend/access/transam/xact.c 20 Jun 2006 22:51:59 -0000 1.221
--- src/backend/access/transam/xact.c 27 Jun 2006 14:59:22 -0000
***************
*** 717,723 ****
*/
madeTCentries = (MyLastRecPtr.xrecoff != 0);
if (madeTCentries)
! LWLockAcquire(CheckpointStartLock, LW_SHARED);
/*
* We only need to log the commit in XLOG if the transaction made any
--- 717,723 ----
*/
madeTCentries = (MyLastRecPtr.xrecoff != 0);
if (madeTCentries)
! LWLockAcquire(CheckpointStartLock, LW_SHARED_QUEUED);
/*
* We only need to log the commit in XLOG if the transaction made any
Index: src/backend/storage/lmgr/lwlock.c
===================================================================
RCS file: /projects/cvsroot/pgsql/src/backend/storage/lmgr/lwlock.c,v
retrieving revision 1.39
diff -c -r1.39 lwlock.c
*** src/backend/storage/lmgr/lwlock.c 21 Apr 2006 16:45:12 -0000 1.39
--- src/backend/storage/lmgr/lwlock.c 27 Jun 2006 14:59:23 -0000
***************
*** 362,378 ****
lock->releaseOK = true;
/* If I can get the lock, do so quickly. */
! if (mode == LW_EXCLUSIVE)
! {
! if (lock->exclusive == 0 && lock->shared == 0)
! {
! lock->exclusive++;
! mustwait = false;
! }
! else
! mustwait = true;
! }
! else
{
if (lock->exclusive == 0)
{
--- 362,368 ----
lock->releaseOK = true;
/* If I can get the lock, do so quickly. */
! if (mode == LW_SHARED)
{
if (lock->exclusive == 0)
{
***************
*** 382,387 ****
--- 372,396 ----
else
mustwait = true;
}
+ else
+ {
+ if (mode == LW_EXCLUSIVE) /* LW_EXCLUSIVE */
+ if (lock->exclusive == 0 && lock->shared == 0)
+ {
+ lock->exclusive++;
+ mustwait = false;
+ }
+ else
+ mustwait = true;
+ else /* LW_SHARED_QUEUED */
+ if (lock->exclusive == 0 && lock->head == NULL)
+ {
+ lock->shared++;
+ mustwait = false;
+ }
+ else
+ mustwait = true;
+ }
if (!mustwait)
break; /* got the lock */
***************
*** 484,500 ****
SpinLockAcquire(&lock->mutex);
/* If I can get the lock, do so quickly. */
! if (mode == LW_EXCLUSIVE)
! {
! if (lock->exclusive == 0 && lock->shared == 0)
! {
! lock->exclusive++;
! mustwait = false;
! }
! else
! mustwait = true;
! }
! else
{
if (lock->exclusive == 0)
{
--- 493,499 ----
SpinLockAcquire(&lock->mutex);
/* If I can get the lock, do so quickly. */
! if (mode == LW_SHARED)
{
if (lock->exclusive == 0)
{
***************
*** 504,509 ****
--- 503,527 ----
else
mustwait = true;
}
+ else
+ {
+ if (mode == LW_EXCLUSIVE) /* LW_EXCLUSIVE */
+ if (lock->exclusive == 0 && lock->shared == 0)
+ {
+ lock->exclusive++;
+ mustwait = false;
+ }
+ else
+ mustwait = true;
+ else /* LW_SHARED_QUEUED */
+ if (lock->exclusive == 0 && lock->head == NULL)
+ {
+ lock->shared++;
+ mustwait = false;
+ }
+ else
+ mustwait = true;
+ }
/* We are done updating shared state of the lock itself. */
SpinLockRelease(&lock->mutex);
***************
*** 622,627 ****
--- 640,646 ----
}
+
/*
* LWLockReleaseAll - release all currently-held locks
*
Index: src/include/storage/lwlock.h
===================================================================
RCS file: /projects/cvsroot/pgsql/src/include/storage/lwlock.h,v
retrieving revision 1.28
diff -c -r1.28 lwlock.h
*** src/include/storage/lwlock.h 8 May 2006 00:00:17 -0000 1.28
--- src/include/storage/lwlock.h 27 Jun 2006 14:59:24 -0000
***************
*** 57,63 ****
typedef enum LWLockMode
{
LW_EXCLUSIVE,
! LW_SHARED
} LWLockMode;
--- 57,64 ----
typedef enum LWLockMode
{
LW_EXCLUSIVE,
! LW_SHARED,
! LW_SHARED_QUEUED
} LWLockMode;
On Tue, Nov 8, 2011 at 2:24 AM, YAMAMOTO Takashi <yamt@mwd.biglobe.ne.jp> wrote:
latestCompletedXid got backward due to concurrent updates
and it fooled TransactionIdIsInProgress?
Ah ha! I bet that's it.
I think this could be avoided by a more sophisticated locking scheme.
Instead of waking up all the people trying to do
ProcArrayEndTransaction() and letting them all run simultaneously,
wake up one of them. That one guy goes and clears all the XID fields
and updates latestCompletedXid, and then wakes up all the others (who
now don't even need to reacquire the spinlock to "release" the lock,
because they never really held it in the first place, and yet the work
they needed done is done).
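In rough terms, the shape I have in mind is something like this (pure
hand-waving - the pending-committer queue and the fields below are
hypothetical, nothing like them exists today):

/*
 * Hand-waving sketch only.  "pendingCommitters", "pendingCommitLink" and
 * "pendingLatestXid" are hypothetical; queueing and wakeup details omitted.
 */
PGPROC *proc;

LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
for (proc = pendingCommitters; proc != NULL; proc = proc->pendingCommitLink)
{
    proc->xid = InvalidTransactionId;
    proc->lxid = InvalidLocalTransactionId;
    /* ... clear the other per-backend fields as ProcArrayEndTransaction does ... */

    if (TransactionIdPrecedes(ShmemVariableCache->latestCompletedXid,
                              proc->pendingLatestXid))
        ShmemVariableCache->latestCompletedXid = proc->pendingLatestXid;
}
LWLockRelease(ProcArrayLock);
/* now wake all the queued committers; the work they wanted is already done */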
The trick is to make something like that work within the confines of
the LWLock mechanism. It strikes me that we have a number of places
in the system where it would be useful to leverage the queuing and
error handling facilities that the lwlock mechanism provides, but have
different rules for handling lock conflicts - either different lock
modes, or request combining, or whatever. lwlock.c is an awfully big
chunk of code to cut-and-paste if you need an lwlock with three modes,
or some primitive that has behavior similar to an lwlock overall but
with some differences in detail. I wonder if there's a way that we
could usefully refactor things to make that sort of thing easier.
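For instance, one could imagine driving the conflict rules from a small
per-mode table rather than hard-coded branches - purely hypothetical,
just to illustrate the kind of generalization I mean:

/*
 * Hypothetical conflict matrix, indexed as [held mode][requested mode],
 * matching the LW_SHARED2 semantics upthread: exclusive conflicts with
 * everything, and the two shared modes conflict with each other but not
 * with themselves.
 */
static const bool lw_mode_conflicts[3][3] = {
    /* requested:        EXCLUSIVE  SHARED  SHARED2 */
    /* held EXCLUSIVE */ { true,     true,   true  },
    /* held SHARED    */ { true,     false,  true  },
    /* held SHARED2   */ { true,     true,   false },
};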
--
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company