diff -dcrpN postgresql.1/doc/src/sgml/config.sgml postgresql.2/doc/src/sgml/config.sgml
*** postgresql.1/doc/src/sgml/config.sgml	2012-04-30 08:18:06.609737757 +0200
--- postgresql.2/doc/src/sgml/config.sgml	2012-05-07 19:39:39.839157405 +0200
*************** COPY postgres_log FROM '/full/path/to/lo
*** 4958,4964 ****
          milliseconds, starting from the time the command arrives at the server
          from the client.  If <varname>log_min_error_statement</> is set to
          <literal>ERROR</> or lower, the statement that timed out will also be
!         logged.  A value of zero (the default) turns this off.
         </para>
  
         <para>
--- 4958,4967 ----
          milliseconds, starting from the time the command arrives at the server
          from the client.  If <varname>log_min_error_statement</> is set to
          <literal>ERROR</> or lower, the statement that timed out will also be
!         logged.  The timeout may occur at any point, e.g. while waiting for
!         locks on database objects or, in the case of a large result set,
!         during data retrieval from the server after all locks were acquired.
!         A value of zero (the default) turns this off.
         </para>
  
         <para>
*************** COPY postgres_log FROM '/full/path/to/lo
*** 4969,4974 ****
--- 4972,5005 ----
        </listitem>
       </varlistentry>
  
+      <varlistentry id="guc-lock-timeout" xreflabel="lock_timeout">
+       <term><varname>lock_timeout</varname> (<type>integer</type>)</term>
+       <indexterm>
+        <primary><varname>lock_timeout</> configuration parameter</primary>
+       </indexterm>
+       <listitem>
+        <para>
+         Abort any statement that tries to acquire a heavy-weight lock on rows,
+         pages, tables, indexes, or other objects if the lock has to wait more
+         than the specified number of milliseconds, starting from the time the
+         command arrives at the server from the client. If the statement involves
+         more than one such lock, the timeout applies to every one of them.
+         This means the statement may wait for up to N * <varname>lock_timeout</>
+         in the worst case, where N is the number of locks the statement acquires.
+         As opposed to <varname>statement_timeout</>, this timeout (and the error)
+         may only occur while waiting for locks. If <varname>log_min_error_statement</>
+         is set to <literal>ERROR</> or lower, the statement that timed out will
+         also be logged. A value of zero (the default) turns off the limitation.
+        </para>
+ 
+        <para>
+         Setting <varname>lock_timeout</> in
+         <filename>postgresql.conf</> is not recommended because it
+         affects all sessions.
+        </para>
+       </listitem>
+      </varlistentry>
+ 
       <varlistentry id="guc-vacuum-freeze-table-age" xreflabel="vacuum_freeze_table_age">
        <term><varname>vacuum_freeze_table_age</varname> (<type>integer</type>)</term>
        <indexterm>
diff -dcrpN postgresql.1/doc/src/sgml/ref/lock.sgml postgresql.2/doc/src/sgml/ref/lock.sgml
*** postgresql.1/doc/src/sgml/ref/lock.sgml	2012-04-16 19:57:22.229913063 +0200
--- postgresql.2/doc/src/sgml/ref/lock.sgml	2012-05-07 19:39:39.840157411 +0200
*************** LOCK [ TABLE ] [ ONLY ] <replaceable cla
*** 39,46 ****
     <literal>NOWAIT</literal> is specified, <command>LOCK
     TABLE</command> does not wait to acquire the desired lock: if it
     cannot be acquired immediately, the command is aborted and an
!    error is emitted.  Once obtained, the lock is held for the
!    remainder of the current transaction.  (There is no <command>UNLOCK
     TABLE</command> command; locks are always released at transaction
     end.)
    </para>
--- 39,49 ----
     <literal>NOWAIT</literal> is specified, <command>LOCK
     TABLE</command> does not wait to acquire the desired lock: if it
     cannot be acquired immediately, the command is aborted and an
!    error is emitted.  If <varname>lock_timeout</varname> is set to a value
!    greater than 0, and the lock cannot be acquired within the specified
!    number of milliseconds, the command is aborted and an error
!    is emitted.  Once obtained, the lock is held for the remainder of
!    the current transaction.  (There is no <command>UNLOCK
     TABLE</command> command; locks are always released at transaction
     end.)
    </para>
diff -dcrpN postgresql.1/doc/src/sgml/ref/select.sgml postgresql.2/doc/src/sgml/ref/select.sgml
*** postgresql.1/doc/src/sgml/ref/select.sgml	2012-04-16 19:57:22.233913109 +0200
--- postgresql.2/doc/src/sgml/ref/select.sgml	2012-05-07 19:39:39.842157422 +0200
*************** FOR SHARE [ OF <replaceable class="param
*** 1199,1204 ****
--- 1199,1211 ----
     </para>
  
     <para>
+     If the <literal>NOWAIT</> option is not specified, <varname>lock_timeout</varname>
+     is set to a value greater than 0, and the lock needs to wait more than
+     the specified number of milliseconds, the command reports an error after
+     timing out, rather than waiting indefinitely.
+    </para>
+ 
+    <para>
      If specific tables are named in <literal>FOR UPDATE</literal>
      or <literal>FOR SHARE</literal>,
      then only rows coming from those tables are locked; any other
diff -dcrpN postgresql.1/src/backend/port/posix_sema.c postgresql.2/src/backend/port/posix_sema.c
*** postgresql.1/src/backend/port/posix_sema.c	2012-04-16 19:57:22.438915489 +0200
--- postgresql.2/src/backend/port/posix_sema.c	2012-05-07 19:39:39.843157427 +0200
***************
*** 24,29 ****
--- 24,30 ----
  #include "miscadmin.h"
  #include "storage/ipc.h"
  #include "storage/pg_sema.h"
+ #include "storage/timeout.h"
  
  
  #ifdef USE_NAMED_POSIX_SEMAPHORES
*************** PGSemaphoreTryLock(PGSemaphore sema)
*** 313,315 ****
--- 314,341 ----
  
  	return true;
  }
+ 
+ /*
+  * PGSemaphoreTimedLock
+  *
+  * Lock a semaphore (decrement count), blocking if count would be < 0
+  * Return if lock_timeout expired
+  */
+ void
+ PGSemaphoreTimedLock(PGSemaphore sema, bool interruptOK)
+ {
+ 	int			errStatus;
+ 
+ 	do
+ 	{
+ 		ImmediateInterruptOK = interruptOK;
+ 		CHECK_FOR_INTERRUPTS();
+ 		errStatus = sem_wait(PG_SEM_REF(sema));
+ 		ImmediateInterruptOK = false;
+ 	} while (errStatus < 0 && errno == EINTR && !get_timeout_indicator(LOCK_TIMEOUT));
+ 
+ 	if (get_timeout_indicator(LOCK_TIMEOUT))
+ 		return;
+ 	if (errStatus < 0)
+ 		elog(FATAL, "sem_wait failed: %m");
+ }
diff -dcrpN postgresql.1/src/backend/port/sysv_sema.c postgresql.2/src/backend/port/sysv_sema.c
*** postgresql.1/src/backend/port/sysv_sema.c	2012-04-16 19:57:22.438915489 +0200
--- postgresql.2/src/backend/port/sysv_sema.c	2012-05-07 19:39:39.843157427 +0200
***************
*** 30,35 ****
--- 30,36 ----
  #include "miscadmin.h"
  #include "storage/ipc.h"
  #include "storage/pg_sema.h"
+ #include "storage/timeout.h"
  
  
  #ifndef HAVE_UNION_SEMUN
*************** PGSemaphoreTryLock(PGSemaphore sema)
*** 495,497 ****
--- 496,528 ----
  
  	return true;
  }
+ 
+ /*
+  * PGSemaphoreTimedLock
+  *
+  * Lock a semaphore (decrement count), blocking if count would be < 0
+  * Return if lock_timeout expired
+  */
+ void
+ PGSemaphoreTimedLock(PGSemaphore sema, bool interruptOK)
+ {
+ 	int			errStatus;
+ 	struct sembuf sops;
+ 
+ 	sops.sem_op = -1;			/* decrement */
+ 	sops.sem_flg = 0;
+ 	sops.sem_num = sema->semNum;
+ 
+ 	do
+ 	{
+ 		ImmediateInterruptOK = interruptOK;
+ 		CHECK_FOR_INTERRUPTS();
+ 		errStatus = semop(sema->semId, &sops, 1);
+ 		ImmediateInterruptOK = false;
+ 	} while (errStatus < 0 && errno == EINTR && !get_timeout_indicator(LOCK_TIMEOUT));
+ 
+ 	if (get_timeout_indicator(LOCK_TIMEOUT))
+ 		return;
+ 	if (errStatus < 0)
+ 		elog(FATAL, "semop(id=%d) failed: %m", sema->semId);
+ }
diff -dcrpN postgresql.1/src/backend/port/win32_sema.c postgresql.2/src/backend/port/win32_sema.c
*** postgresql.1/src/backend/port/win32_sema.c	2012-04-16 19:57:22.439915501 +0200
--- postgresql.2/src/backend/port/win32_sema.c	2012-05-07 19:39:39.843157427 +0200
***************
*** 16,21 ****
--- 16,22 ----
  #include "miscadmin.h"
  #include "storage/ipc.h"
  #include "storage/pg_sema.h"
+ #include "storage/timeout.h"
  
  static HANDLE *mySemSet;		/* IDs of sema sets acquired so far */
  static int	numSems;			/* number of sema sets acquired so far */
*************** PGSemaphoreTryLock(PGSemaphore sema)
*** 205,207 ****
--- 206,263 ----
  	/* keep compiler quiet */
  	return false;
  }
+ 
+ /*
+  * PGSemaphoreTimedLock
+  *
+  * Lock a semaphore (decrement count), blocking if count would be < 0.
+  * Serve the interrupt if interruptOK is true.
+  * Return if lock_timeout expired.
+  */
+ void
+ PGSemaphoreTimedLock(PGSemaphore sema, bool interruptOK)
+ {
+ 	DWORD		ret;
+ 	HANDLE		wh[2];
+ 
+ 	wh[0] = *sema;
+ 	wh[1] = pgwin32_signal_event;
+ 
+ 	/*
+ 	 * As in other implementations of PGSemaphoreLock, we need to check for
+ 	 * cancel/die interrupts each time through the loop.  But here, there is
+ 	 * no hidden magic about whether the syscall will internally service a
+ 	 * signal --- we do that ourselves.
+ 	 */
+ 	do
+ 	{
+ 		ImmediateInterruptOK = interruptOK;
+ 		CHECK_FOR_INTERRUPTS();
+ 
+ 		errno = 0;
+ 		ret = WaitForMultipleObjectsEx(2, wh, FALSE, INFINITE, TRUE);
+ 
+ 		if (ret == WAIT_OBJECT_0)
+ 		{
+ 			/* We got it! */
+ 			return;
+ 		}
+ 		else if (ret == WAIT_OBJECT_0 + 1)
+ 		{
+ 			/* Signal event is set - we have a signal to deliver */
+ 			pgwin32_dispatch_queued_signals();
+ 			errno = EINTR;
+ 		}
+ 		else
+ 			/* Otherwise we are in trouble */
+ 			errno = EIDRM;
+ 
+ 		ImmediateInterruptOK = false;
+ 	} while (errno == EINTR && !get_timeout_indicator(LOCK_TIMEOUT));
+ 
+ 	if (get_timeout_indicator(LOCK_TIMEOUT))
+ 		return;
+ 	if (errno != 0)
+ 		ereport(FATAL,
+ 				(errmsg("could not lock semaphore: error code %d", (int) GetLastError())));
+ }
diff -dcrpN postgresql.1/src/backend/storage/lmgr/lmgr.c postgresql.2/src/backend/storage/lmgr/lmgr.c
*** postgresql.1/src/backend/storage/lmgr/lmgr.c	2012-04-16 19:57:22.459915733 +0200
--- postgresql.2/src/backend/storage/lmgr/lmgr.c	2012-05-07 19:39:39.844157432 +0200
***************
*** 19,26 ****
--- 19,29 ----
  #include "access/transam.h"
  #include "access/xact.h"
  #include "catalog/catalog.h"
+ #include "catalog/pg_database.h"
  #include "miscadmin.h"
  #include "storage/lmgr.h"
+ #include "utils/lsyscache.h"
+ #include "storage/proc.h"
  #include "storage/procarray.h"
  #include "utils/inval.h"
  
*************** LockRelationOid(Oid relid, LOCKMODE lock
*** 78,83 ****
--- 81,101 ----
  
  	res = LockAcquire(&tag, lockmode, false, false);
  
+ 	if (res == LOCKACQUIRE_NOT_AVAIL)
+ 	{
+ 		char	   *relname = get_rel_name(relid);
+ 		if (relname)
+ 			ereport(ERROR,
+ 					(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
+ 						errmsg("could not obtain lock on relation \"%s\"",
+ 						relname)));
+ 		else
+ 			ereport(ERROR,
+ 					(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
+ 						errmsg("could not obtain lock on relation with OID %u",
+ 						relid)));
+ 	}
+ 
  	/*
  	 * Now that we have the lock, check for invalidation messages, so that we
  	 * will update or flush any stale relcache entry before we try to use it.
*************** LockRelation(Relation relation, LOCKMODE
*** 174,179 ****
--- 192,203 ----
  
  	res = LockAcquire(&tag, lockmode, false, false);
  
+ 	if (res == LOCKACQUIRE_NOT_AVAIL)
+ 		ereport(ERROR,
+ 				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
+ 					errmsg("could not obtain lock on relation \"%s\"",
+ 				RelationGetRelationName(relation))));
+ 
  	/*
  	 * Now that we have the lock, check for invalidation messages; see notes
  	 * in LockRelationOid.
*************** LockRelationIdForSession(LockRelId *reli
*** 251,257 ****
  
  	SET_LOCKTAG_RELATION(tag, relid->dbId, relid->relId);
  
! 	(void) LockAcquire(&tag, lockmode, true, false);
  }
  
  /*
--- 275,294 ----
  
  	SET_LOCKTAG_RELATION(tag, relid->dbId, relid->relId);
  
! 	if (LockAcquire(&tag, lockmode, true, false) == LOCKACQUIRE_NOT_AVAIL)
! 	{
! 		char	   *relname = get_rel_name(relid->relId);
! 		if (relname)
! 			ereport(ERROR,
! 					(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 						errmsg("could not obtain lock on relation \"%s\"",
! 						relname)));
! 		else
! 			ereport(ERROR,
! 					(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 						errmsg("could not obtain lock on relation with OID %u",
! 						relid->relId)));
! 	}
  }
  
  /*
*************** LockRelationForExtension(Relation relati
*** 286,292 ****
  								relation->rd_lockInfo.lockRelId.dbId,
  								relation->rd_lockInfo.lockRelId.relId);
  
! 	(void) LockAcquire(&tag, lockmode, false, false);
  }
  
  /*
--- 323,333 ----
  								relation->rd_lockInfo.lockRelId.dbId,
  								relation->rd_lockInfo.lockRelId.relId);
  
! 	if (LockAcquire(&tag, lockmode, false, false) == LOCKACQUIRE_NOT_AVAIL)
! 		ereport(ERROR,
! 				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 					errmsg("could not obtain lock on index \"%s\"",
! 				RelationGetRelationName(relation))));
  }
  
  /*
*************** LockPage(Relation relation, BlockNumber
*** 320,326 ****
  					 relation->rd_lockInfo.lockRelId.relId,
  					 blkno);
  
! 	(void) LockAcquire(&tag, lockmode, false, false);
  }
  
  /*
--- 361,371 ----
  					 relation->rd_lockInfo.lockRelId.relId,
  					 blkno);
  
! 	if (LockAcquire(&tag, lockmode, false, false) == LOCKACQUIRE_NOT_AVAIL)
! 		ereport(ERROR,
! 				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 					errmsg("could not obtain lock on page %u of relation \"%s\"",
! 				blkno, RelationGetRelationName(relation))));
  }
  
  /*
*************** LockTuple(Relation relation, ItemPointer
*** 376,382 ****
  					  ItemPointerGetBlockNumber(tid),
  					  ItemPointerGetOffsetNumber(tid));
  
! 	(void) LockAcquire(&tag, lockmode, false, false);
  }
  
  /*
--- 421,431 ----
  					  ItemPointerGetBlockNumber(tid),
  					  ItemPointerGetOffsetNumber(tid));
  
! 	if (LockAcquire(&tag, lockmode, false, false) == LOCKACQUIRE_NOT_AVAIL)
! 		ereport(ERROR,
! 				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 					errmsg("could not obtain lock on row in relation \"%s\"",
! 				RelationGetRelationName(relation))));
  }
  
  /*
*************** XactLockTableInsert(TransactionId xid)
*** 430,436 ****
  
  	SET_LOCKTAG_TRANSACTION(tag, xid);
  
! 	(void) LockAcquire(&tag, ExclusiveLock, false, false);
  }
  
  /*
--- 479,488 ----
  
  	SET_LOCKTAG_TRANSACTION(tag, xid);
  
! 	if (LockAcquire(&tag, ExclusiveLock, false, false) == LOCKACQUIRE_NOT_AVAIL)
! 		ereport(ERROR,
! 				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 					errmsg("could not obtain lock on transaction with ID %u", xid)));
  }
  
  /*
*************** XactLockTableWait(TransactionId xid)
*** 474,480 ****
  
  		SET_LOCKTAG_TRANSACTION(tag, xid);
  
! 		(void) LockAcquire(&tag, ShareLock, false, false);
  
  		LockRelease(&tag, ShareLock, false);
  
--- 526,535 ----
  
  		SET_LOCKTAG_TRANSACTION(tag, xid);
  
! 		if (LockAcquire(&tag, ShareLock, false, false) == LOCKACQUIRE_NOT_AVAIL)
! 			ereport(ERROR,
! 					(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 						errmsg("could not obtain lock on transaction with ID %u", xid)));
  
  		LockRelease(&tag, ShareLock, false);
  
*************** LockDatabaseObject(Oid classid, Oid obji
*** 535,541 ****
  					   objid,
  					   objsubid);
  
! 	(void) LockAcquire(&tag, lockmode, false, false);
  
  	/* Make sure syscaches are up-to-date with any changes we waited for */
  	AcceptInvalidationMessages();
--- 590,600 ----
  					   objid,
  					   objsubid);
  
! 	if (LockAcquire(&tag, lockmode, false, false) == LOCKACQUIRE_NOT_AVAIL)
! 		ereport(ERROR,
! 				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 					errmsg("could not obtain lock on class:object: %u:%u",
! 				classid, objid)));
  
  	/* Make sure syscaches are up-to-date with any changes we waited for */
  	AcceptInvalidationMessages();
*************** LockSharedObject(Oid classid, Oid objid,
*** 576,582 ****
  					   objid,
  					   objsubid);
  
! 	(void) LockAcquire(&tag, lockmode, false, false);
  
  	/* Make sure syscaches are up-to-date with any changes we waited for */
  	AcceptInvalidationMessages();
--- 635,645 ----
  					   objid,
  					   objsubid);
  
! 	if (LockAcquire(&tag, lockmode, false, false) == LOCKACQUIRE_NOT_AVAIL)
! 		ereport(ERROR,
! 				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 					errmsg("could not obtain lock on class:object: %u:%u",
! 				classid, objid)));
  
  	/* Make sure syscaches are up-to-date with any changes we waited for */
  	AcceptInvalidationMessages();
*************** LockSharedObjectForSession(Oid classid,
*** 618,624 ****
  					   objid,
  					   objsubid);
  
! 	(void) LockAcquire(&tag, lockmode, true, false);
  }
  
  /*
--- 681,702 ----
  					   objid,
  					   objsubid);
  
! 	if (LockAcquire(&tag, lockmode, true, false) == LOCKACQUIRE_NOT_AVAIL)
! 		switch(classid)
! 		{
! 		case DatabaseRelationId:
! 			ereport(ERROR,
! 					(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 						errmsg("could not obtain lock on database with ID %u",
! 					objid)));
! 			break;
! 		default:
! 			ereport(ERROR,
! 					(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 						errmsg("could not obtain lock on class:object: %u:%u",
! 					classid, objid)));
! 			break;
! 		}
  }
  
  /*
diff -dcrpN postgresql.1/src/backend/storage/lmgr/lock.c postgresql.2/src/backend/storage/lmgr/lock.c
*** postgresql.1/src/backend/storage/lmgr/lock.c	2012-05-05 13:22:40.776667140 +0200
--- postgresql.2/src/backend/storage/lmgr/lock.c	2012-05-07 19:44:13.166705948 +0200
*************** static PROCLOCK *SetupLockInTable(LockMe
*** 340,346 ****
  static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
  static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
  static void FinishStrongLockAcquire(void);
! static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
  static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
  static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
  			PROCLOCK *proclock, LockMethod lockMethodTable);
--- 340,346 ----
  static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
  static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
  static void FinishStrongLockAcquire(void);
! static int WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
  static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
  static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
  			PROCLOCK *proclock, LockMethod lockMethodTable);
*************** ProcLockHashCode(const PROCLOCKTAG *proc
*** 546,552 ****
   *	dontWait: if true, don't wait to acquire lock
   *
   * Returns one of:
!  *		LOCKACQUIRE_NOT_AVAIL		lock not available, and dontWait=true
   *		LOCKACQUIRE_OK				lock successfully acquired
   *		LOCKACQUIRE_ALREADY_HELD	incremented count for lock already held
   *
--- 546,552 ----
   *	dontWait: if true, don't wait to acquire lock
   *
   * Returns one of:
!  *		LOCKACQUIRE_NOT_AVAIL		lock not available, either dontWait=true or timeout
   *		LOCKACQUIRE_OK				lock successfully acquired
   *		LOCKACQUIRE_ALREADY_HELD	incremented count for lock already held
   *
*************** LockAcquireExtended(const LOCKTAG *lockt
*** 856,862 ****
  										 locktag->locktag_type,
  										 lockmode);
  
! 		WaitOnLock(locallock, owner);
  
  		TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
  										locktag->locktag_field2,
--- 856,862 ----
  										 locktag->locktag_type,
  										 lockmode);
  
! 		status = WaitOnLock(locallock, owner);
  
  		TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
  										locktag->locktag_field2,
*************** LockAcquireExtended(const LOCKTAG *lockt
*** 871,898 ****
  		 * done when the lock was granted to us --- see notes in WaitOnLock.
  		 */
  
! 		/*
! 		 * Check the proclock entry status, in case something in the ipc
! 		 * communication doesn't work correctly.
! 		 */
! 		if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
  		{
! 			AbortStrongLockAcquire();
! 			PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
! 			LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
! 			/* Should we retry ? */
! 			LWLockRelease(partitionLock);
! 			elog(ERROR, "LockAcquire failed");
  		}
- 		PROCLOCK_PRINT("LockAcquire: granted", proclock);
- 		LOCK_PRINT("LockAcquire: granted", lock, lockmode);
  	}
  
! 	/*
! 	 * Lock state is fully up-to-date now; if we error out after this, no
! 	 * special error cleanup is required.
! 	 */
! 	FinishStrongLockAcquire();
  
  	LWLockRelease(partitionLock);
  
--- 871,921 ----
  		 * done when the lock was granted to us --- see notes in WaitOnLock.
  		 */
  
! 		switch (status)
  		{
! 		case STATUS_OK:
! 			/*
! 			 * Check the proclock entry status, in case something in the ipc
! 			 * communication doesn't work correctly.
! 			 */
! 			if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
! 			{
! 				AbortStrongLockAcquire();
! 				PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
! 				LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
! 				/* Should we retry ? */
! 				LWLockRelease(partitionLock);
! 				elog(ERROR, "LockAcquire failed");
! 			}
! 			PROCLOCK_PRINT("LockAcquire: granted", proclock);
! 			LOCK_PRINT("LockAcquire: granted", lock, lockmode);
! 			break;
! 		case STATUS_WAITING:
! 			PROCLOCK_PRINT("LockAcquire: timed out", proclock);
! 			LOCK_PRINT("LockAcquire: timed out", lock, lockmode);
! 			break;
! 		default:
! 			elog(ERROR, "LockAcquire invalid status");
! 			break;
  		}
  	}
  
! 	if (status == STATUS_WAITING)
! 	{
! 		/*
! 		 * lock_timeout was set and WaitOnLock() indicated
! 		 * we timed out. Clean up manually.
! 		 */
! 		AbortStrongLockAcquire();
! 	}
! 	else
! 	{
! 		/*
! 		 * Lock state is fully up-to-date now; if we error out after this, no
! 		 * special error cleanup is required.
! 		 */
! 		FinishStrongLockAcquire();
! 	}
  
  	LWLockRelease(partitionLock);
  
*************** LockAcquireExtended(const LOCKTAG *lockt
*** 911,917 ****
  							   locktag->locktag_field2);
  	}
  
! 	return LOCKACQUIRE_OK;
  }
  
  /*
--- 934,940 ----
  							   locktag->locktag_field2);
  	}
  
! 	return (status == STATUS_OK ? LOCKACQUIRE_OK : LOCKACQUIRE_NOT_AVAIL);
  }
  
  /*
*************** GrantAwaitedLock(void)
*** 1429,1442 ****
   * Caller must have set MyProc->heldLocks to reflect locks already held
   * on the lockable object by this process.
   *
   * The appropriate partition lock must be held at entry.
   */
! static void
  WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
  {
  	LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
  	LockMethod	lockMethodTable = LockMethods[lockmethodid];
  	char	   *volatile new_status = NULL;
  
  	LOCK_PRINT("WaitOnLock: sleeping on lock",
  			   locallock->lock, locallock->tag.mode);
--- 1452,1471 ----
   * Caller must have set MyProc->heldLocks to reflect locks already held
   * on the lockable object by this process.
   *
+  * Result: returns value of ProcSleep()
+  *	STATUS_OK if we acquired the lock
+  *	STATUS_ERROR if not (deadlock)
+  *	STATUS_WAITING if not (timeout)
+  *
   * The appropriate partition lock must be held at entry.
   */
! static int
  WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
  {
  	LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
  	LockMethod	lockMethodTable = LockMethods[lockmethodid];
  	char	   *volatile new_status = NULL;
+ 	int		wait_status;
  
  	LOCK_PRINT("WaitOnLock: sleeping on lock",
  			   locallock->lock, locallock->tag.mode);
*************** WaitOnLock(LOCALLOCK *locallock, Resourc
*** 1478,1485 ****
  	 */
  	PG_TRY();
  	{
! 		if (ProcSleep(locallock, lockMethodTable) != STATUS_OK)
  		{
  			/*
  			 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
  			 * now.
--- 1507,1519 ----
  	 */
  	PG_TRY();
  	{
! 		wait_status = ProcSleep(locallock, lockMethodTable);
! 		switch (wait_status)
  		{
+ 		case STATUS_OK:
+ 		case STATUS_WAITING:
+ 			break;
+ 		default:
  			/*
  			 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
  			 * now.
*************** WaitOnLock(LOCALLOCK *locallock, Resourc
*** 1524,1531 ****
  		pfree(new_status);
  	}
  
! 	LOCK_PRINT("WaitOnLock: wakeup on lock",
  			   locallock->lock, locallock->tag.mode);
  }
  
  /*
--- 1558,1571 ----
  		pfree(new_status);
  	}
  
! 	if (wait_status == STATUS_OK)
! 		LOCK_PRINT("WaitOnLock: wakeup on lock",
! 			   locallock->lock, locallock->tag.mode);
! 	else if (wait_status == STATUS_WAITING)
! 		LOCK_PRINT("WaitOnLock: timeout on lock",
  			   locallock->lock, locallock->tag.mode);
+ 
+ 	return wait_status;
  }
  
  /*
*************** VirtualXactLock(VirtualTransactionId vxi
*** 3843,3849 ****
  	LWLockRelease(proc->backendLock);
  
  	/* Time to wait. */
! 	(void) LockAcquire(&tag, ShareLock, false, false);
  
  	LockRelease(&tag, ShareLock, false);
  	return true;
--- 3883,3893 ----
  	LWLockRelease(proc->backendLock);
  
  	/* Time to wait. */
! 	if (LockAcquire(&tag, ShareLock, false, false) == LOCKACQUIRE_NOT_AVAIL)
! 		ereport(ERROR,
! 				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 					errmsg("could not obtain lock on virtual transaction with ID %u",
! 				vxid.localTransactionId)));
  
  	LockRelease(&tag, ShareLock, false);
  	return true;
diff -dcrpN postgresql.1/src/backend/storage/lmgr/proc.c postgresql.2/src/backend/storage/lmgr/proc.c
*** postgresql.1/src/backend/storage/lmgr/proc.c	2012-05-07 19:39:07.966976479 +0200
--- postgresql.2/src/backend/storage/lmgr/proc.c	2012-05-07 19:39:39.846157444 +0200
*************** LockErrorCleanup(void)
*** 637,644 ****
  	if (lockAwaited == NULL)
  		return;
  
! 	/* Turn off the deadlock timer, if it's still running (see ProcSleep) */
  	disable_timeout(DEADLOCK_TIMEOUT, false);
  
  	/* Unlink myself from the wait queue, if on it (might not be anymore!) */
  	partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
--- 637,648 ----
  	if (lockAwaited == NULL)
  		return;
  
! 	/*
! 	 * Turn off the deadlock and lock timeout timers,
! 	 * if they are still running (see ProcSleep)
! 	 */
  	disable_timeout(DEADLOCK_TIMEOUT, false);
+ 	disable_timeout(LOCK_TIMEOUT, false);
  
  	/* Unlink myself from the wait queue, if on it (might not be anymore!) */
  	partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
*************** ProcQueueInit(PROC_QUEUE *queue)
*** 874,880 ****
   * The lock table's partition lock must be held at entry, and will be held
   * at exit.
   *
!  * Result: STATUS_OK if we acquired the lock, STATUS_ERROR if not (deadlock).
   *
   * ASSUME: that no one will fiddle with the queue until after
   *		we release the partition lock.
--- 878,887 ----
   * The lock table's partition lock must be held at entry, and will be held
   * at exit.
   *
!  * Result:
!  *	STATUS_OK if we acquired the lock
!  *	STATUS_ERROR if not (deadlock)
!  *	STATUS_WAITING if not (timeout)
   *
   * ASSUME: that no one will fiddle with the queue until after
   *		we release the partition lock.
*************** ProcSleep(LOCALLOCK *locallock, LockMeth
*** 896,901 ****
--- 903,909 ----
  	LOCKMASK	myHeldLocks = MyProc->heldLocks;
  	bool		early_deadlock = false;
  	bool		allow_autovacuum_cancel = true;
+ 	bool		timeout_detected = false;
  	int			myWaitStatus;
  	PGPROC	   *proc;
  	int			i;
*************** ProcSleep(LOCALLOCK *locallock, LockMeth
*** 1037,1044 ****
  		elog(FATAL, "could not set timer for process wakeup");
  
  	/*
! 	 * If someone wakes us between LWLockRelease and PGSemaphoreLock,
! 	 * PGSemaphoreLock will not block.	The wakeup is "saved" by the semaphore
  	 * implementation.	While this is normally good, there are cases where a
  	 * saved wakeup might be leftover from a previous operation (for example,
  	 * we aborted ProcWaitForSignal just before someone did ProcSendSignal).
--- 1045,1058 ----
  		elog(FATAL, "could not set timer for process wakeup");
  
  	/*
! 	 * Queue the timer for lock timeout, too.
! 	 */
! 	if (!enable_timeout(LOCK_TIMEOUT, LockTimeout))
! 		elog(FATAL, "could not set timer for process wakeup");
! 
! 	/*
! 	 * If someone wakes us between LWLockRelease and PGSemaphoreTimedLock,
! 	 * PGSemaphoreTimedLock will not block.	The wakeup is "saved" by the semaphore
  	 * implementation.	While this is normally good, there are cases where a
  	 * saved wakeup might be leftover from a previous operation (for example,
  	 * we aborted ProcWaitForSignal just before someone did ProcSendSignal).
*************** ProcSleep(LOCALLOCK *locallock, LockMeth
*** 1055,1061 ****
  	 */
  	do
  	{
! 		PGSemaphoreLock(&MyProc->sem, true);
  
  		/*
  		 * waitStatus could change from STATUS_WAITING to something else
--- 1069,1080 ----
  	 */
  	do
  	{
! 		PGSemaphoreTimedLock(&MyProc->sem, true);
! 
! 		/* Check and keep the lock timeout indicator for later checks */
! 		timeout_detected = get_timeout_indicator(LOCK_TIMEOUT);
! 		if (timeout_detected)
! 			break;
  
  		/*
  		 * waitStatus could change from STATUS_WAITING to something else
*************** ProcSleep(LOCALLOCK *locallock, LockMeth
*** 1184,1195 ****
  	} while (myWaitStatus == STATUS_WAITING);
  
  	/*
! 	 * Disable the timer, if it's still running
  	 */
  	if (!disable_timeout(DEADLOCK_TIMEOUT, false))
  		elog(FATAL, "could not disable timer for process wakeup");
  
  	/*
  	 * Re-acquire the lock table's partition lock.  We have to do this to hold
  	 * off cancel/die interrupts before we can mess with lockAwaited (else we
  	 * might have a missed or duplicated locallock update).
--- 1203,1220 ----
  	} while (myWaitStatus == STATUS_WAITING);
  
  	/*
! 	 * Disable the deadlock timer, if it's still running
  	 */
  	if (!disable_timeout(DEADLOCK_TIMEOUT, false))
  		elog(FATAL, "could not disable timer for process wakeup");
  
  	/*
+ 	 * Disable the lock timeout timer, if it's still running
+ 	 */
+ 	if (!disable_timeout(LOCK_TIMEOUT, false))
+ 		elog(FATAL, "could not disable timer for process wakeup");
+ 
+ 	/*
  	 * Re-acquire the lock table's partition lock.  We have to do this to hold
  	 * off cancel/die interrupts before we can mess with lockAwaited (else we
  	 * might have a missed or duplicated locallock update).
*************** ProcSleep(LOCALLOCK *locallock, LockMeth
*** 1197,1202 ****
--- 1222,1236 ----
  	LWLockAcquire(partitionLock, LW_EXCLUSIVE);
  
  	/*
+ 	 * If we timed out, then:
+ 	 *	1. we are no longer waiting, and
+ 	 *	2. the lock will not be granted to us,
+ 	 * so remove ourselves from the wait queue.
+ 	 */
+ 	if (timeout_detected)
+ 		RemoveFromWaitQueue(MyProc, hashcode);
+ 
+ 	/*
  	 * We no longer want LockErrorCleanup to do anything.
  	 */
  	lockAwaited = NULL;
*************** ProcSleep(LOCALLOCK *locallock, LockMeth
*** 1210,1217 ****
  	/*
  	 * We don't have to do anything else, because the awaker did all the
  	 * necessary update of the lock table and MyProc.
  	 */
! 	return MyProc->waitStatus;
  }
  
  
--- 1244,1253 ----
  	/*
  	 * We don't have to do anything else, because the awaker did all the
  	 * necessary update of the lock table and MyProc.
+ 	 * RemoveFromWaitQueue() has set MyProc->waitStatus = STATUS_ERROR,
+ 	 * so we need to distinguish this case.
  	 */
! 	return (timeout_detected ? STATUS_WAITING : MyProc->waitStatus);
  }
  
  
diff -dcrpN postgresql.1/src/backend/storage/lmgr/timeout.c postgresql.2/src/backend/storage/lmgr/timeout.c
*** postgresql.1/src/backend/storage/lmgr/timeout.c	2012-05-07 19:39:07.966976479 +0200
--- postgresql.2/src/backend/storage/lmgr/timeout.c	2012-05-07 19:39:39.846157444 +0200
***************
*** 34,39 ****
--- 34,40 ----
  /* GUC variables */
  int			DeadlockTimeout = 1000;
  int			StatementTimeout = 0;
+ int			LockTimeout = 0;
  
  /*
   * This is used by ProcSleep() in proc.c
*************** static void InitDeadLock(TimestampTz sta
*** 50,55 ****
--- 51,60 ----
  static void DestroyDeadLock(bool keep_indicator);
  static bool CheckDeadLock(void);
  
+ static void InitLockTimeout(TimestampTz start_time, TimestampTz fin_time);
+ static void DestroyLockTimeout(bool keep_indicator);
+ static bool CheckLockTimeout(void);
+ 
  static void InitStatementTimeout(TimestampTz start_time, TimestampTz fin_time);
  static void DestroyStatementTimeout(bool keep_indicator);
  static bool CheckStatementTimeout(void);
*************** static timeout_params base_timeouts[TIME
*** 98,103 ****
--- 103,115 ----
  	},
  
  	{
+ 		LOCK_TIMEOUT, false, false,
+ 		InitLockTimeout, DestroyLockTimeout,
+ 		CheckLockTimeout, GetCurrentTimestamp,
+ 		0
+ 	},
+ 
+ 	{
  		STATEMENT_TIMEOUT, false, false,
  		InitStatementTimeout, DestroyStatementTimeout,
  		CheckStatementTimeout, GetCurrentStatementStartTimestamp,
*************** check_done:
*** 380,385 ****
--- 392,427 ----
  	return true;
  }
  
+ /*
+  * Functions to manage lock timeout
+  */
+ 
+ static void
+ InitLockTimeout(TimestampTz start_time, TimestampTz fin_time)
+ {
+ 	InitTimeout(LOCK_TIMEOUT, start_time, fin_time);
+ }
+ 
+ static void
+ DestroyLockTimeout(bool keep_indicator)
+ {
+ 	DestroyTimeout(LOCK_TIMEOUT, keep_indicator);
+ }
+ 
+ static bool
+ CheckLockTimeout(void)
+ {
+ 	TimestampTz now;
+ 
+ 	now = GetCurrentTimestamp();
+ 
+ 	if (now < base_timeouts[LOCK_TIMEOUT].fin_time)
+ 		return false;
+ 
+ 	base_timeouts[LOCK_TIMEOUT].indicator = true;
+ 	return true;
+ }
+ 
  /*
   * Functions to manage statement timeout
   */
diff -dcrpN postgresql.1/src/backend/utils/adt/lockfuncs.c postgresql.2/src/backend/utils/adt/lockfuncs.c
*** postgresql.1/src/backend/utils/adt/lockfuncs.c	2012-04-16 19:57:22.473915895 +0200
--- postgresql.2/src/backend/utils/adt/lockfuncs.c	2012-05-07 19:39:39.848157456 +0200
*************** pg_advisory_lock_int8(PG_FUNCTION_ARGS)
*** 421,427 ****
  
  	SET_LOCKTAG_INT64(tag, key);
  
! 	(void) LockAcquire(&tag, ExclusiveLock, true, false);
  
  	PG_RETURN_VOID();
  }
--- 421,431 ----
  
  	SET_LOCKTAG_INT64(tag, key);
  
! 	if (LockAcquire(&tag, ExclusiveLock, true, false) == LOCKACQUIRE_NOT_AVAIL)
! 		ereport(ERROR,
! 				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 					errmsg("could not obtain exclusive advisory lock on value %llu",
! 								(long long)key)));
  
  	PG_RETURN_VOID();
  }
*************** pg_advisory_xact_lock_int8(PG_FUNCTION_A
*** 438,444 ****
  
  	SET_LOCKTAG_INT64(tag, key);
  
! 	(void) LockAcquire(&tag, ExclusiveLock, false, false);
  
  	PG_RETURN_VOID();
  }
--- 442,452 ----
  
  	SET_LOCKTAG_INT64(tag, key);
  
! 	if (LockAcquire(&tag, ExclusiveLock, false, false) == LOCKACQUIRE_NOT_AVAIL)
! 		ereport(ERROR,
! 				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 					errmsg("could not obtain exclusive advisory lock on value %llu",
! 								(long long)key)));
  
  	PG_RETURN_VOID();
  }
*************** pg_advisory_lock_shared_int8(PG_FUNCTION
*** 454,460 ****
  
  	SET_LOCKTAG_INT64(tag, key);
  
! 	(void) LockAcquire(&tag, ShareLock, true, false);
  
  	PG_RETURN_VOID();
  }
--- 462,472 ----
  
  	SET_LOCKTAG_INT64(tag, key);
  
! 	if (LockAcquire(&tag, ShareLock, true, false) == LOCKACQUIRE_NOT_AVAIL)
! 		ereport(ERROR,
! 				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 					errmsg("could not obtain shared advisory lock on value %llu",
! 								(long long)key)));
  
  	PG_RETURN_VOID();
  }
*************** pg_advisory_xact_lock_shared_int8(PG_FUN
*** 471,477 ****
  
  	SET_LOCKTAG_INT64(tag, key);
  
! 	(void) LockAcquire(&tag, ShareLock, false, false);
  
  	PG_RETURN_VOID();
  }
--- 483,493 ----
  
  	SET_LOCKTAG_INT64(tag, key);
  
! 	if (LockAcquire(&tag, ShareLock, false, false) == LOCKACQUIRE_NOT_AVAIL)
! 		ereport(ERROR,
! 				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 					errmsg("could not obtain shared advisory lock on value %llu",
! 								(long long)key)));
  
  	PG_RETURN_VOID();
  }
*************** pg_advisory_lock_int4(PG_FUNCTION_ARGS)
*** 604,610 ****
  
  	SET_LOCKTAG_INT32(tag, key1, key2);
  
! 	(void) LockAcquire(&tag, ExclusiveLock, true, false);
  
  	PG_RETURN_VOID();
  }
--- 620,630 ----
  
  	SET_LOCKTAG_INT32(tag, key1, key2);
  
! 	if (LockAcquire(&tag, ExclusiveLock, true, false) == LOCKACQUIRE_NOT_AVAIL)
! 		ereport(ERROR,
! 				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 					errmsg("could not obtain exclusive advisory lock on values %u:%u",
! 								key1, key2)));
  
  	PG_RETURN_VOID();
  }
*************** pg_advisory_xact_lock_int4(PG_FUNCTION_A
*** 622,628 ****
  
  	SET_LOCKTAG_INT32(tag, key1, key2);
  
! 	(void) LockAcquire(&tag, ExclusiveLock, false, false);
  
  	PG_RETURN_VOID();
  }
--- 642,652 ----
  
  	SET_LOCKTAG_INT32(tag, key1, key2);
  
! 	if (LockAcquire(&tag, ExclusiveLock, false, false) == LOCKACQUIRE_NOT_AVAIL)
! 		ereport(ERROR,
! 				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 					errmsg("could not obtain exclusive advisory lock on values %u:%u",
! 								key1, key2)));
  
  	PG_RETURN_VOID();
  }
*************** pg_advisory_lock_shared_int4(PG_FUNCTION
*** 639,645 ****
  
  	SET_LOCKTAG_INT32(tag, key1, key2);
  
! 	(void) LockAcquire(&tag, ShareLock, true, false);
  
  	PG_RETURN_VOID();
  }
--- 663,673 ----
  
  	SET_LOCKTAG_INT32(tag, key1, key2);
  
! 	if (LockAcquire(&tag, ShareLock, true, false) == LOCKACQUIRE_NOT_AVAIL)
! 		ereport(ERROR,
! 				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 					errmsg("could not obtain shared advisory lock on values %u:%u",
! 								key1, key2)));
  
  	PG_RETURN_VOID();
  }
*************** pg_advisory_xact_lock_shared_int4(PG_FUN
*** 657,663 ****
  
  	SET_LOCKTAG_INT32(tag, key1, key2);
  
! 	(void) LockAcquire(&tag, ShareLock, false, false);
  
  	PG_RETURN_VOID();
  }
--- 685,695 ----
  
  	SET_LOCKTAG_INT32(tag, key1, key2);
  
! 	if (LockAcquire(&tag, ShareLock, false, false) == LOCKACQUIRE_NOT_AVAIL)
! 		ereport(ERROR,
! 				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
! 					errmsg("could not obtain shared advisory lock on values %u:%u",
! 								key1, key2)));
  
  	PG_RETURN_VOID();
  }
diff -dcrpN postgresql.1/src/backend/utils/misc/guc.c postgresql.2/src/backend/utils/misc/guc.c
*** postgresql.1/src/backend/utils/misc/guc.c	2012-05-07 19:39:07.970976505 +0200
--- postgresql.2/src/backend/utils/misc/guc.c	2012-05-07 19:39:39.852157478 +0200
*************** static struct config_int ConfigureNamesI
*** 1861,1866 ****
--- 1861,1877 ----
  	},
  
  	{
+ 		{"lock_timeout", PGC_USERSET, CLIENT_CONN_STATEMENT,
+ 			gettext_noop("Sets the maximum allowed timeout for any lock taken by a statement."),
+ 			gettext_noop("A value of 0 turns off the timeout."),
+ 			GUC_UNIT_MS
+ 		},
+ 		&LockTimeout,
+ 		0, 0, INT_MAX,
+ 		NULL, NULL, NULL
+ 	},
+ 
+ 	{
  		{"vacuum_freeze_min_age", PGC_USERSET, CLIENT_CONN_STATEMENT,
  			gettext_noop("Minimum age at which VACUUM should freeze a table row."),
  			NULL
diff -dcrpN postgresql.1/src/backend/utils/misc/postgresql.conf.sample postgresql.2/src/backend/utils/misc/postgresql.conf.sample
*** postgresql.1/src/backend/utils/misc/postgresql.conf.sample	2012-04-30 08:18:06.651737938 +0200
--- postgresql.2/src/backend/utils/misc/postgresql.conf.sample	2012-05-07 19:39:39.853157484 +0200
***************
*** 528,533 ****
--- 528,536 ----
  #------------------------------------------------------------------------------
  
  #deadlock_timeout = 1s
+ #lock_timeout = 0			# timeout on acquiring heavy-weight locks
+ 					# taken by statements, in milliseconds;
+ 					# 0 (the default) disables the timeout
  #max_locks_per_transaction = 64		# min 10
  					# (change requires restart)
  # Note:  Each lock table slot uses ~270 bytes of shared memory, and there are
diff -dcrpN postgresql.1/src/include/storage/pg_sema.h postgresql.2/src/include/storage/pg_sema.h
*** postgresql.1/src/include/storage/pg_sema.h	2012-04-16 19:57:22.672918205 +0200
--- postgresql.2/src/include/storage/pg_sema.h	2012-05-07 19:39:39.854157490 +0200
*************** extern void PGSemaphoreUnlock(PGSemaphor
*** 80,83 ****
--- 80,89 ----
  /* Lock a semaphore only if able to do so without blocking */
  extern bool PGSemaphoreTryLock(PGSemaphore sema);
  
+ /*
+  * Lock a semaphore (decrement count), blocking for at most
+  * "lock_timeout" milliseconds if count would be < 0
+  */
+ extern void PGSemaphoreTimedLock(PGSemaphore sema, bool interruptOK);
+ 
  #endif   /* PG_SEMA_H */
diff -dcrpN postgresql.1/src/include/storage/timeout.h postgresql.2/src/include/storage/timeout.h
*** postgresql.1/src/include/storage/timeout.h	2012-05-07 19:39:07.971976511 +0200
--- postgresql.2/src/include/storage/timeout.h	2012-05-07 19:39:39.854157490 +0200
***************
*** 19,27 ****
--- 19,29 ----
  /* configurable options */
  extern int	DeadlockTimeout;
  extern int	StatementTimeout;
+ extern int	LockTimeout;
  
  typedef enum TimeoutName {
  	DEADLOCK_TIMEOUT,
+ 	LOCK_TIMEOUT,
  	STATEMENT_TIMEOUT,
  	STANDBY_DEADLOCK_TIMEOUT,
  	STANDBY_TIMEOUT,
diff -dcrpN postgresql.1/src/test/regress/expected/prepared_xacts.out postgresql.2/src/test/regress/expected/prepared_xacts.out
*** postgresql.1/src/test/regress/expected/prepared_xacts.out	2012-04-16 19:57:22.776919413 +0200
--- postgresql.2/src/test/regress/expected/prepared_xacts.out	2012-05-07 19:39:39.855157495 +0200
*************** set statement_timeout to 2000;
*** 198,203 ****
--- 198,210 ----
  SELECT * FROM pxtest3;
  ERROR:  canceling statement due to statement timeout
  reset statement_timeout;
+ -- pxtest3 should be locked because of the pending DROP
+ set lock_timeout to 2000;
+ SELECT * FROM pxtest3;
+ ERROR:  could not obtain lock on relation "pxtest3"
+ LINE 1: SELECT * FROM pxtest3;
+                       ^
+ reset lock_timeout;
  -- Disconnect, we will continue testing in a different backend
  \c -
  -- There should still be two prepared transactions
*************** set statement_timeout to 2000;
*** 213,218 ****
--- 220,232 ----
  SELECT * FROM pxtest3;
  ERROR:  canceling statement due to statement timeout
  reset statement_timeout;
+ -- pxtest3 should be locked because of the pending DROP
+ set lock_timeout to 2000;
+ SELECT * FROM pxtest3;
+ ERROR:  could not obtain lock on relation "pxtest3"
+ LINE 1: SELECT * FROM pxtest3;
+                       ^
+ reset lock_timeout;
  -- Commit table creation
  COMMIT PREPARED 'regress-one';
  \d pxtest2
diff -dcrpN postgresql.1/src/test/regress/sql/prepared_xacts.sql postgresql.2/src/test/regress/sql/prepared_xacts.sql
*** postgresql.1/src/test/regress/sql/prepared_xacts.sql	2012-04-16 19:57:22.796919644 +0200
--- postgresql.2/src/test/regress/sql/prepared_xacts.sql	2012-05-07 19:39:39.855157495 +0200
*************** set statement_timeout to 2000;
*** 126,131 ****
--- 126,136 ----
  SELECT * FROM pxtest3;
  reset statement_timeout;
  
+ -- pxtest3 should be locked because of the pending DROP
+ set lock_timeout to 2000;
+ SELECT * FROM pxtest3;
+ reset lock_timeout;
+ 
  -- Disconnect, we will continue testing in a different backend
  \c -
  
*************** set statement_timeout to 2000;
*** 137,142 ****
--- 142,152 ----
  SELECT * FROM pxtest3;
  reset statement_timeout;
  
+ -- pxtest3 should be locked because of the pending DROP
+ set lock_timeout to 2000;
+ SELECT * FROM pxtest3;
+ reset lock_timeout;
+ 
  -- Commit table creation
  COMMIT PREPARED 'regress-one';
  \d pxtest2
