commit b0f845b9311875ed2d47e285db7d542ff26eeabb
Author: Heikki Linnakangas <heikki.linnakangas@iki.fi>
Date:   Tue Jan 24 14:06:05 2012 +0200

    Add welcome_message GUC, a free-form message to display at psql login.
    
    Such a banner can be useful for reminding people to be careful when connecting
    to a production database, for example.
    
    Jim Mlodgenski
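
    A minimal usage sketch (the role name and message text are illustrative,
    not part of the patch): because the new GUC is PGC_USERSET and GUC_REPORT,
    a value set in postgresql.conf or per role is reported to the client at
    connection time, and psql prints it at startup and after \connect.

        -- in postgresql.conf:  welcome_message = 'This is the PRODUCTION cluster. Be careful.'
        -- or per role (illustrative role name):
        ALTER ROLE app_user SET welcome_message = 'This is the PRODUCTION cluster. Be careful.';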

diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index e55b503..72b8b46 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -558,6 +558,19 @@ SET ENABLE_SEQSCAN TO OFF;
       </listitem>
      </varlistentry>
 
+     <varlistentry id="guc-welcome-message" xreflabel="welcome_message">
+      <term><varname>welcome_message</varname> (<type>string</type>)</term>
+      <indexterm>
+       <primary><varname>welcome_message</> configuration parameter</primary>
+      </indexterm>
+      <listitem>
+       <para>
+        A message that will be displayed when connecting with
+        <application>psql</>, or any other client application that supports it.
+       </para>
+      </listitem>
+     </varlistentry>
+
      <varlistentry id="guc-tcp-keepalives-idle" xreflabel="tcp_keepalives_idle">
       <term><varname>tcp_keepalives_idle</varname> (<type>integer</type>)</term>
       <indexterm>
diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml
index 72c9384..6573901 100644
--- a/doc/src/sgml/libpq.sgml
+++ b/doc/src/sgml/libpq.sgml
@@ -1487,6 +1487,7 @@ const char *PQparameterStatus(const PGconn *conn, const char *paramName);
        <varname>server_encoding</>,
        <varname>client_encoding</>,
        <varname>application_name</>,
+       <varname>welcome_message</>,
        <varname>is_superuser</>,
        <varname>session_authorization</>,
        <varname>DateStyle</>,
@@ -1499,7 +1500,8 @@ const char *PQparameterStatus(const PGconn *conn, const char *paramName);
        <varname>standard_conforming_strings</> was not reported by releases
        before 8.1;
        <varname>IntervalStyle</> was not reported by releases before 8.4;
-       <varname>application_name</> was not reported by releases before 9.0.)
+       <varname>application_name</> was not reported by releases before 9.0;
+       <varname>welcome_message</> was not reported by releases before 9.2.)
        Note that
        <varname>server_version</>,
        <varname>server_encoding</> and
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 9fc96b2..d7f04f8 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -445,6 +445,9 @@ int			tcp_keepalives_idle;
 int			tcp_keepalives_interval;
 int			tcp_keepalives_count;
 
+/* This is not needed outside this module */
+static char *welcome_message;
+
 /*
  * These variables are all dummies that don't do anything, except in some
  * cases provide the value for SHOW to display.  The real state is elsewhere
@@ -3018,6 +3021,17 @@ static struct config_string ConfigureNamesString[] =
 		check_application_name, assign_application_name, NULL
 	},
 
+	{
+		{"welcome_message", PGC_USERSET, CONN_AUTH_SETTINGS,
+		 gettext_noop("Message displayed to the user when connecting."),
+		 NULL,
+		 GUC_REPORT | GUC_NO_SHOW_ALL
+		},
+		&welcome_message,
+		"",
+		NULL, NULL, NULL
+	},
+
 	/* End-of-list marker */
 	{
 		{NULL, 0, 0, NULL, NULL}, NULL, NULL, NULL, NULL, NULL
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index 315db46..bbe11ca 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -73,6 +73,7 @@
 					# (change requires restart)
 #bonjour_name = ''			# defaults to the computer name
 					# (change requires restart)
+#welcome_message = ''			# message displayed in psql after login
 
 # - Security and Authentication -
 
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index 6c3f0aa..ee9d443 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -1653,6 +1653,8 @@ do_connect(char *dbname, char *user, char *host, char *port)
 		else
 			printf(_("You are now connected to database \"%s\" as user \"%s\".\n"),
 				   PQdb(pset.db), PQuser(pset.db));
+
+		printWelcomeMessage();
 	}
 
 	if (o_conn)
@@ -1707,6 +1709,23 @@ connection_warnings(bool in_startup)
 
 
 /*
+ * printWelcomeMessage
+ *
+ * Prints welcome_message, if any
+ */
+void
+printWelcomeMessage(void)
+{
+	const char *message;
+
+	message = PQparameterStatus(pset.db, "welcome_message");
+
+	if (message && message[0] != '\0')
+		printf(_("%s\n"), message);
+}
+
+
+/*
  * printSSLInfo
  *
  * Prints information about the current SSL connection, if SSL is in use
diff --git a/src/bin/psql/command.h b/src/bin/psql/command.h
index f0bcea0..f9dd7f5 100644
--- a/src/bin/psql/command.h
+++ b/src/bin/psql/command.h
@@ -36,6 +36,8 @@ extern bool do_pset(const char *param,
 
 extern void connection_warnings(bool in_startup);
 
+extern void printWelcomeMessage(void);
+
 extern void SyncVariables(void);
 
 extern void UnsyncVariables(void);
diff --git a/src/bin/psql/startup.c b/src/bin/psql/startup.c
index 8b1864c..7062dd9 100644
--- a/src/bin/psql/startup.c
+++ b/src/bin/psql/startup.c
@@ -302,7 +302,10 @@ main(int argc, char *argv[])
 
 		connection_warnings(true);
 		if (!pset.quiet && !pset.notty)
+		{
 			printf(_("Type \"help\" for help.\n\n"));
+			printWelcomeMessage();
+		}
 		if (!pset.notty)
 			initializeInput(options.no_readline ? 0 : 1);
 		if (options.action_string)		/* -f - was used */

commit c172b7b02e6f6008d6dad66ddee8f67faf223c5b
Author: Simon Riggs <simon@2ndQuadrant.com>
Date:   Mon Jan 23 23:37:32 2012 +0000

    Resolve timing issue with logging locks for Hot Standby.
    We log AccessExclusiveLocks for replay onto standby nodes,
    but because of timing issues in the ProcArray it is possible to
    log a lock that is still held by a just-committed transaction
    whose entry is about to be removed. To avoid this race we
    skip applying locks taken by transactions with an invalid xid.
    
    Simon Riggs, bug report Tom Lane, diagnosis Pavan Deolasee

diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 6ea0a28..dc2768b 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -499,7 +499,7 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
 	 * Remove stale transactions, if any.
 	 */
 	ExpireOldKnownAssignedTransactionIds(running->oldestRunningXid);
-	StandbyReleaseOldLocks(running->oldestRunningXid);
+	StandbyReleaseOldLocks(running->xcnt, running->xids);
 
 	/*
 	 * If our snapshot is already valid, nothing else to do...
@@ -554,12 +554,6 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
 	 */
 
 	/*
-	 * Release any locks belonging to old transactions that are not running
-	 * according to the running-xacts record.
-	 */
-	StandbyReleaseOldLocks(running->nextXid);
-
-	/*
 	 * Nobody else is running yet, but take locks anyhow
 	 */
 	LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index c88557c..dc6833b 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -525,7 +525,9 @@ StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid)
 	LOCKTAG		locktag;
 
 	/* Already processed? */
-	if (TransactionIdDidCommit(xid) || TransactionIdDidAbort(xid))
+	if (!TransactionIdIsValid(xid) ||
+		TransactionIdDidCommit(xid) ||
+		TransactionIdDidAbort(xid))
 		return;
 
 	elog(trace_recovery(DEBUG4),
@@ -607,34 +609,86 @@ StandbyReleaseLockTree(TransactionId xid, int nsubxids, TransactionId *subxids)
 }
 
 /*
- * StandbyReleaseLocksMany
- *		Release standby locks held by XIDs < removeXid
- *
- * If keepPreparedXacts is true, keep prepared transactions even if
- * they're older than removeXid
+ * Called at end of recovery and when we see a shutdown checkpoint.
  */
-static void
-StandbyReleaseLocksMany(TransactionId removeXid, bool keepPreparedXacts)
+void
+StandbyReleaseAllLocks(void)
+{
+	ListCell   *cell,
+			   *prev,
+			   *next;
+	LOCKTAG		locktag;
+
+	elog(trace_recovery(DEBUG2), "release all standby locks");
+
+	prev = NULL;
+	for (cell = list_head(RecoveryLockList); cell; cell = next)
+	{
+		xl_standby_lock *lock = (xl_standby_lock *) lfirst(cell);
+
+		next = lnext(cell);
+
+		elog(trace_recovery(DEBUG4),
+			 "releasing recovery lock: xid %u db %u rel %u",
+			 lock->xid, lock->dbOid, lock->relOid);
+		SET_LOCKTAG_RELATION(locktag, lock->dbOid, lock->relOid);
+		if (!LockRelease(&locktag, AccessExclusiveLock, true))
+			elog(LOG,
+				 "RecoveryLockList contains entry for lock no longer recorded by lock manager: xid %u database %u relation %u",
+				 lock->xid, lock->dbOid, lock->relOid);
+		RecoveryLockList = list_delete_cell(RecoveryLockList, cell, prev);
+		pfree(lock);
+	}
+}
+
+/*
+ * StandbyReleaseOldLocks
+ *		Release standby locks held by XIDs that aren't running, as long
+ *		as they're not prepared transactions.
+ */
+void
+StandbyReleaseOldLocks(int nxids, TransactionId *xids)
 {
 	ListCell   *cell,
 			   *prev,
 			   *next;
 	LOCKTAG		locktag;
 
-	/*
-	 * Release all matching locks.
-	 */
 	prev = NULL;
 	for (cell = list_head(RecoveryLockList); cell; cell = next)
 	{
 		xl_standby_lock *lock = (xl_standby_lock *) lfirst(cell);
+		bool	remove = false;
 
 		next = lnext(cell);
 
-		if (!TransactionIdIsValid(removeXid) || TransactionIdPrecedes(lock->xid, removeXid))
+		Assert(TransactionIdIsValid(lock->xid));
+
+		if (StandbyTransactionIdIsPrepared(lock->xid))
+			remove = false;
+		else
+		{
+			int		i;
+			bool	found = false;
+
+			for (i = 0; i < nxids; i++)
+			{
+				if (lock->xid == xids[i])
+				{
+					found = true;
+					break;
+				}
+			}
+
+			/*
+			 * If it's not a running transaction, remove it.
+			 */
+			if (!found)
+				remove = true;
+		}
+
+		if (remove)
 		{
-			if (keepPreparedXacts && StandbyTransactionIdIsPrepared(lock->xid))
-				continue;
 			elog(trace_recovery(DEBUG4),
 				 "releasing recovery lock: xid %u db %u rel %u",
 				 lock->xid, lock->dbOid, lock->relOid);
@@ -652,27 +706,6 @@ StandbyReleaseLocksMany(TransactionId removeXid, bool keepPreparedXacts)
 }
 
 /*
- * Called at end of recovery and when we see a shutdown checkpoint.
- */
-void
-StandbyReleaseAllLocks(void)
-{
-	elog(trace_recovery(DEBUG2), "release all standby locks");
-	StandbyReleaseLocksMany(InvalidTransactionId, false);
-}
-
-/*
- * StandbyReleaseOldLocks
- *		Release standby locks held by XIDs < removeXid, as long
- *		as they're not prepared transactions.
- */
-void
-StandbyReleaseOldLocks(TransactionId removeXid)
-{
-	StandbyReleaseLocksMany(removeXid, true);
-}
-
-/*
  * --------------------------------------------------------------------
  *		Recovery handling for Rmgr RM_STANDBY_ID
  *
@@ -813,6 +846,13 @@ standby_desc(StringInfo buf, uint8 xl_info, char *rec)
  * Later, when we apply the running xact data we must be careful to ignore
  * transactions already committed, since those commits raced ahead when
  * making WAL entries.
+ *
+ * The loose timing also means that locks may be recorded that have a
+ * zero xid, since xids are removed from procs before locks are removed.
+ * So we must prune the lock list down to ensure we hold locks only for
+ * currently running xids, performed by StandbyReleaseOldLocks().
+ * Zero xids should no longer be possible, but we may be replaying WAL
+ * from a time when they were possible.
  */
 void
 LogStandbySnapshot(TransactionId *nextXid)
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 762294a..a98dfca 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -3190,8 +3190,18 @@ GetRunningTransactionLocks(int *nlocks)
 			PGPROC	   *proc = proclock->tag.myProc;
 			PGXACT	   *pgxact = &ProcGlobal->allPgXact[proc->pgprocno];
 			LOCK	   *lock = proclock->tag.myLock;
+			TransactionId xid = pgxact->xid;
 
-			accessExclusiveLocks[index].xid = pgxact->xid;
+			/*
+			 * Don't record locks for transactions that we know have already
+			 * written their commit WAL record but have not yet released
+			 * their locks.  It is still possible to see locks held by
+			 * already-complete transactions that haven't yet zeroed their xids.
+			 */
+			if (!TransactionIdIsValid(xid))
+				continue;
+
+			accessExclusiveLocks[index].xid = xid;
 			accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
 			accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
 
diff --git a/src/include/storage/standby.h b/src/include/storage/standby.h
index a539ec2..1027bbc 100644
--- a/src/include/storage/standby.h
+++ b/src/include/storage/standby.h
@@ -48,7 +48,7 @@ extern void StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid
 extern void StandbyReleaseLockTree(TransactionId xid,
 					   int nsubxids, TransactionId *subxids);
 extern void StandbyReleaseAllLocks(void);
-extern void StandbyReleaseOldLocks(TransactionId removeXid);
+extern void StandbyReleaseOldLocks(int nxids, TransactionId *xids);
 
 /*
  * XLOG message types

commit b8a91d9d1c7ec75aaecf13df687ec7b5b0ed35a6
Author: Simon Riggs <simon@2ndQuadrant.com>
Date:   Mon Jan 23 23:25:04 2012 +0000

    ALTER <thing> [ IF EXISTS ] ... lets DDL be skipped with a NOTICE rather
    than fail with an error when the target object does not exist,
    e.g. ALTER FOREIGN TABLE IF EXISTS foo RENAME TO bar
    
    Pavel Stehule
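
    A few hedged examples of the new syntax (object and schema names are
    illustrative); each statement issues a NOTICE and is otherwise a no-op
    when the target does not exist:

        ALTER TABLE IF EXISTS no_such_table RENAME TO renamed_table;
        ALTER SEQUENCE IF EXISTS no_such_seq RESTART WITH 100;
        ALTER VIEW IF EXISTS no_such_view SET SCHEMA archive;
        ALTER FOREIGN TABLE IF EXISTS no_such_ft RENAME c1 TO c2;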

diff --git a/doc/src/sgml/ref/alter_foreign_table.sgml b/doc/src/sgml/ref/alter_foreign_table.sgml
index 99e8e90..455527c 100644
--- a/doc/src/sgml/ref/alter_foreign_table.sgml
+++ b/doc/src/sgml/ref/alter_foreign_table.sgml
@@ -21,13 +21,13 @@ PostgreSQL documentation
 
  <refsynopsisdiv>
 <synopsis>
-ALTER FOREIGN TABLE <replaceable class="PARAMETER">name</replaceable>
+ALTER FOREIGN TABLE [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceable>
     <replaceable class="PARAMETER">action</replaceable> [, ... ]
-ALTER FOREIGN TABLE <replaceable class="PARAMETER">name</replaceable>
+ALTER FOREIGN TABLE [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceable>
     RENAME [ COLUMN ] <replaceable class="PARAMETER">column</replaceable> TO <replaceable class="PARAMETER">new_column</replaceable>
-ALTER FOREIGN TABLE <replaceable class="PARAMETER">name</replaceable>
+ALTER FOREIGN TABLE [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceable>
     RENAME TO <replaceable class="PARAMETER">new_name</replaceable>
-ALTER FOREIGN TABLE <replaceable class="PARAMETER">name</replaceable>
+ALTER FOREIGN TABLE [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceable>
     SET SCHEMA <replaceable class="PARAMETER">new_schema</replaceable>
 
 <phrase>where <replaceable class="PARAMETER">action</replaceable> is one of:</phrase>
@@ -76,6 +76,16 @@ ALTER FOREIGN TABLE <replaceable class="PARAMETER">name</replaceable>
    </varlistentry>
 
    <varlistentry>
+    <term><literal>IF EXISTS</literal></term>
+    <listitem>
+     <para>
+      Do not throw an error if the foreign table does not exist. A notice is issued
+      in this case.
+     </para>
+    </listitem>
+   </varlistentry>
+
+   <varlistentry>
     <term><literal>SET DATA TYPE</literal></term>
     <listitem>
      <para>
diff --git a/doc/src/sgml/ref/alter_index.sgml b/doc/src/sgml/ref/alter_index.sgml
index c701d16..d210077 100644
--- a/doc/src/sgml/ref/alter_index.sgml
+++ b/doc/src/sgml/ref/alter_index.sgml
@@ -21,10 +21,10 @@ PostgreSQL documentation
 
  <refsynopsisdiv>
 <synopsis>
-ALTER INDEX <replaceable class="PARAMETER">name</replaceable> RENAME TO <replaceable class="PARAMETER">new_name</replaceable>
-ALTER INDEX <replaceable class="PARAMETER">name</replaceable> SET TABLESPACE <replaceable class="PARAMETER">tablespace_name</replaceable>
-ALTER INDEX <replaceable class="PARAMETER">name</replaceable> SET ( <replaceable class="PARAMETER">storage_parameter</replaceable> = <replaceable class="PARAMETER">value</replaceable> [, ... ] )
-ALTER INDEX <replaceable class="PARAMETER">name</replaceable> RESET ( <replaceable class="PARAMETER">storage_parameter</replaceable> [, ... ] )
+ALTER INDEX [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceable> RENAME TO <replaceable class="PARAMETER">new_name</replaceable>
+ALTER INDEX [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceable> SET TABLESPACE <replaceable class="PARAMETER">tablespace_name</replaceable>
+ALTER INDEX [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceable> SET ( <replaceable class="PARAMETER">storage_parameter</replaceable> = <replaceable class="PARAMETER">value</replaceable> [, ... ] )
+ALTER INDEX [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceable> RESET ( <replaceable class="PARAMETER">storage_parameter</replaceable> [, ... ] )
 </synopsis>
  </refsynopsisdiv>
 
@@ -38,6 +38,16 @@ ALTER INDEX <replaceable class="PARAMETER">name</replaceable> RESET ( <replaceab
   <variablelist>
 
    <varlistentry>
+    <term><literal>IF EXISTS</literal></term>
+    <listitem>
+     <para>
+      Do not throw an error if the index does not exist. A notice is issued
+      in this case.
+     </para>
+    </listitem>
+   </varlistentry>
+
+   <varlistentry>
     <term><literal>RENAME</literal></term>
     <listitem>
      <para>
diff --git a/doc/src/sgml/ref/alter_sequence.sgml b/doc/src/sgml/ref/alter_sequence.sgml
index 79795d9..6ddd461 100644
--- a/doc/src/sgml/ref/alter_sequence.sgml
+++ b/doc/src/sgml/ref/alter_sequence.sgml
@@ -23,15 +23,15 @@ PostgreSQL documentation
 
  <refsynopsisdiv>
 <synopsis>
-ALTER SEQUENCE <replaceable class="parameter">name</replaceable> [ INCREMENT [ BY ] <replaceable class="parameter">increment</replaceable> ]
+ALTER SEQUENCE [ IF EXISTS ] <replaceable class="parameter">name</replaceable> [ INCREMENT [ BY ] <replaceable class="parameter">increment</replaceable> ]
     [ MINVALUE <replaceable class="parameter">minvalue</replaceable> | NO MINVALUE ] [ MAXVALUE <replaceable class="parameter">maxvalue</replaceable> | NO MAXVALUE ]
     [ START [ WITH ] <replaceable class="parameter">start</replaceable> ]
     [ RESTART [ [ WITH ] <replaceable class="parameter">restart</replaceable> ] ]
     [ CACHE <replaceable class="parameter">cache</replaceable> ] [ [ NO ] CYCLE ]
     [ OWNED BY { <replaceable class="parameter">table</replaceable>.<replaceable class="parameter">column</replaceable> | NONE } ]
-ALTER SEQUENCE <replaceable class="parameter">name</replaceable> OWNER TO <replaceable class="PARAMETER">new_owner</replaceable>
-ALTER SEQUENCE <replaceable class="parameter">name</replaceable> RENAME TO <replaceable class="parameter">new_name</replaceable>
-ALTER SEQUENCE <replaceable class="parameter">name</replaceable> SET SCHEMA <replaceable class="parameter">new_schema</replaceable>
+ALTER SEQUENCE [ IF EXISTS ] <replaceable class="parameter">name</replaceable> OWNER TO <replaceable class="PARAMETER">new_owner</replaceable>
+ALTER SEQUENCE [ IF EXISTS ] <replaceable class="parameter">name</replaceable> RENAME TO <replaceable class="parameter">new_name</replaceable>
+ALTER SEQUENCE [ IF EXISTS ] <replaceable class="parameter">name</replaceable> SET SCHEMA <replaceable class="parameter">new_schema</replaceable>
 </synopsis>
  </refsynopsisdiv>
 
@@ -71,6 +71,16 @@ ALTER SEQUENCE <replaceable class="parameter">name</replaceable> SET SCHEMA <rep
      </varlistentry>
 
      <varlistentry>
+      <term><literal>IF EXISTS</literal></term>
+      <listitem>
+       <para>
+        Do not throw an error if the sequence does not exist. A notice is issued
+        in this case.
+       </para>
+      </listitem>
+     </varlistentry>
+
+     <varlistentry>
       <term><replaceable class="parameter">increment</replaceable></term>
       <listitem>
        <para>
diff --git a/doc/src/sgml/ref/alter_table.sgml b/doc/src/sgml/ref/alter_table.sgml
index 6f1917f..951b63b 100644
--- a/doc/src/sgml/ref/alter_table.sgml
+++ b/doc/src/sgml/ref/alter_table.sgml
@@ -21,13 +21,13 @@ PostgreSQL documentation
 
  <refsynopsisdiv>
 <synopsis>
-ALTER TABLE [ ONLY ] <replaceable class="PARAMETER">name</replaceable> [ * ]
+ALTER TABLE [ IF EXISTS ] [ ONLY ] <replaceable class="PARAMETER">name</replaceable> [ * ]
     <replaceable class="PARAMETER">action</replaceable> [, ... ]
-ALTER TABLE [ ONLY ] <replaceable class="PARAMETER">name</replaceable> [ * ]
+ALTER TABLE [ IF EXISTS ] [ ONLY ] <replaceable class="PARAMETER">name</replaceable> [ * ]
     RENAME [ COLUMN ] <replaceable class="PARAMETER">column</replaceable> TO <replaceable class="PARAMETER">new_column</replaceable>
-ALTER TABLE <replaceable class="PARAMETER">name</replaceable>
+ALTER TABLE [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceable>
     RENAME TO <replaceable class="PARAMETER">new_name</replaceable>
-ALTER TABLE <replaceable class="PARAMETER">name</replaceable>
+ALTER TABLE [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceable>
     SET SCHEMA <replaceable class="PARAMETER">new_schema</replaceable>
 
 <phrase>where <replaceable class="PARAMETER">action</replaceable> is one of:</phrase>
@@ -110,6 +110,16 @@ ALTER TABLE <replaceable class="PARAMETER">name</replaceable>
    </varlistentry>
 
    <varlistentry>
+    <term><literal>IF EXISTS</literal></term>
+    <listitem>
+     <para>
+      Do not throw an error if the table does not exist. A notice is issued
+      in this case.
+     </para>
+    </listitem>
+   </varlistentry>
+
+   <varlistentry>
     <term><literal>SET DATA TYPE</literal></term>
     <listitem>
      <para>
diff --git a/doc/src/sgml/ref/alter_view.sgml b/doc/src/sgml/ref/alter_view.sgml
index e78176b..c518d93 100644
--- a/doc/src/sgml/ref/alter_view.sgml
+++ b/doc/src/sgml/ref/alter_view.sgml
@@ -21,13 +21,13 @@ PostgreSQL documentation
 
  <refsynopsisdiv>
 <synopsis>
-ALTER VIEW <replaceable class="parameter">name</replaceable> ALTER [ COLUMN ] <replaceable class="PARAMETER">column</replaceable> SET DEFAULT <replaceable class="PARAMETER">expression</replaceable>
-ALTER VIEW <replaceable class="parameter">name</replaceable> ALTER [ COLUMN ] <replaceable class="PARAMETER">column</replaceable> DROP DEFAULT
-ALTER VIEW <replaceable class="parameter">name</replaceable> OWNER TO <replaceable class="PARAMETER">new_owner</replaceable>
-ALTER VIEW <replaceable class="parameter">name</replaceable> RENAME TO <replaceable class="parameter">new_name</replaceable>
-ALTER VIEW <replaceable class="parameter">name</replaceable> SET SCHEMA <replaceable class="parameter">new_schema</replaceable>
-ALTER VIEW <replaceable class="parameter">name</replaceable> SET ( <replaceable class="parameter">view_option_name</replaceable> [= <replaceable class="parameter">view_option_value</replaceable>] [, ... ] )
-ALTER VIEW <replaceable class="parameter">name</replaceable> RESET ( <replaceable class="parameter">view_option_name</replaceable> [, ... ] )
+ALTER VIEW [ IF EXISTS ] <replaceable class="parameter">name</replaceable> ALTER [ COLUMN ] <replaceable class="PARAMETER">column</replaceable> SET DEFAULT <replaceable class="PARAMETER">expression</replaceable>
+ALTER VIEW [ IF EXISTS ] <replaceable class="parameter">name</replaceable> ALTER [ COLUMN ] <replaceable class="PARAMETER">column</replaceable> DROP DEFAULT
+ALTER VIEW [ IF EXISTS ] <replaceable class="parameter">name</replaceable> OWNER TO <replaceable class="PARAMETER">new_owner</replaceable>
+ALTER VIEW [ IF EXISTS ] <replaceable class="parameter">name</replaceable> RENAME TO <replaceable class="parameter">new_name</replaceable>
+ALTER VIEW [ IF EXISTS ] <replaceable class="parameter">name</replaceable> SET SCHEMA <replaceable class="parameter">new_schema</replaceable>
+ALTER VIEW [ IF EXISTS ] <replaceable class="parameter">name</replaceable> SET ( <replaceable class="parameter">view_option_name</replaceable> [= <replaceable class="parameter">view_option_value</replaceable>] [, ... ] )
+ALTER VIEW [ IF EXISTS ] <replaceable class="parameter">name</replaceable> RESET ( <replaceable class="parameter">view_option_name</replaceable> [, ... ] )
 </synopsis>
  </refsynopsisdiv>
 
@@ -66,6 +66,16 @@ ALTER VIEW <replaceable class="parameter">name</replaceable> RESET ( <replaceabl
    </varlistentry>
 
    <varlistentry>
+    <term><literal>IF EXISTS</literal></term>
+    <listitem>
+     <para>
+      Do not throw an error if the view does not exist. A notice is issued
+      in this case.
+     </para>
+    </listitem>
+   </varlistentry>
+
+   <varlistentry>
     <term><literal>SET</literal>/<literal>DROP DEFAULT</literal></term>
     <listitem>
      <para>
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index f7712a9..d3739cb 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -425,7 +425,15 @@ AlterSequence(AlterSeqStmt *stmt)
 	List	   *owned_by;
 
 	/* Open and lock sequence. */
-	relid = RangeVarGetRelid(stmt->sequence, AccessShareLock, false);
+	relid = RangeVarGetRelid(stmt->sequence, AccessShareLock, stmt->missing_ok);
+	if (relid == InvalidOid)
+	{
+		ereport(NOTICE,
+				(errmsg("relation \"%s\" does not exist, skipping",
+							stmt->sequence->relname)));
+		return;
+	}
+
 	init_sequence(relid, &elm, &seqrel);
 
 	/* allow ALTER to sequence owner only */
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index cc210f0..cb8ac67 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -2310,9 +2310,18 @@ renameatt(RenameStmt *stmt)
 
 	/* lock level taken here should match renameatt_internal */
 	relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
-									 false, false,
+									 stmt->missing_ok, false,
 									 RangeVarCallbackForRenameAttribute,
 									 NULL);
+
+	if (!OidIsValid(relid))
+	{
+		ereport(NOTICE,
+				(errmsg("relation \"%s\" does not exist, skipping",
+							stmt->relation->relname)));
+		return;
+	}
+
 	renameatt_internal(relid,
 					   stmt->subname,	/* old att name */
 					   stmt->newname,	/* new att name */
@@ -2338,10 +2347,18 @@ RenameRelation(RenameStmt *stmt)
 	 * lock escalation.
 	 */
 	relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
-									 false, false,
+									 stmt->missing_ok, false,
 									 RangeVarCallbackForAlterRelation,
 									 (void *) stmt);
 
+	if (!OidIsValid(relid))
+	{
+		ereport(NOTICE,
+				(errmsg("relation \"%s\" does not exist, skipping",
+							stmt->relation->relname)));
+		return;
+	}
+
 	/* Do the work */
 	RenameRelationInternal(relid, stmt->newname);
 }
@@ -2482,7 +2499,7 @@ CheckTableNotInUse(Relation rel, const char *stmt)
 Oid
 AlterTableLookupRelation(AlterTableStmt *stmt, LOCKMODE lockmode)
 {
-	return RangeVarGetRelidExtended(stmt->relation, lockmode, false, false,
+	return RangeVarGetRelidExtended(stmt->relation, lockmode, stmt->missing_ok, false,
 									RangeVarCallbackForAlterRelation,
 									(void *) stmt);
 }
@@ -9434,9 +9451,18 @@ AlterTableNamespace(AlterObjectSchemaStmt *stmt)
 	RangeVar   *newrv;
 
 	relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
-									 false, false,
+									 stmt->missing_ok, false,
 									 RangeVarCallbackForAlterRelation,
 									 (void *) stmt);
+
+	if (!OidIsValid(relid))
+	{
+		ereport(NOTICE,
+				(errmsg("relation \"%s\" does not exist, skipping",
+							stmt->relation->relname)));
+		return;
+	}
+
 	rel = relation_open(relid, NoLock);
 
 	oldNspOid = RelationGetNamespace(rel);
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 71da0d8..cc3168d 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -2540,6 +2540,7 @@ _copyAlterTableStmt(const AlterTableStmt *from)
 	COPY_NODE_FIELD(relation);
 	COPY_NODE_FIELD(cmds);
 	COPY_SCALAR_FIELD(relkind);
+	COPY_SCALAR_FIELD(missing_ok);
 
 	return newnode;
 }
@@ -2904,6 +2905,7 @@ _copyRenameStmt(const RenameStmt *from)
 	COPY_STRING_FIELD(subname);
 	COPY_STRING_FIELD(newname);
 	COPY_SCALAR_FIELD(behavior);
+	COPY_SCALAR_FIELD(missing_ok);
 
 	return newnode;
 }
@@ -2919,6 +2921,7 @@ _copyAlterObjectSchemaStmt(const AlterObjectSchemaStmt *from)
 	COPY_NODE_FIELD(objarg);
 	COPY_STRING_FIELD(addname);
 	COPY_STRING_FIELD(newschema);
+	COPY_SCALAR_FIELD(missing_ok);
 
 	return newnode;
 }
@@ -3222,6 +3225,7 @@ _copyAlterSeqStmt(const AlterSeqStmt *from)
 
 	COPY_NODE_FIELD(sequence);
 	COPY_NODE_FIELD(options);
+	COPY_SCALAR_FIELD(missing_ok);
 
 	return newnode;
 }
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index ba949db..2295195 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -1010,6 +1010,7 @@ _equalAlterTableStmt(const AlterTableStmt *a, const AlterTableStmt *b)
 	COMPARE_NODE_FIELD(relation);
 	COMPARE_NODE_FIELD(cmds);
 	COMPARE_SCALAR_FIELD(relkind);
+	COMPARE_SCALAR_FIELD(missing_ok);
 
 	return true;
 }
@@ -1310,6 +1311,7 @@ _equalRenameStmt(const RenameStmt *a, const RenameStmt *b)
 	COMPARE_STRING_FIELD(subname);
 	COMPARE_STRING_FIELD(newname);
 	COMPARE_SCALAR_FIELD(behavior);
+	COMPARE_SCALAR_FIELD(missing_ok);
 
 	return true;
 }
@@ -1323,6 +1325,7 @@ _equalAlterObjectSchemaStmt(const AlterObjectSchemaStmt *a, const AlterObjectSch
 	COMPARE_NODE_FIELD(objarg);
 	COMPARE_STRING_FIELD(addname);
 	COMPARE_STRING_FIELD(newschema);
+	COMPARE_SCALAR_FIELD(missing_ok);
 
 	return true;
 }
@@ -1576,6 +1579,7 @@ _equalAlterSeqStmt(const AlterSeqStmt *a, const AlterSeqStmt *b)
 {
 	COMPARE_NODE_FIELD(sequence);
 	COMPARE_NODE_FIELD(options);
+	COMPARE_SCALAR_FIELD(missing_ok);
 
 	return true;
 }
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 0ec039b..62fde67 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -1629,6 +1629,16 @@ AlterTableStmt:
 					n->relation = $3;
 					n->cmds = $4;
 					n->relkind = OBJECT_TABLE;
+					n->missing_ok = false;
+					$$ = (Node *)n;
+				}
+		|	ALTER TABLE IF_P EXISTS relation_expr alter_table_cmds
+				{
+					AlterTableStmt *n = makeNode(AlterTableStmt);
+					n->relation = $5;
+					n->cmds = $6;
+					n->relkind = OBJECT_TABLE;
+					n->missing_ok = true;
 					$$ = (Node *)n;
 				}
 		|	ALTER INDEX qualified_name alter_table_cmds
@@ -1637,6 +1647,16 @@ AlterTableStmt:
 					n->relation = $3;
 					n->cmds = $4;
 					n->relkind = OBJECT_INDEX;
+					n->missing_ok = false;
+					$$ = (Node *)n;
+				}
+		|	ALTER INDEX IF_P EXISTS qualified_name alter_table_cmds
+				{
+					AlterTableStmt *n = makeNode(AlterTableStmt);
+					n->relation = $5;
+					n->cmds = $6;
+					n->relkind = OBJECT_INDEX;
+					n->missing_ok = true;
 					$$ = (Node *)n;
 				}
 		|	ALTER SEQUENCE qualified_name alter_table_cmds
@@ -1645,6 +1665,16 @@ AlterTableStmt:
 					n->relation = $3;
 					n->cmds = $4;
 					n->relkind = OBJECT_SEQUENCE;
+					n->missing_ok = false;
+					$$ = (Node *)n;
+				}
+		|	ALTER SEQUENCE IF_P EXISTS qualified_name alter_table_cmds
+				{
+					AlterTableStmt *n = makeNode(AlterTableStmt);
+					n->relation = $5;
+					n->cmds = $6;
+					n->relkind = OBJECT_SEQUENCE;
+					n->missing_ok = true;
 					$$ = (Node *)n;
 				}
 		|	ALTER VIEW qualified_name alter_table_cmds
@@ -1653,6 +1683,16 @@ AlterTableStmt:
 					n->relation = $3;
 					n->cmds = $4;
 					n->relkind = OBJECT_VIEW;
+					n->missing_ok = false;
+					$$ = (Node *)n;
+				}
+		|	ALTER VIEW IF_P EXISTS qualified_name alter_table_cmds
+				{
+					AlterTableStmt *n = makeNode(AlterTableStmt);
+					n->relation = $5;
+					n->cmds = $6;
+					n->relkind = OBJECT_VIEW;
+					n->missing_ok = true;
 					$$ = (Node *)n;
 				}
 		;
@@ -3068,8 +3108,18 @@ AlterSeqStmt:
 					AlterSeqStmt *n = makeNode(AlterSeqStmt);
 					n->sequence = $3;
 					n->options = $4;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
+			| ALTER SEQUENCE IF_P EXISTS qualified_name SeqOptList
+				{
+					AlterSeqStmt *n = makeNode(AlterSeqStmt);
+					n->sequence = $5;
+					n->options = $6;
+					n->missing_ok = true;
+					$$ = (Node *)n;
+				}
+
 		;
 
 OptSeqOptList: SeqOptList							{ $$ = $1; }
@@ -3906,6 +3956,16 @@ AlterForeignTableStmt:
 					n->relation = $4;
 					n->cmds = $5;
 					n->relkind = OBJECT_FOREIGN_TABLE;
+					n->missing_ok = false;
+					$$ = (Node *)n;
+				}
+			| ALTER FOREIGN TABLE IF_P EXISTS relation_expr alter_table_cmds
+				{
+					AlterTableStmt *n = makeNode(AlterTableStmt);
+					n->relation = $6;
+					n->cmds = $7;
+					n->relkind = OBJECT_FOREIGN_TABLE;
+					n->missing_ok = true;
 					$$ = (Node *)n;
 				}
 		;
@@ -6417,6 +6477,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->object = $3;
 					n->objarg = $4;
 					n->newname = $7;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER COLLATION any_name RENAME TO name
@@ -6425,6 +6486,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->renameType = OBJECT_COLLATION;
 					n->object = $3;
 					n->newname = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER CONVERSION_P any_name RENAME TO name
@@ -6433,6 +6495,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->renameType = OBJECT_CONVERSION;
 					n->object = $3;
 					n->newname = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER DATABASE database_name RENAME TO database_name
@@ -6441,6 +6504,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->renameType = OBJECT_DATABASE;
 					n->subname = $3;
 					n->newname = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER DOMAIN_P any_name RENAME TO name
@@ -6449,6 +6513,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->renameType = OBJECT_DOMAIN;
 					n->object = $3;
 					n->newname = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER FOREIGN DATA_P WRAPPER name RENAME TO name
@@ -6457,6 +6522,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->renameType = OBJECT_FDW;
 					n->subname = $5;
 					n->newname = $8;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER FUNCTION function_with_argtypes RENAME TO name
@@ -6466,6 +6532,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->object = $3->funcname;
 					n->objarg = $3->funcargs;
 					n->newname = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER GROUP_P RoleId RENAME TO RoleId
@@ -6474,6 +6541,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->renameType = OBJECT_ROLE;
 					n->subname = $3;
 					n->newname = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER opt_procedural LANGUAGE name RENAME TO name
@@ -6482,6 +6550,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->renameType = OBJECT_LANGUAGE;
 					n->subname = $4;
 					n->newname = $7;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER OPERATOR CLASS any_name USING access_method RENAME TO name
@@ -6491,6 +6560,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->object = $4;
 					n->subname = $6;
 					n->newname = $9;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER OPERATOR FAMILY any_name USING access_method RENAME TO name
@@ -6500,6 +6570,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->object = $4;
 					n->subname = $6;
 					n->newname = $9;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER SCHEMA name RENAME TO name
@@ -6508,6 +6579,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->renameType = OBJECT_SCHEMA;
 					n->subname = $3;
 					n->newname = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER SERVER name RENAME TO name
@@ -6516,6 +6588,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->renameType = OBJECT_FOREIGN_SERVER;
 					n->subname = $3;
 					n->newname = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER TABLE relation_expr RENAME TO name
@@ -6525,6 +6598,17 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->relation = $3;
 					n->subname = NULL;
 					n->newname = $6;
+					n->missing_ok = false;
+					$$ = (Node *)n;
+				}
+			| ALTER TABLE IF_P EXISTS relation_expr RENAME TO name
+				{
+					RenameStmt *n = makeNode(RenameStmt);
+					n->renameType = OBJECT_TABLE;
+					n->relation = $5;
+					n->subname = NULL;
+					n->newname = $8;
+					n->missing_ok = true;
 					$$ = (Node *)n;
 				}
 			| ALTER SEQUENCE qualified_name RENAME TO name
@@ -6534,6 +6618,17 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->relation = $3;
 					n->subname = NULL;
 					n->newname = $6;
+					n->missing_ok = false;
+					$$ = (Node *)n;
+				}
+			| ALTER SEQUENCE IF_P EXISTS qualified_name RENAME TO name
+				{
+					RenameStmt *n = makeNode(RenameStmt);
+					n->renameType = OBJECT_SEQUENCE;
+					n->relation = $5;
+					n->subname = NULL;
+					n->newname = $8;
+					n->missing_ok = true;
 					$$ = (Node *)n;
 				}
 			| ALTER VIEW qualified_name RENAME TO name
@@ -6543,6 +6638,17 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->relation = $3;
 					n->subname = NULL;
 					n->newname = $6;
+					n->missing_ok = false;
+					$$ = (Node *)n;
+				}
+			| ALTER VIEW IF_P EXISTS qualified_name RENAME TO name
+				{
+					RenameStmt *n = makeNode(RenameStmt);
+					n->renameType = OBJECT_VIEW;
+					n->relation = $5;
+					n->subname = NULL;
+					n->newname = $8;
+					n->missing_ok = true;
 					$$ = (Node *)n;
 				}
 			| ALTER INDEX qualified_name RENAME TO name
@@ -6552,6 +6658,17 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->relation = $3;
 					n->subname = NULL;
 					n->newname = $6;
+					n->missing_ok = false;
+					$$ = (Node *)n;
+				}
+			| ALTER INDEX IF_P EXISTS qualified_name RENAME TO name
+				{
+					RenameStmt *n = makeNode(RenameStmt);
+					n->renameType = OBJECT_INDEX;
+					n->relation = $5;
+					n->subname = NULL;
+					n->newname = $8;
+					n->missing_ok = true;
 					$$ = (Node *)n;
 				}
 			| ALTER FOREIGN TABLE relation_expr RENAME TO name
@@ -6561,6 +6678,17 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->relation = $4;
 					n->subname = NULL;
 					n->newname = $7;
+					n->missing_ok = false;
+					$$ = (Node *)n;
+				}
+			| ALTER FOREIGN TABLE IF_P EXISTS relation_expr RENAME TO name
+				{
+					RenameStmt *n = makeNode(RenameStmt);
+					n->renameType = OBJECT_FOREIGN_TABLE;
+					n->relation = $6;
+					n->subname = NULL;
+					n->newname = $9;
+					n->missing_ok = true;
 					$$ = (Node *)n;
 				}
 			| ALTER TABLE relation_expr RENAME opt_column name TO name
@@ -6571,6 +6699,18 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->relation = $3;
 					n->subname = $6;
 					n->newname = $8;
+					n->missing_ok = false;
+					$$ = (Node *)n;
+				}
+			| ALTER TABLE IF_P EXISTS relation_expr RENAME opt_column name TO name
+				{
+					RenameStmt *n = makeNode(RenameStmt);
+					n->renameType = OBJECT_COLUMN;
+					n->relationType = OBJECT_TABLE;
+					n->relation = $5;
+					n->subname = $8;
+					n->newname = $10;
+					n->missing_ok = true;
 					$$ = (Node *)n;
 				}
 			| ALTER FOREIGN TABLE relation_expr RENAME opt_column name TO name
@@ -6581,6 +6721,18 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->relation = $4;
 					n->subname = $7;
 					n->newname = $9;
+					n->missing_ok = false;
+					$$ = (Node *)n;
+				}
+			| ALTER FOREIGN TABLE IF_P EXISTS relation_expr RENAME opt_column name TO name
+				{
+					RenameStmt *n = makeNode(RenameStmt);
+					n->renameType = OBJECT_COLUMN;
+					n->relationType = OBJECT_FOREIGN_TABLE;
+					n->relation = $6;
+					n->subname = $9;
+					n->newname = $11;
+					n->missing_ok = true;
 					$$ = (Node *)n;
 				}
 			| ALTER TRIGGER name ON qualified_name RENAME TO name
@@ -6590,6 +6742,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->relation = $5;
 					n->subname = $3;
 					n->newname = $8;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER ROLE RoleId RENAME TO RoleId
@@ -6598,6 +6751,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->renameType = OBJECT_ROLE;
 					n->subname = $3;
 					n->newname = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER USER RoleId RENAME TO RoleId
@@ -6606,6 +6760,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->renameType = OBJECT_ROLE;
 					n->subname = $3;
 					n->newname = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER TABLESPACE name RENAME TO name
@@ -6614,6 +6769,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->renameType = OBJECT_TABLESPACE;
 					n->subname = $3;
 					n->newname = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER TABLESPACE name SET reloptions
@@ -6640,6 +6796,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->renameType = OBJECT_TSPARSER;
 					n->object = $5;
 					n->newname = $8;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER TEXT_P SEARCH DICTIONARY any_name RENAME TO name
@@ -6648,6 +6805,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->renameType = OBJECT_TSDICTIONARY;
 					n->object = $5;
 					n->newname = $8;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER TEXT_P SEARCH TEMPLATE any_name RENAME TO name
@@ -6656,6 +6814,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->renameType = OBJECT_TSTEMPLATE;
 					n->object = $5;
 					n->newname = $8;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER TEXT_P SEARCH CONFIGURATION any_name RENAME TO name
@@ -6664,6 +6823,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->renameType = OBJECT_TSCONFIGURATION;
 					n->object = $5;
 					n->newname = $8;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER TYPE_P any_name RENAME TO name
@@ -6672,6 +6832,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->renameType = OBJECT_TYPE;
 					n->object = $3;
 					n->newname = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER TYPE_P any_name RENAME ATTRIBUTE name TO name opt_drop_behavior
@@ -6683,6 +6844,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name
 					n->subname = $6;
 					n->newname = $8;
 					n->behavior = $9;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 		;
@@ -6709,6 +6871,7 @@ AlterObjectSchemaStmt:
 					n->object = $3;
 					n->objarg = $4;
 					n->newschema = $7;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER COLLATION any_name SET SCHEMA name
@@ -6717,6 +6880,7 @@ AlterObjectSchemaStmt:
 					n->objectType = OBJECT_COLLATION;
 					n->object = $3;
 					n->newschema = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER CONVERSION_P any_name SET SCHEMA name
@@ -6725,6 +6889,7 @@ AlterObjectSchemaStmt:
 					n->objectType = OBJECT_CONVERSION;
 					n->object = $3;
 					n->newschema = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER DOMAIN_P any_name SET SCHEMA name
@@ -6733,6 +6898,7 @@ AlterObjectSchemaStmt:
 					n->objectType = OBJECT_DOMAIN;
 					n->object = $3;
 					n->newschema = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER EXTENSION any_name SET SCHEMA name
@@ -6741,6 +6907,7 @@ AlterObjectSchemaStmt:
 					n->objectType = OBJECT_EXTENSION;
 					n->object = $3;
 					n->newschema = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER FUNCTION function_with_argtypes SET SCHEMA name
@@ -6750,6 +6917,7 @@ AlterObjectSchemaStmt:
 					n->object = $3->funcname;
 					n->objarg = $3->funcargs;
 					n->newschema = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER OPERATOR any_operator oper_argtypes SET SCHEMA name
@@ -6759,6 +6927,7 @@ AlterObjectSchemaStmt:
 					n->object = $3;
 					n->objarg = $4;
 					n->newschema = $7;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER OPERATOR CLASS any_name USING access_method SET SCHEMA name
@@ -6768,6 +6937,7 @@ AlterObjectSchemaStmt:
 					n->object = $4;
 					n->addname = $6;
 					n->newschema = $9;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER OPERATOR FAMILY any_name USING access_method SET SCHEMA name
@@ -6777,6 +6947,7 @@ AlterObjectSchemaStmt:
 					n->object = $4;
 					n->addname = $6;
 					n->newschema = $9;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER TABLE relation_expr SET SCHEMA name
@@ -6785,6 +6956,16 @@ AlterObjectSchemaStmt:
 					n->objectType = OBJECT_TABLE;
 					n->relation = $3;
 					n->newschema = $6;
+					n->missing_ok = false;
+					$$ = (Node *)n;
+				}
+			| ALTER TABLE IF_P EXISTS relation_expr SET SCHEMA name
+				{
+					AlterObjectSchemaStmt *n = makeNode(AlterObjectSchemaStmt);
+					n->objectType = OBJECT_TABLE;
+					n->relation = $5;
+					n->newschema = $8;
+					n->missing_ok = true;
 					$$ = (Node *)n;
 				}
 			| ALTER TEXT_P SEARCH PARSER any_name SET SCHEMA name
@@ -6793,6 +6974,7 @@ AlterObjectSchemaStmt:
 					n->objectType = OBJECT_TSPARSER;
 					n->object = $5;
 					n->newschema = $8;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER TEXT_P SEARCH DICTIONARY any_name SET SCHEMA name
@@ -6801,6 +6983,7 @@ AlterObjectSchemaStmt:
 					n->objectType = OBJECT_TSDICTIONARY;
 					n->object = $5;
 					n->newschema = $8;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER TEXT_P SEARCH TEMPLATE any_name SET SCHEMA name
@@ -6809,6 +6992,7 @@ AlterObjectSchemaStmt:
 					n->objectType = OBJECT_TSTEMPLATE;
 					n->object = $5;
 					n->newschema = $8;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER TEXT_P SEARCH CONFIGURATION any_name SET SCHEMA name
@@ -6817,6 +7001,7 @@ AlterObjectSchemaStmt:
 					n->objectType = OBJECT_TSCONFIGURATION;
 					n->object = $5;
 					n->newschema = $8;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 			| ALTER SEQUENCE qualified_name SET SCHEMA name
@@ -6825,6 +7010,16 @@ AlterObjectSchemaStmt:
 					n->objectType = OBJECT_SEQUENCE;
 					n->relation = $3;
 					n->newschema = $6;
+					n->missing_ok = false;
+					$$ = (Node *)n;
+				}
+			| ALTER SEQUENCE IF_P EXISTS qualified_name SET SCHEMA name
+				{
+					AlterObjectSchemaStmt *n = makeNode(AlterObjectSchemaStmt);
+					n->objectType = OBJECT_SEQUENCE;
+					n->relation = $5;
+					n->newschema = $8;
+					n->missing_ok = true;
 					$$ = (Node *)n;
 				}
 			| ALTER VIEW qualified_name SET SCHEMA name
@@ -6833,6 +7028,16 @@ AlterObjectSchemaStmt:
 					n->objectType = OBJECT_VIEW;
 					n->relation = $3;
 					n->newschema = $6;
+					n->missing_ok = false;
+					$$ = (Node *)n;
+				}
+			| ALTER VIEW IF_P EXISTS qualified_name SET SCHEMA name
+				{
+					AlterObjectSchemaStmt *n = makeNode(AlterObjectSchemaStmt);
+					n->objectType = OBJECT_VIEW;
+					n->relation = $5;
+					n->newschema = $8;
+					n->missing_ok = true;
 					$$ = (Node *)n;
 				}
 			| ALTER FOREIGN TABLE relation_expr SET SCHEMA name
@@ -6841,6 +7046,16 @@ AlterObjectSchemaStmt:
 					n->objectType = OBJECT_FOREIGN_TABLE;
 					n->relation = $4;
 					n->newschema = $7;
+					n->missing_ok = false;
+					$$ = (Node *)n;
+				}
+			| ALTER FOREIGN TABLE IF_P EXISTS relation_expr SET SCHEMA name
+				{
+					AlterObjectSchemaStmt *n = makeNode(AlterObjectSchemaStmt);
+					n->objectType = OBJECT_FOREIGN_TABLE;
+					n->relation = $6;
+					n->newschema = $9;
+					n->missing_ok = true;
 					$$ = (Node *)n;
 				}
 			| ALTER TYPE_P any_name SET SCHEMA name
@@ -6849,6 +7064,7 @@ AlterObjectSchemaStmt:
 					n->objectType = OBJECT_TYPE;
 					n->object = $3;
 					n->newschema = $6;
+					n->missing_ok = false;
 					$$ = (Node *)n;
 				}
 		;
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 99157c5..f1a108a 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -2283,7 +2283,15 @@ transformAlterTableStmt(AlterTableStmt *stmt, const char *queryString)
 	 * new commands we add after this must not upgrade the lock level
 	 * requested here.
 	 */
-	rel = relation_openrv(stmt->relation, lockmode);
+	rel = relation_openrv_extended(stmt->relation, lockmode, stmt->missing_ok);
+	if (rel == NULL)
+	{
+		/* this message is consistent with relation_openrv */
+		ereport(NOTICE,
+				(errmsg("relation \"%s\" does not exist, skipping",
+							stmt->relation->relname)));
+		return NIL;
+	}
 
 	/* Set up pstate and CreateStmtContext */
 	pstate = make_parsestate(NULL);
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index de16a61..5b81c0b 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -714,34 +714,41 @@ standard_ProcessUtility(Node *parsetree,
 				lockmode = AlterTableGetLockLevel(atstmt->cmds);
 				relid = AlterTableLookupRelation(atstmt, lockmode);
 
-				/* Run parse analysis ... */
-				stmts = transformAlterTableStmt(atstmt, queryString);
-
-				/* ... and do it */
-				foreach(l, stmts)
+				if (OidIsValid(relid))
 				{
-					Node	   *stmt = (Node *) lfirst(l);
+					/* Run parse analysis ... */
+					stmts = transformAlterTableStmt(atstmt, queryString);
 
-					if (IsA(stmt, AlterTableStmt))
-					{
-						/* Do the table alteration proper */
-						AlterTable(relid, lockmode, (AlterTableStmt *) stmt);
-					}
-					else
+					/* ... and do it */
+					foreach(l, stmts)
 					{
-						/* Recurse for anything else */
-						ProcessUtility(stmt,
-									   queryString,
-									   params,
-									   false,
-									   None_Receiver,
-									   NULL);
-					}
+						Node	   *stmt = (Node *) lfirst(l);
 
-					/* Need CCI between commands */
-					if (lnext(l) != NULL)
-						CommandCounterIncrement();
+						if (IsA(stmt, AlterTableStmt))
+						{
+							/* Do the table alteration proper */
+							AlterTable(relid, lockmode, (AlterTableStmt *) stmt);
+						}
+						else
+						{
+							/* Recurse for anything else */
+							ProcessUtility(stmt,
+										   queryString,
+										   params,
+										   false,
+										   None_Receiver,
+										   NULL);
+						}
+
+						/* Need CCI between commands */
+						if (lnext(l) != NULL)
+							CommandCounterIncrement();
+					}
 				}
+				else
+					ereport(NOTICE,
+						(errmsg("relation \"%s\" does not exist, skipping",
+							atstmt->relation->relname)));
 			}
 			break;
 
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index dce0e72..1d33ceb 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -1171,6 +1171,7 @@ typedef struct AlterTableStmt
 	RangeVar   *relation;		/* table to work on */
 	List	   *cmds;			/* list of subcommands */
 	ObjectType	relkind;		/* type of object */
+	bool	   missing_ok;		/* skip error if table missing */
 } AlterTableStmt;
 
 typedef enum AlterTableType
@@ -1807,6 +1808,7 @@ typedef struct AlterSeqStmt
 	NodeTag		type;
 	RangeVar   *sequence;		/* the sequence to alter */
 	List	   *options;
+	bool		missing_ok;		/* skip error if the sequence is missing? */
 } AlterSeqStmt;
 
 /* ----------------------
@@ -2117,6 +2119,7 @@ typedef struct RenameStmt
 								 * trigger, etc) */
 	char	   *newname;		/* the new name */
 	DropBehavior behavior;		/* RESTRICT or CASCADE behavior */
+	bool		missing_ok;	/* skip error if missing? */
 } RenameStmt;
 
 /* ----------------------
@@ -2132,6 +2135,7 @@ typedef struct AlterObjectSchemaStmt
 	List	   *objarg;			/* argument types, if applicable */
 	char	   *addname;		/* additional name if needed */
 	char	   *newschema;		/* the new schema */
+	bool		missing_ok;	/* skip error if missing? */
 } AlterObjectSchemaStmt;
 
 /* ----------------------
diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out
index 57096f2..e992549 100644
--- a/src/test/regress/expected/alter_table.out
+++ b/src/test/regress/expected/alter_table.out
@@ -128,6 +128,10 @@ DROP TABLE tmp_new;
 DROP TABLE tmp_new2;
 -- ALTER TABLE ... RENAME on non-table relations
 -- renaming indexes (FIXME: this should probably test the index's functionality)
+ALTER INDEX IF EXISTS __onek_unique1 RENAME TO tmp_onek_unique1;
+NOTICE:  relation "__onek_unique1" does not exist, skipping
+ALTER INDEX IF EXISTS __tmp_onek_unique1 RENAME TO onek_unique1;
+NOTICE:  relation "__tmp_onek_unique1" does not exist, skipping
 ALTER INDEX onek_unique1 RENAME TO tmp_onek_unique1;
 ALTER INDEX tmp_onek_unique1 RENAME TO onek_unique1;
 -- renaming views
@@ -1185,6 +1189,11 @@ ERROR:  inherited column "a" must be renamed in child tables too
 -- these should work
 alter table renameColumn rename column a to d;
 alter table renameColumnChild rename column b to a;
+-- these should work
+alter table if exists doesnt_exist_tab rename column a to d;
+NOTICE:  relation "doesnt_exist_tab" does not exist, skipping
+alter table if exists doesnt_exist_tab rename column b to a;
+NOTICE:  relation "doesnt_exist_tab" does not exist, skipping
 -- this should work
 alter table renameColumn add column w int;
 -- this should fail
@@ -2130,3 +2139,40 @@ ERROR:  new row for relation "test_drop_constr_child" violates check constraint
 DETAIL:  Failing row contains (null).
 DROP TABLE test_drop_constr_parent CASCADE;
 NOTICE:  drop cascades to table test_drop_constr_child
+--
+-- IF EXISTS test
+--
+ALTER TABLE IF EXISTS tt8 ADD COLUMN f int;
+NOTICE:  relation "tt8" does not exist, skipping
+ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f);
+NOTICE:  relation "tt8" does not exist, skipping
+ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10);
+NOTICE:  relation "tt8" does not exist, skipping
+ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0;
+NOTICE:  relation "tt8" does not exist, skipping
+ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1;
+NOTICE:  relation "tt8" does not exist, skipping
+ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2;
+NOTICE:  relation "tt8" does not exist, skipping
+CREATE TABLE tt8(a int);
+CREATE SCHEMA alter2;
+ALTER TABLE IF EXISTS tt8 ADD COLUMN f int;
+ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f);
+NOTICE:  ALTER TABLE / ADD PRIMARY KEY will create implicit index "xxx" for table "tt8"
+ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10);
+ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0;
+ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1;
+ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2;
+\d alter2.tt8
+          Table "alter2.tt8"
+ Column |  Type   |     Modifiers      
+--------+---------+--------------------
+ a      | integer | 
+ f1     | integer | not null default 0
+Indexes:
+    "xxx" PRIMARY KEY, btree (f1)
+Check constraints:
+    "tt8_f_check" CHECK (f1 >= 0 AND f1 <= 10)
+
+DROP TABLE alter2.tt8;
+DROP SCHEMA alter2;
diff --git a/src/test/regress/expected/foreign_data.out b/src/test/regress/expected/foreign_data.out
index 2a6e876..c12e0d0 100644
--- a/src/test/regress/expected/foreign_data.out
+++ b/src/test/regress/expected/foreign_data.out
@@ -789,6 +789,50 @@ ALTER FOREIGN TABLE foreign_schema.ft1 RENAME TO foreign_table_1;
 Server: s0
 FDW Options: (quote '~', "be quoted" 'value', escape '@')
 
+-- alter nonexistent table
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c4 integer;
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c6 integer;
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c7 integer NOT NULL;
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c8 integer;
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c9 integer;
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c10 integer OPTIONS (p1 'v1');
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c6 SET NOT NULL;
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c7 DROP NOT NULL;
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 TYPE char(10);
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 SET DATA TYPE text;
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c7 OPTIONS (ADD p1 'v1', ADD p2 'v2'),
+                        ALTER COLUMN c8 OPTIONS (ADD p1 'v1', ADD p2 'v2');
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 OPTIONS (SET p2 'V2', DROP p1);
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP CONSTRAINT IF EXISTS no_const;
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP CONSTRAINT ft1_c1_check;
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 OWNER TO regress_test_role;
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 OPTIONS (DROP delimiter, SET quote '~', ADD escape '@');
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP COLUMN IF EXISTS no_column;
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP COLUMN c9;
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 SET SCHEMA foreign_schema;
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 RENAME c1 TO foreign_column_1;
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 RENAME TO foreign_table_1;
+NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
 -- Information schema
 SELECT * FROM information_schema.foreign_data_wrappers ORDER BY 1, 2;
  foreign_data_wrapper_catalog | foreign_data_wrapper_name | authorization_identifier | library_name | foreign_data_wrapper_language 
diff --git a/src/test/regress/expected/sequence.out b/src/test/regress/expected/sequence.out
index a110a2e..1def070 100644
--- a/src/test/regress/expected/sequence.out
+++ b/src/test/regress/expected/sequence.out
@@ -241,6 +241,9 @@ DROP SEQUENCE myseq2;
 --
 -- Alter sequence
 --
+ALTER SEQUENCE IF EXISTS sequence_test2 RESTART WITH 24
+	 INCREMENT BY 4 MAXVALUE 36 MINVALUE 5 CYCLE;
+NOTICE:  relation "sequence_test2" does not exist, skipping
 CREATE SEQUENCE sequence_test2 START WITH 32;
 SELECT nextval('sequence_test2');
  nextval 
diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql
index faafb22..d9bf08d 100644
--- a/src/test/regress/sql/alter_table.sql
+++ b/src/test/regress/sql/alter_table.sql
@@ -166,6 +166,9 @@ DROP TABLE tmp_new2;
 
 -- ALTER TABLE ... RENAME on non-table relations
 -- renaming indexes (FIXME: this should probably test the index's functionality)
+ALTER INDEX IF EXISTS __onek_unique1 RENAME TO tmp_onek_unique1;
+ALTER INDEX IF EXISTS __tmp_onek_unique1 RENAME TO onek_unique1;
+
 ALTER INDEX onek_unique1 RENAME TO tmp_onek_unique1;
 ALTER INDEX tmp_onek_unique1 RENAME TO onek_unique1;
 -- renaming views
@@ -898,6 +901,10 @@ alter table only renameColumn rename column a to d;
 alter table renameColumn rename column a to d;
 alter table renameColumnChild rename column b to a;
 
+-- these should work
+alter table if exists doesnt_exist_tab rename column a to d;
+alter table if exists doesnt_exist_tab rename column b to a;
+
 -- this should work
 alter table renameColumn add column w int;
 
@@ -1463,3 +1470,28 @@ ALTER TABLE ONLY test_drop_constr_parent DROP CONSTRAINT "test_drop_constr_paren
 -- should fail
 INSERT INTO test_drop_constr_child (c) VALUES (NULL);
 DROP TABLE test_drop_constr_parent CASCADE;
+
+--
+-- IF EXISTS test
+--
+ALTER TABLE IF EXISTS tt8 ADD COLUMN f int;
+ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f);
+ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10);
+ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0;
+ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1;
+ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2;
+
+CREATE TABLE tt8(a int);
+CREATE SCHEMA alter2;
+
+ALTER TABLE IF EXISTS tt8 ADD COLUMN f int;
+ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f);
+ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10);
+ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0;
+ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1;
+ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2;
+
+\d alter2.tt8
+
+DROP TABLE alter2.tt8;
+DROP SCHEMA alter2;
diff --git a/src/test/regress/sql/foreign_data.sql b/src/test/regress/sql/foreign_data.sql
index 95d02d1..b41a7e4 100644
--- a/src/test/regress/sql/foreign_data.sql
+++ b/src/test/regress/sql/foreign_data.sql
@@ -328,6 +328,32 @@ ALTER FOREIGN TABLE foreign_schema.ft1 RENAME c1 TO foreign_column_1;
 ALTER FOREIGN TABLE foreign_schema.ft1 RENAME TO foreign_table_1;
 \d foreign_schema.foreign_table_1
 
+-- alter nonexistent table
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c4 integer;
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c6 integer;
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c7 integer NOT NULL;
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c8 integer;
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c9 integer;
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c10 integer OPTIONS (p1 'v1');
+
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c6 SET NOT NULL;
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c7 DROP NOT NULL;
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 TYPE char(10);
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 SET DATA TYPE text;
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c7 OPTIONS (ADD p1 'v1', ADD p2 'v2'),
+                        ALTER COLUMN c8 OPTIONS (ADD p1 'v1', ADD p2 'v2');
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 OPTIONS (SET p2 'V2', DROP p1);
+
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP CONSTRAINT IF EXISTS no_const;
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP CONSTRAINT ft1_c1_check;
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 OWNER TO regress_test_role;
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 OPTIONS (DROP delimiter, SET quote '~', ADD escape '@');
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP COLUMN IF EXISTS no_column;
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP COLUMN c9;
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 SET SCHEMA foreign_schema;
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 RENAME c1 TO foreign_column_1;
+ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 RENAME TO foreign_table_1;
+
 -- Information schema
 
 SELECT * FROM information_schema.foreign_data_wrappers ORDER BY 1, 2;
diff --git a/src/test/regress/sql/sequence.sql b/src/test/regress/sql/sequence.sql
index 97ffb60..a32e049 100644
--- a/src/test/regress/sql/sequence.sql
+++ b/src/test/regress/sql/sequence.sql
@@ -113,6 +113,10 @@ DROP SEQUENCE myseq2;
 --
 -- Alter sequence
 --
+
+ALTER SEQUENCE IF EXISTS sequence_test2 RESTART WITH 24
+	 INCREMENT BY 4 MAXVALUE 36 MINVALUE 5 CYCLE;
+
 CREATE SEQUENCE sequence_test2 START WITH 32;
 
 SELECT nextval('sequence_test2');
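
Taken together, these regression additions exercise the IF EXISTS variants of
ALTER TABLE, ALTER INDEX, ALTER SEQUENCE and ALTER FOREIGN TABLE.  A minimal
sketch of the intended use in an idempotent migration script (object names are
hypothetical, not part of the patch):

    -- Each statement is a no-op (with a NOTICE) if the object is already gone.
    ALTER TABLE IF EXISTS legacy_accounts RENAME COLUMN acct_no TO account_number;
    ALTER SEQUENCE IF EXISTS legacy_accounts_seq RESTART WITH 1;
    ALTER INDEX IF EXISTS legacy_accounts_idx RENAME TO accounts_idx;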

commit 4993a49b7cf1d23dfe1f9e1a85d9411b8ff57454
Author: Magnus Hagander <magnus@hagander.net>
Date:   Sun Jan 22 22:34:28 2012 +0100

    Typo fix
    
    Guillaume Lelarge

diff --git a/doc/src/sgml/tcn.sgml b/doc/src/sgml/tcn.sgml
index af830df..53c4637 100644
--- a/doc/src/sgml/tcn.sgml
+++ b/doc/src/sgml/tcn.sgml
@@ -18,7 +18,7 @@
  </para>
 
  <para>
-  Only one parameter may be suupplied to the function in a
+  Only one parameter may be supplied to the function in a
   <literal>CREATE TRIGGER</> statement, and that is optional.  If supplied
   it will be used for the channel name for the notifications.  If omitted
   <literal>tcn</> will be used for the channel name.

commit 95c63b5e3209cfc8d91d7956407032fc6fe89640
Author: Peter Eisentraut <peter_e@gmx.net>
Date:   Sat Jan 21 19:46:55 2012 +0200

    psql: Add support for tab completion of GRANT/REVOKE role
    
    Previously, only GRANT/REVOKE privilege was supported.
    
    reviewed by Pavel Stehule
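
For context, the two completion paths match the two forms of the statement; a
minimal sketch (object and role names are hypothetical, not part of the patch):

    GRANT SELECT ON accounts TO bob;   -- privilege form: completed with ON
    GRANT auditor TO bob;              -- role form: now completed with TO
    REVOKE auditor FROM bob;           -- role form: now completed with FROM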

diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index a27ef69..6efc0ce 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -2209,21 +2209,52 @@ psql_completion(char *text, int start, int end)
 		COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_foreign_tables, NULL);
 
 /* GRANT && REVOKE */
-	/* Complete GRANT/REVOKE with a list of privileges */
+	/* Complete GRANT/REVOKE with a list of roles and privileges */
 	else if (pg_strcasecmp(prev_wd, "GRANT") == 0 ||
 			 pg_strcasecmp(prev_wd, "REVOKE") == 0)
 	{
-		static const char *const list_privilege[] =
-		{"SELECT", "INSERT", "UPDATE", "DELETE", "TRUNCATE", "REFERENCES",
-			"TRIGGER", "CREATE", "CONNECT", "TEMPORARY", "EXECUTE", "USAGE",
-		"ALL", NULL};
-
-		COMPLETE_WITH_LIST(list_privilege);
-	}
-	/* Complete GRANT/REVOKE <sth> with "ON" */
+		COMPLETE_WITH_QUERY(Query_for_list_of_roles
+							" UNION SELECT 'SELECT'"
+							" UNION SELECT 'INSERT'"
+							" UNION SELECT 'UPDATE'"
+							" UNION SELECT 'DELETE'"
+							" UNION SELECT 'TRUNCATE'"
+							" UNION SELECT 'REFERENCES'"
+							" UNION SELECT 'TRIGGER'"
+							" UNION SELECT 'CREATE'"
+							" UNION SELECT 'CONNECT'"
+							" UNION SELECT 'TEMPORARY'"
+							" UNION SELECT 'EXECUTE'"
+							" UNION SELECT 'USAGE'"
+							" UNION SELECT 'ALL'");
+	}
+	/* Complete GRANT/REVOKE <privilege> with "ON", GRANT/REVOKE <role> with TO/FROM */
 	else if (pg_strcasecmp(prev2_wd, "GRANT") == 0 ||
 			 pg_strcasecmp(prev2_wd, "REVOKE") == 0)
-		COMPLETE_WITH_CONST("ON");
+	{
+		if (pg_strcasecmp(prev_wd, "SELECT") == 0
+			|| pg_strcasecmp(prev_wd, "INSERT") == 0
+			|| pg_strcasecmp(prev_wd, "UPDATE") == 0
+			|| pg_strcasecmp(prev_wd, "DELETE") == 0
+			|| pg_strcasecmp(prev_wd, "TRUNCATE") == 0
+			|| pg_strcasecmp(prev_wd, "REFERENCES") == 0
+			|| pg_strcasecmp(prev_wd, "TRIGGER") == 0
+			|| pg_strcasecmp(prev_wd, "CREATE") == 0
+			|| pg_strcasecmp(prev_wd, "CONNECT") == 0
+			|| pg_strcasecmp(prev_wd, "TEMPORARY") == 0
+			|| pg_strcasecmp(prev_wd, "TEMP") == 0
+			|| pg_strcasecmp(prev_wd, "EXECUTE") == 0
+			|| pg_strcasecmp(prev_wd, "USAGE") == 0
+			|| pg_strcasecmp(prev_wd, "ALL") == 0)
+			COMPLETE_WITH_CONST("ON");
+		else
+		{
+			if (pg_strcasecmp(prev2_wd, "GRANT") == 0)
+				COMPLETE_WITH_CONST("TO");
+			else
+				COMPLETE_WITH_CONST("FROM");
+		}
+	}
 
 	/*
 	 * Complete GRANT/REVOKE <sth> ON with a list of tables, views, sequences,
@@ -2304,6 +2335,18 @@ psql_completion(char *text, int start, int end)
 			COMPLETE_WITH_CONST("FROM");
 	}
 
+	/* Complete "GRANT/REVOKE * TO/FROM" with username, GROUP, or PUBLIC */
+	else if (pg_strcasecmp(prev3_wd, "GRANT") == 0 &&
+			 pg_strcasecmp(prev_wd, "TO") == 0)
+	{
+		COMPLETE_WITH_QUERY(Query_for_list_of_grant_roles);
+	}
+	else if (pg_strcasecmp(prev3_wd, "REVOKE") == 0 &&
+			 pg_strcasecmp(prev_wd, "FROM") == 0)
+	{
+		COMPLETE_WITH_QUERY(Query_for_list_of_grant_roles);
+	}
+
 /* GROUP BY */
 	else if (pg_strcasecmp(prev3_wd, "FROM") == 0 &&
 			 pg_strcasecmp(prev_wd, "GROUP") == 0)

commit c14534957bb93df76bc66516aa03476de0069213
Author: Magnus Hagander <magnus@hagander.net>
Date:   Fri Jan 20 13:57:02 2012 +0100

    Check number of fields in IDENTIFY_SYSTEM response
    
    Jaime Casanova

diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index 4007680..aabbdac 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -918,10 +918,10 @@ BaseBackup(void)
 				progname, PQerrorMessage(conn));
 		disconnect_and_exit(1);
 	}
-	if (PQntuples(res) != 1)
+	if (PQntuples(res) != 1 || PQnfields(res) != 3)
 	{
-		fprintf(stderr, _("%s: could not identify system, got %i rows\n"),
-				progname, PQntuples(res));
+		fprintf(stderr, _("%s: could not identify system, got %i rows and %i fields\n"),
+				progname, PQntuples(res), PQnfields(res));
 		disconnect_and_exit(1);
 	}
 	sysidentifier = strdup(PQgetvalue(res, 0, 0));
@@ -1130,7 +1130,7 @@ BaseBackup(void)
 		{
 			fprintf(stderr, _("%s: could not parse xlog end position \"%s\"\n"),
 					progname, xlogend);
-			exit(1);
+			disconnect_and_exit(1);
 		}
 		InterlockedIncrement(&has_xlogendptr);
 
@@ -1162,6 +1162,7 @@ BaseBackup(void)
 	/*
 	 * End of copy data. Final result is already checked inside the loop.
 	 */
+	PQclear(res);
 	PQfinish(conn);
 
 	if (verbose)
diff --git a/src/bin/pg_basebackup/pg_receivexlog.c b/src/bin/pg_basebackup/pg_receivexlog.c
index e698b06..fe9e39b 100644
--- a/src/bin/pg_basebackup/pg_receivexlog.c
+++ b/src/bin/pg_basebackup/pg_receivexlog.c
@@ -235,10 +235,10 @@ StreamLog(void)
 				progname, PQerrorMessage(conn));
 		disconnect_and_exit(1);
 	}
-	if (PQntuples(res) != 1)
+	if (PQntuples(res) != 1 || PQnfields(res) != 3)
 	{
-		fprintf(stderr, _("%s: could not identify system, got %i rows\n"),
-				progname, PQntuples(res));
+		fprintf(stderr, _("%s: could not identify system, got %i rows and %i fields\n"),
+				progname, PQntuples(res), PQnfields(res));
 		disconnect_and_exit(1);
 	}
 	timeline = atoi(PQgetvalue(res, 0, 1));
diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c
index c390cbf..8ca3882 100644
--- a/src/bin/pg_basebackup/receivelog.c
+++ b/src/bin/pg_basebackup/receivelog.c
@@ -235,6 +235,13 @@ ReceiveXlogStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline, char *sysi
 			PQclear(res);
 			return false;
 		}
+		if (PQnfields(res) != 3 || PQntuples(res) != 1)
+		{
+			fprintf(stderr, _("%s: could not identify system, got %i rows and %i fields\n"),
+					progname, PQntuples(res), PQnfields(res));
+			PQclear(res);
+			return false;
+		}
 		if (strcmp(sysidentifier, PQgetvalue(res, 0, 0)) != 0)
 		{
 			fprintf(stderr, _("%s: system identifier does not match between base backup and streaming connection\n"), progname);

commit 356fddfa0ff612a40cc85f8374f9cd058585687f
Author: Magnus Hagander <magnus@hagander.net>
Date:   Fri Jan 20 12:30:19 2012 +0100

    Get rid of itemizedlist inside table
    
    This renders badly on the website, and in this particular case also
    doesn't actually add anything to the readability...

diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml
index dadfd24..fef2a35 100644
--- a/doc/src/sgml/monitoring.sgml
+++ b/doc/src/sgml/monitoring.sgml
@@ -577,17 +577,9 @@ postgres: <replaceable>user</> <replaceable>database</> <replaceable>host</> <re
      <entry>client_addr</entry>
      <entry><type>inet</></entry>
      <entry>The remote IP of the client connected to the backend.
-     If this field is not set, it indicates that the client is either:
-      <itemizedlist spacing="compact" mark="bullet">
-       <listitem>
-        <para>
-         Connected via unix sockets on the server machine
-        </para>
-       </listitem>
-       <listitem>
-        <para>An internal process such as autovacuum</para>
-       </listitem>
-     </itemizedlist>
+      If this field is not set, it indicates that the client is either connected
+      via a Unix socket on the server machine or is an internal process such
+      as autovacuum.
      </entry>
     </row>
     <row>

commit a65023e7decf5f5ba9b17bd86db8fe1ea4b6c331
Author: Magnus Hagander <magnus@hagander.net>
Date:   Fri Jan 20 12:23:26 2012 +0100

    Further doc cleanups from the pg_stat_activity changes
    
    Fujii Masao

diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml
index be4bbc7..ea98cb7 100644
--- a/doc/src/sgml/catalogs.sgml
+++ b/doc/src/sgml/catalogs.sgml
@@ -7320,7 +7320,7 @@
 
   <para>
    The <structfield>pid</structfield> column can be joined to the
-   <structfield>procpid</structfield> column of the
+   <structfield>pid</structfield> column of the
    <structname>pg_stat_activity</structname> view to get more
    information on the session holding or waiting to hold each lock.
    Also, if you are using prepared transactions, the
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index 58e8ede..e55b503 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -3857,7 +3857,7 @@ local0.*    /var/log/postgresql
          identifier from <literal>pg_stat_activity</>, use this query:
 <programlisting>
 SELECT to_hex(EXTRACT(EPOCH FROM backend_start)::integer) || '.' ||
-       to_hex(procpid)
+       to_hex(pid)
 FROM pg_stat_activity;
 </programlisting>
 
@@ -4153,7 +4153,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
        <para>
        Specifies the number of bytes reserved to track the currently
        executing command for each active session, for the
-       <structname>pg_stat_activity</>.<structfield>current_query</> field.
+       <structname>pg_stat_activity</>.<structfield>query</> field.
        The default value is 1024. This parameter can only be set at server
        start.
        </para>
diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml
index 43b72f6..48631cc 100644
--- a/doc/src/sgml/func.sgml
+++ b/doc/src/sgml/func.sgml
@@ -14322,7 +14322,7 @@ SELECT set_config('log_statement_stats', 'off', false);
     send signals (<systemitem>SIGINT</> or <systemitem>SIGTERM</>
     respectively) to backend processes identified by process ID.
     The process ID of an active backend can be found from
-    the <structfield>procpid</structfield> column of the
+    the <structfield>pid</structfield> column of the
     <structname>pg_stat_activity</structname> view, or by listing the
     <command>postgres</command> processes on the server (using
     <application>ps</> on Unix or the <application>Task
diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml
index 2259180..dadfd24 100644
--- a/doc/src/sgml/monitoring.sgml
+++ b/doc/src/sgml/monitoring.sgml
@@ -1462,8 +1462,8 @@ postgres: <replaceable>user</> <replaceable>database</> <replaceable>host</> <re
    example, to show the <acronym>PID</>s and current queries of all server processes:
 
 <programlisting>
-SELECT pg_stat_get_backend_pid(s.backendid) AS procpid,
-       pg_stat_get_backend_activity(s.backendid) AS current_query
+SELECT pg_stat_get_backend_pid(s.backendid) AS pid,
+       pg_stat_get_backend_activity(s.backendid) AS query
     FROM (SELECT pg_stat_get_backend_idset() AS backendid) AS s;
 </programlisting>
   </para>
@@ -1670,7 +1670,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS procpid,
      <entry>statement-status</entry>
      <entry>(const char *)</entry>
      <entry>Probe that fires anytime the server process updates its
-      <structname>pg_stat_activity</>.<structfield>current_query</> status.
+      <structname>pg_stat_activity</>.<structfield>status</>.
       arg0 is the new status string.</entry>
     </row>
     <row>
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 5c910dd..9fc96b2 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -2362,7 +2362,7 @@ static struct config_int ConfigureNamesInt[] =
 
 	{
 		{"track_activity_query_size", PGC_POSTMASTER, RESOURCES_MEM,
-			gettext_noop("Sets the size reserved for pg_stat_activity.current_query, in bytes."),
+			gettext_noop("Sets the size reserved for pg_stat_activity.query, in bytes."),
 			NULL,
 		},
 		&pgstat_track_activity_query_size,

commit 6e3323d41dc45e93700a3420fd27ca05db6a64a7
Author: Robert Haas <rhaas@postgresql.org>
Date:   Thu Jan 19 23:15:15 2012 -0500

    Triggered change notifications.
    
    Kevin Grittner, reviewed (in earlier versions) by Álvaro Herrera

diff --git a/contrib/Makefile b/contrib/Makefile
index 0c238aa..ac0a80a 100644
--- a/contrib/Makefile
+++ b/contrib/Makefile
@@ -45,6 +45,7 @@ SUBDIRS = \
 		seg		\
 		spi		\
 		tablefunc	\
+		tcn		\
 		test_parser	\
 		tsearch2	\
 		unaccent	\
diff --git a/contrib/tcn/Makefile b/contrib/tcn/Makefile
new file mode 100644
index 0000000..7bac5e3
--- /dev/null
+++ b/contrib/tcn/Makefile
@@ -0,0 +1,17 @@
+# contrib/tcn/Makefile
+
+MODULES = tcn
+
+EXTENSION = tcn
+DATA = tcn--1.0.sql
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/tcn
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/contrib/tcn/tcn--1.0.sql b/contrib/tcn/tcn--1.0.sql
new file mode 100644
index 0000000..027a4ef
--- /dev/null
+++ b/contrib/tcn/tcn--1.0.sql
@@ -0,0 +1,9 @@
+/* contrib/tcn/tcn--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION tcn" to load this file. \quit
+
+CREATE FUNCTION triggered_change_notification()
+RETURNS pg_catalog.trigger
+AS 'MODULE_PATHNAME'
+LANGUAGE C;
diff --git a/contrib/tcn/tcn.c b/contrib/tcn/tcn.c
new file mode 100644
index 0000000..314632d
--- /dev/null
+++ b/contrib/tcn/tcn.c
@@ -0,0 +1,184 @@
+/*-------------------------------------------------------------------------
+ *
+ * tcn.c
+ *	  triggered change notification support for PostgreSQL
+ *
+ * Portions Copyright (c) 2011-2012, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ *	  contrib/tcn/tcn.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "executor/spi.h"
+#include "commands/async.h"
+#include "commands/trigger.h"
+#include "lib/stringinfo.h"
+#include "utils/rel.h"
+#include "utils/syscache.h"
+
+
+PG_MODULE_MAGIC;
+
+
+/* forward declarations */
+Datum		triggered_change_notification(PG_FUNCTION_ARGS);
+
+
+/*
+ * Copy from s (for source) to r (for result), wrapping with q (quote)
+ * characters and doubling any quote characters found.
+ */
+static void
+strcpy_quoted(StringInfo r, const char *s, const char q)
+{
+	appendStringInfoCharMacro(r, q);
+	while (*s)
+	{
+		if (*s == q)
+			appendStringInfoCharMacro(r, q);
+		appendStringInfoCharMacro(r, *s);
+		s++;
+	}
+	appendStringInfoCharMacro(r, q);
+}
+
+/*
+ * triggered_change_notification
+ *
+ * This trigger function will send a notification of data modification with
+ * primary key values.	The channel will be "tcn" unless the trigger is
+ * created with a parameter, in which case that parameter will be used.
+ */
+PG_FUNCTION_INFO_V1(triggered_change_notification);
+
+Datum
+triggered_change_notification(PG_FUNCTION_ARGS)
+{
+	TriggerData *trigdata = (TriggerData *) fcinfo->context;
+	Trigger    *trigger;
+	int			nargs;
+	HeapTuple	trigtuple;
+	Relation	rel;
+	TupleDesc	tupdesc;
+	char	   *channel;
+	char		operation;
+	StringInfo	payload = makeStringInfo();
+	bool		foundPK;
+
+	List	   *indexoidlist;
+	ListCell   *indexoidscan;
+
+	/* make sure it's called as a trigger */
+	if (!CALLED_AS_TRIGGER(fcinfo))
+		ereport(ERROR,
+				(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
+		errmsg("triggered_change_notification: must be called as trigger")));
+
+	/* and that it's called after the change */
+	if (!TRIGGER_FIRED_AFTER(trigdata->tg_event))
+		ereport(ERROR,
+				(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
+				 errmsg("triggered_change_notification: must be called after the change")));
+
+	/* and that it's called for each row */
+	if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
+		ereport(ERROR,
+				(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
+				 errmsg("triggered_change_notification: must be called for each row")));
+
+	if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
+		operation = 'I';
+	else if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
+		operation = 'U';
+	else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
+		operation = 'D';
+	else
+	{
+		elog(ERROR, "triggered_change_notification: trigger fired by unrecognized operation");
+		operation = 'X';		/* silence compiler warning */
+	}
+
+	trigger = trigdata->tg_trigger;
+	nargs = trigger->tgnargs;
+	if (nargs > 1)
+		ereport(ERROR,
+				(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
+				 errmsg("triggered_change_notification: must not be called with more than one parameter")));
+
+	if (nargs == 0)
+		channel = "tcn";
+	else
+		channel = trigger->tgargs[0];
+
+	/* get tuple data */
+	trigtuple = trigdata->tg_trigtuple;
+	rel = trigdata->tg_relation;
+	tupdesc = rel->rd_att;
+
+	foundPK = false;
+
+	/*
+	 * Get the list of index OIDs for the table from the relcache, and look up
+	 * each one in the pg_index syscache until we find one marked primary key
+	 * (hopefully there isn't more than one such).
+	 */
+	indexoidlist = RelationGetIndexList(rel);
+
+	foreach(indexoidscan, indexoidlist)
+	{
+		Oid			indexoid = lfirst_oid(indexoidscan);
+		HeapTuple	indexTuple;
+		Form_pg_index index;
+
+		indexTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexoid));
+		if (!HeapTupleIsValid(indexTuple))		/* should not happen */
+			elog(ERROR, "cache lookup failed for index %u", indexoid);
+		index = (Form_pg_index) GETSTRUCT(indexTuple);
+		/* we're only interested if it is the primary key */
+		if (index->indisprimary)
+		{
+			int			numatts = index->indnatts;
+
+			if (numatts > 0)
+			{
+				int			i;
+
+				foundPK = true;
+
+				strcpy_quoted(payload, RelationGetRelationName(rel), '"');
+				appendStringInfoCharMacro(payload, ',');
+				appendStringInfoCharMacro(payload, operation);
+
+				for (i = 0; i < numatts; i++)
+				{
+					int			colno = index->indkey.values[i];
+
+					appendStringInfoCharMacro(payload, ',');
+					strcpy_quoted(payload, NameStr((tupdesc->attrs[colno - 1])->attname), '"');
+					appendStringInfoCharMacro(payload, '=');
+					strcpy_quoted(payload, SPI_getvalue(trigtuple, tupdesc, colno), '\'');
+				}
+
+				Async_Notify(channel, payload->data);
+			}
+			ReleaseSysCache(indexTuple);
+			break;
+		}
+		ReleaseSysCache(indexTuple);
+	}
+
+	list_free(indexoidlist);
+
+	if (!foundPK)
+		ereport(ERROR,
+				(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
+				 errmsg("triggered_change_notification: must be called on a table with a primary key")));
+
+	return PointerGetDatum(NULL);		/* after trigger; value doesn't matter */
+}
diff --git a/contrib/tcn/tcn.control b/contrib/tcn/tcn.control
new file mode 100644
index 0000000..8abfd19
--- /dev/null
+++ b/contrib/tcn/tcn.control
@@ -0,0 +1,5 @@
+# tcn extension
+comment = 'Triggered change notifications'
+default_version = '1.0'
+module_pathname = '$libdir/tcn'
+relocatable = true
diff --git a/doc/src/sgml/contrib.sgml b/doc/src/sgml/contrib.sgml
index adf09ca..d4da4ee 100644
--- a/doc/src/sgml/contrib.sgml
+++ b/doc/src/sgml/contrib.sgml
@@ -128,6 +128,7 @@ CREATE EXTENSION <replaceable>module_name</> FROM unpackaged;
  &contrib-spi;
  &sslinfo;
  &tablefunc;
+ &tcn;
  &test-parser;
  &tsearch2;
  &unaccent;
diff --git a/doc/src/sgml/filelist.sgml b/doc/src/sgml/filelist.sgml
index b96dd65..b5d3c6d 100644
--- a/doc/src/sgml/filelist.sgml
+++ b/doc/src/sgml/filelist.sgml
@@ -136,6 +136,7 @@
 <!ENTITY sepgsql         SYSTEM "sepgsql.sgml">
 <!ENTITY sslinfo         SYSTEM "sslinfo.sgml">
 <!ENTITY tablefunc       SYSTEM "tablefunc.sgml">
+<!ENTITY tcn             SYSTEM "tcn.sgml">
 <!ENTITY test-parser     SYSTEM "test-parser.sgml">
 <!ENTITY tsearch2        SYSTEM "tsearch2.sgml">
 <!ENTITY unaccent      SYSTEM "unaccent.sgml">
diff --git a/doc/src/sgml/tcn.sgml b/doc/src/sgml/tcn.sgml
new file mode 100644
index 0000000..af830df
--- /dev/null
+++ b/doc/src/sgml/tcn.sgml
@@ -0,0 +1,72 @@
+<!-- doc/src/sgml/tcn.sgml -->
+
+<sect1 id="tcn" xreflabel="tcn">
+ <title>tcn</title>
+
+ <indexterm zone="tcn">
+  <primary>tcn</primary>
+ </indexterm>
+
+ <indexterm zone="tcn">
+  <primary>triggered_change_notification</primary>
+ </indexterm>
+
+ <para>
+  The <filename>tcn</> module provides a trigger function that notifies
+  listeners of changes to any table on which it is attached.  It must be
+  used as an <literal>AFTER</> trigger <literal>FOR EACH ROW</>.
+ </para>
+
+ <para>
+  Only one parameter may be suupplied to the function in a
+  <literal>CREATE TRIGGER</> statement, and that is optional.  If supplied
+  it will be used for the channel name for the notifications.  If omitted
+  <literal>tcn</> will be used for the channel name.
+ </para>
+
+ <para>
+  The payload of the notifications consists of the table name, a letter to
+  indicate which type of operation was performed, and column name/value pairs
+  for primary key columns.  Each part is separated from the next by a comma.
+  For ease of parsing using regular expressions, table and column names are
+  always wrapped in double quotes, and data values are always wrapped in
+  single quotes.  Embedded quotes are doubled.
+ </para>
+
+ <para>
+  A brief example of using the extension follows.
+
+<programlisting>
+test=# create table tcndata
+test-#   (
+test(#     a int not null,
+test(#     b date not null,
+test(#     c text,
+test(#     primary key (a, b)
+test(#   );
+NOTICE:  CREATE TABLE / PRIMARY KEY will create implicit index "tcndata_pkey" for table "tcndata"
+CREATE TABLE
+test=# create trigger tcndata_tcn_trigger
+test-#   after insert or update or delete on tcndata
+test-#   for each row execute procedure triggered_change_notification();
+CREATE TRIGGER
+test=# listen tcn;
+LISTEN
+test=# insert into tcndata values (1, date '2012-12-22', 'one'),
+test-#                            (1, date '2012-12-23', 'another'),
+test-#                            (2, date '2012-12-23', 'two');
+INSERT 0 3
+Asynchronous notification "tcn" with payload ""tcndata",I,"a"='1',"b"='2012-12-22'" received from server process with PID 22770.
+Asynchronous notification "tcn" with payload ""tcndata",I,"a"='1',"b"='2012-12-23'" received from server process with PID 22770.
+Asynchronous notification "tcn" with payload ""tcndata",I,"a"='2',"b"='2012-12-23'" received from server process with PID 22770.
+test=# update tcndata set c = 'uno' where a = 1;
+UPDATE 2
+Asynchronous notification "tcn" with payload ""tcndata",U,"a"='1',"b"='2012-12-22'" received from server process with PID 22770.
+Asynchronous notification "tcn" with payload ""tcndata",U,"a"='1',"b"='2012-12-23'" received from server process with PID 22770.
+test=# delete from tcndata where a = 1 and b = date '2012-12-22';
+DELETE 1
+Asynchronous notification "tcn" with payload ""tcndata",D,"a"='1',"b"='2012-12-22'" received from server process with PID 22770.
+</programlisting>
+ </para>
+
+</sect1>

commit c8397bd6d909620457b532e252ce2e437c787d4f
Author: Robert Haas <rhaas@postgresql.org>
Date:   Thu Jan 19 21:52:51 2012 -0500

    Clarify that bgwriter no longer handles checkpoints.
    
    Text by Peter Geoghegan.

diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index 0cc3296..58e8ede 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -1342,7 +1342,8 @@ SET ENABLE_SEQSCAN TO OFF;
         <para>
          In each round, no more than this many buffers will be written
          by the background writer.  Setting this to zero disables
-         background writing (except for checkpoint activity).
+         background writing.  (Note that checkpoints, which are managed by
+         a separate, dedicated auxiliary process, are unaffected.)
          The default value is 100 buffers.
          This parameter can only be set in the <filename>postgresql.conf</>
          file or on the server command line.

commit ecf7a2ea387a409f6152c05f60e5765c7436b8df
Author: Bruce Momjian <bruce@momjian.us>
Date:   Thu Jan 19 16:04:34 2012 -0500

    Add pg_upgrade C comment about why we check all relkinds for regtypes.
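
A brief sketch of the situation the new comment describes, with hypothetical
names (not part of the patch): a view stores no data, yet its row type can
carry a reg* column into a real table, which is why the check scans all
relkinds rather than only ordinary tables.

    CREATE VIEW v_reg AS SELECT oid::regproc AS proc FROM pg_proc;
    -- The view's row type is usable as a column type, so a reg* dependency
    -- now exists in table storage even though the view itself holds no data.
    CREATE TABLE t_reg (c v_reg);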

diff --git a/contrib/pg_upgrade/check.c b/contrib/pg_upgrade/check.c
index 8594d26..891eb9a 100644
--- a/contrib/pg_upgrade/check.c
+++ b/contrib/pg_upgrade/check.c
@@ -644,6 +644,11 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
 		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
 		PGconn	   *conn = connectToServer(cluster, active_db->db_name);
 
+		/*
+		 *	While several relkinds don't store any data, e.g. views, they
+		 *	can be used to define data types of other columns, so we
+		 *	check all relkinds.
+		 */
 		res = executeQueryOrDie(conn,
 								"SELECT n.nspname, c.relname, a.attname "
 								"FROM	pg_catalog.pg_class c, "

commit cc53a1e7ccfa762bda70e1b6a15bfd929bf1b4e3
Author: Robert Haas <rhaas@postgresql.org>
Date:   Thu Jan 19 15:23:04 2012 -0500

    Add bitwise AND, OR, and NOT operators for macaddr data type.
    
    Brendan Jurd, reviewed by Fujii Masao
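
A minimal sketch of what the new operators allow, using values consistent with
the regression data below:

    -- Mask an address down to its manufacturer (OUI) prefix:
    SELECT macaddr '08:00:2b:01:02:03' & macaddr 'ff:ff:ff:00:00:00';
    --> 08:00:2b:00:00:00

    -- Bitwise NOT of the same address:
    SELECT ~ macaddr '08:00:2b:01:02:03';
    --> f7:ff:d4:fe:fd:fc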

diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml
index ff9b8b0..43b72f6 100644
--- a/doc/src/sgml/func.sgml
+++ b/doc/src/sgml/func.sgml
@@ -8300,7 +8300,9 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple
    <para>
     The <type>macaddr</type> type also supports the standard relational
     operators (<literal>&gt;</literal>, <literal>&lt;=</literal>, etc.) for
-    lexicographical ordering.
+    lexicographical ordering, and the bitwise arithmetic operators
+    (<literal>~</literal>, <literal>&amp;</literal> and <literal>|</literal>)
+    for NOT, AND and OR.
    </para>
 
   </sect1>
diff --git a/src/backend/utils/adt/mac.c b/src/backend/utils/adt/mac.c
index 333f4bc..958ff54 100644
--- a/src/backend/utils/adt/mac.c
+++ b/src/backend/utils/adt/mac.c
@@ -242,6 +242,59 @@ hashmacaddr(PG_FUNCTION_ARGS)
 }
 
 /*
+ * Arithmetic functions: bitwise NOT, AND, OR.
+ */
+Datum
+macaddr_not(PG_FUNCTION_ARGS)
+{
+	macaddr	   *addr = PG_GETARG_MACADDR_P(0);
+	macaddr	   *result;
+
+	result = (macaddr *) palloc(sizeof(macaddr));
+	result->a = ~addr->a;
+	result->b = ~addr->b;
+	result->c = ~addr->c;
+	result->d = ~addr->d;
+	result->e = ~addr->e;
+	result->f = ~addr->f;
+	PG_RETURN_MACADDR_P(result);
+}
+
+Datum
+macaddr_and(PG_FUNCTION_ARGS)
+{
+	macaddr	   *addr1 = PG_GETARG_MACADDR_P(0);
+	macaddr	   *addr2 = PG_GETARG_MACADDR_P(1);
+	macaddr	   *result;
+
+	result = (macaddr *) palloc(sizeof(macaddr));
+	result->a = addr1->a & addr2->a;
+	result->b = addr1->b & addr2->b;
+	result->c = addr1->c & addr2->c;
+	result->d = addr1->d & addr2->d;
+	result->e = addr1->e & addr2->e;
+	result->f = addr1->f & addr2->f;
+	PG_RETURN_MACADDR_P(result);
+}
+
+Datum
+macaddr_or(PG_FUNCTION_ARGS)
+{
+	macaddr	   *addr1 = PG_GETARG_MACADDR_P(0);
+	macaddr	   *addr2 = PG_GETARG_MACADDR_P(1);
+	macaddr	   *result;
+
+	result = (macaddr *) palloc(sizeof(macaddr));
+	result->a = addr1->a | addr2->a;
+	result->b = addr1->b | addr2->b;
+	result->c = addr1->c | addr2->c;
+	result->d = addr1->d | addr2->d;
+	result->e = addr1->e | addr2->e;
+	result->f = addr1->f | addr2->f;
+	PG_RETURN_MACADDR_P(result);
+}
+
+/*
  *	Truncation function to allow comparing mac manufacturers.
  *	From suggestion by Alex Pilosov <alex@pilosoft.com>
  */
diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h
index 9e799c6..285fae3 100644
--- a/src/include/catalog/catversion.h
+++ b/src/include/catalog/catversion.h
@@ -53,6 +53,6 @@
  */
 
 /*							yyyymmddN */
-#define CATALOG_VERSION_NO	201201191
+#define CATALOG_VERSION_NO	201201192
 
 #endif
diff --git a/src/include/catalog/pg_operator.h b/src/include/catalog/pg_operator.h
index f19865d..ead5af6 100644
--- a/src/include/catalog/pg_operator.h
+++ b/src/include/catalog/pg_operator.h
@@ -1116,6 +1116,13 @@ DESCR("greater than");
 DATA(insert OID = 1225 (  ">="	   PGNSP PGUID b f f 829 829	 16 1223 1222 macaddr_ge scalargtsel scalargtjoinsel ));
 DESCR("greater than or equal");
 
+DATA(insert OID = 3147 (  "~"	   PGNSP PGUID l f f	  0 829 829 0 0 macaddr_not - - ));
+DESCR("bitwise not");
+DATA(insert OID = 3148 (  "&"	   PGNSP PGUID b f f	829 829 829 0 0 macaddr_and - - ));
+DESCR("bitwise and");
+DATA(insert OID = 3149 (  "|"	   PGNSP PGUID b f f	829 829 829 0 0 macaddr_or - - ));
+DESCR("bitwise or");
+
 /* INET type (these also support CIDR via implicit cast) */
 DATA(insert OID = 1201 (  "="	   PGNSP PGUID b t t 869 869	 16 1201 1202 network_eq eqsel eqjoinsel ));
 DESCR("equal");
diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h
index 9994468..b6ac195 100644
--- a/src/include/catalog/pg_proc.h
+++ b/src/include/catalog/pg_proc.h
@@ -2039,6 +2039,9 @@ DATA(insert OID = 834 (  macaddr_ge			PGNSP PGUID 12 1 0 0 0 f f f t f i 2 0 16
 DATA(insert OID = 835 (  macaddr_ne			PGNSP PGUID 12 1 0 0 0 f f f t f i 2 0 16 "829 829" _null_ _null_ _null_ _null_	macaddr_ne _null_ _null_ _null_ ));
 DATA(insert OID = 836 (  macaddr_cmp		PGNSP PGUID 12 1 0 0 0 f f f t f i 2 0 23 "829 829" _null_ _null_ _null_ _null_	macaddr_cmp _null_ _null_ _null_ ));
 DESCR("less-equal-greater");
+DATA(insert OID = 3144 (  macaddr_not		PGNSP PGUID 12 1 0 0 0 f f f t f i 1 0 829 "829" _null_ _null_ _null_ _null_	macaddr_not _null_ _null_ _null_ ));
+DATA(insert OID = 3145 (  macaddr_and		PGNSP PGUID 12 1 0 0 0 f f f t f i 2 0 829 "829 829" _null_ _null_ _null_ _null_	macaddr_and _null_ _null_ _null_ ));
+DATA(insert OID = 3146 (  macaddr_or		PGNSP PGUID 12 1 0 0 0 f f f t f i 2 0 829 "829 829" _null_ _null_ _null_ _null_	macaddr_or _null_ _null_ _null_ ));
 
 /* for inet type support */
 DATA(insert OID = 910 (  inet_in			PGNSP PGUID 12 1 0 0 0 f f f t f i 1 0 869 "2275" _null_ _null_ _null_ _null_ inet_in _null_ _null_ _null_ ));
diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h
index 46b2f3b..68179d5 100644
--- a/src/include/utils/builtins.h
+++ b/src/include/utils/builtins.h
@@ -900,6 +900,9 @@ extern Datum macaddr_eq(PG_FUNCTION_ARGS);
 extern Datum macaddr_ge(PG_FUNCTION_ARGS);
 extern Datum macaddr_gt(PG_FUNCTION_ARGS);
 extern Datum macaddr_ne(PG_FUNCTION_ARGS);
+extern Datum macaddr_not(PG_FUNCTION_ARGS);
+extern Datum macaddr_and(PG_FUNCTION_ARGS);
+extern Datum macaddr_or(PG_FUNCTION_ARGS);
 extern Datum macaddr_trunc(PG_FUNCTION_ARGS);
 extern Datum hashmacaddr(PG_FUNCTION_ARGS);
 
diff --git a/src/test/regress/expected/macaddr.out b/src/test/regress/expected/macaddr.out
index 0b2a96d..50d0369 100644
--- a/src/test/regress/expected/macaddr.out
+++ b/src/test/regress/expected/macaddr.out
@@ -103,4 +103,52 @@ SELECT b <> '08:00:2b:01:02:03' FROM macaddr_data WHERE a = 1; -- false
  f
 (1 row)
 
+SELECT ~b                       FROM macaddr_data;
+     ?column?      
+-------------------
+ f7:ff:d4:fe:fd:fc
+ f7:ff:d4:fe:fd:fc
+ f7:ff:d4:fe:fd:fc
+ f7:ff:d4:fe:fd:fc
+ f7:ff:d4:fe:fd:fc
+ f7:ff:d4:fe:fd:fc
+ f7:ff:d4:fe:fd:fb
+ f7:ff:d4:fe:fd:fd
+ f7:ff:d5:fe:fd:fc
+ f7:ff:d3:fe:fd:fc
+ f7:ff:d5:fe:fd:fb
+(11 rows)
+
+SELECT  b & '00:00:00:ff:ff:ff' FROM macaddr_data;
+     ?column?      
+-------------------
+ 00:00:00:01:02:03
+ 00:00:00:01:02:03
+ 00:00:00:01:02:03
+ 00:00:00:01:02:03
+ 00:00:00:01:02:03
+ 00:00:00:01:02:03
+ 00:00:00:01:02:04
+ 00:00:00:01:02:02
+ 00:00:00:01:02:03
+ 00:00:00:01:02:03
+ 00:00:00:01:02:04
+(11 rows)
+
+SELECT  b | '01:02:03:04:05:06' FROM macaddr_data;
+     ?column?      
+-------------------
+ 09:02:2b:05:07:07
+ 09:02:2b:05:07:07
+ 09:02:2b:05:07:07
+ 09:02:2b:05:07:07
+ 09:02:2b:05:07:07
+ 09:02:2b:05:07:07
+ 09:02:2b:05:07:06
+ 09:02:2b:05:07:06
+ 09:02:2b:05:07:07
+ 09:02:2f:05:07:07
+ 09:02:2b:05:07:06
+(11 rows)
+
 DROP TABLE macaddr_data;
diff --git a/src/test/regress/sql/macaddr.sql b/src/test/regress/sql/macaddr.sql
index ce8d920..1ccf501 100644
--- a/src/test/regress/sql/macaddr.sql
+++ b/src/test/regress/sql/macaddr.sql
@@ -35,4 +35,8 @@ SELECT b =  '08:00:2b:01:02:03' FROM macaddr_data WHERE a = 1; -- true
 SELECT b <> '08:00:2b:01:02:04' FROM macaddr_data WHERE a = 1; -- true
 SELECT b <> '08:00:2b:01:02:03' FROM macaddr_data WHERE a = 1; -- false
 
+SELECT ~b                       FROM macaddr_data;
+SELECT  b & '00:00:00:ff:ff:ff' FROM macaddr_data;
+SELECT  b | '01:02:03:04:05:06' FROM macaddr_data;
+
 DROP TABLE macaddr_data;

commit 4f42b546fd87a80be30c53a0f2c897acb826ad52
Author: Magnus Hagander <magnus@hagander.net>
Date:   Thu Jan 19 14:19:20 2012 +0100

    Separate state from query string in pg_stat_activity
    
    This separates the state (running/idle/idleintransaction etc) into
    its own field ("state"), and leaves the query field containing just
    query text.
    
    The query text will now mean "current query" when a query is running
    and "last query" in other states. Accordingly,the field has been
    renamed from current_query to query.
    
    Since backwards compatibility was broken anyway to make that change, the procpid
    field has also been renamed to pid - along with the same field in
    pg_stat_replication for consistency.
    
    Scott Mead and Magnus Hagander, review work from Greg Smith
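
To illustrate the reshaped view, a minimal monitoring sketch against the
renamed columns (not part of the patch):

    -- Statements currently executing:
    SELECT pid, waiting, query
      FROM pg_stat_activity
     WHERE state = 'active';

    -- Sessions idle in a transaction, with the last statement they ran:
    SELECT pid, state_change, query
      FROM pg_stat_activity
     WHERE state = 'idle in transaction';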

diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml
index a12a9a2..2259180 100644
--- a/doc/src/sgml/monitoring.sgml
+++ b/doc/src/sgml/monitoring.sgml
@@ -242,20 +242,20 @@ postgres: <replaceable>user</> <replaceable>database</> <replaceable>host</> <re
 
     <tbody>
      <row>
-      <entry><structname>pg_stat_activity</><indexterm><primary>pg_stat_activity</primary></indexterm></entry>
-      <entry>One row per server process, showing database OID, database
-      name, process <acronym>ID</>, user OID, user name, application name,
-      client's address, host name (if available), and port number, times at
-      which the server process, current transaction, and current query began
-      execution, process's waiting status, and text of the current query.
-      The columns that report data on the current query are available unless
-      the parameter <varname>track_activities</varname> has been turned off.
-      Furthermore, these columns are only visible if the user examining
-      the view is a superuser or the same as the user owning the process
-      being reported on.  The client's host name will be available only if
-      <xref linkend="guc-log-hostname"> is set or if the user's host name
-      needed to be looked up during <filename>pg_hba.conf</filename>
-      processing.
+      <entry>
+       <structname>pg_stat_activity</structname>
+       <indexterm><primary>pg_stat_activity</primary></indexterm>
+      </entry>
+      <entry>
+       <para>One row per server process, showing information related to
+        each connection to the server. Unless the
+        <xref linkend="guc-track-activities"> parameter has been turned
+         off, it is possible to monitor state and query information of
+         all running processes.
+       </para>
+       <para>
+        See <xref linkend="pg-stat-activity-view"> for more details.
+       </para>
      </entry>
      </row>
 
@@ -529,6 +529,210 @@ postgres: <replaceable>user</> <replaceable>database</> <replaceable>host</> <re
    into the kernel's handling of I/O.
   </para>
 
+  <table id="pg-stat-activity-view" xreflabel="pg_stat_activity">
+   <title>pg_stat_activity view</title>
+
+   <tgroup cols="3">
+    <thead>
+    <row>
+      <entry>Column</entry>
+      <entry>Type</entry>
+      <entry>Description</entry>
+     </row>
+    </thead>
+
+   <tbody>
+    <row>
+     <entry>datid</entry>
+     <entry><type>oid</></entry>
+     <entry>The oid of the database the backend is connected to.</entry>
+    </row>
+    <row>
+     <entry>datname</entry>
+     <entry><type>name</></entry>
+     <entry>The name of the database the backend is connected to.</entry>
+    </row>
+    <row>
+     <entry>pid</entry>
+     <entry><type>integer</></entry>
+     <entry>The process ID of the backend.</entry>
+    </row>
+    <row>
+     <entry>usesysid</entry>
+     <entry><type>oid</></entry>
+     <entry>The id of the user logged into the backend.</entry>
+    </row>
+    <row>
+     <entry>usename</entry>
+     <entry><type>name</></entry>
+     <entry>The name of the user logged into the backend.</entry>
+    </row>
+    <row>
+     <entry>application_name</entry>
+     <entry><type>text</></entry>
+     <entry>The name of the application that has initiated the connection
+      to the backend.</entry>
+    </row>
+    <row>
+     <entry>client_addr</entry>
+     <entry><type>inet</></entry>
+     <entry>The remote IP of the client connected to the backend.
+     If this field is not set, it indicates that the client is either:
+      <itemizedlist spacing="compact" mark="bullet">
+       <listitem>
+        <para>
+         Connected via unix sockets on the server machine
+        </para>
+       </listitem>
+       <listitem>
+        <para>An internal process such as autovacuum</para>
+       </listitem>
+     </itemizedlist>
+     </entry>
+    </row>
+    <row>
+     <entry>client_hostname</entry>
+     <entry><type>text</></entry>
+     <entry>
+      If available, the hostname of the client as reported by a
+      reverse lookup of <structfield>client_addr</>. This field will
+      only be set when <xref linkend="guc-log-hostname"> is enabled.
+     </entry>
+    </row>
+    <row>
+     <entry>client_port</entry>
+     <entry><type>integer</></entry>
+     <entry>
+      The remote TCP port that the client is using for communication
+      to the backend, or <symbol>NULL</> if a unix socket is used.
+     </entry>
+    </row>
+    <row>
+     <entry>backend_start</entry>
+     <entry><type>timestamp with time zone</></entry>
+     <entry>
+      The time when this process was started,  i.e. when the
+      client connected to the server.
+     </entry>
+    </row>
+    <row>
+     <entry>xact_start</entry>
+     <entry><type>timestamp with time zone</></entry>
+     <entry>
+      The time when the current transaction was started. If the client is
+      using autocommit for transactions, this value is equal to the
+      query_start column.
+     </entry>
+    </row>
+    <row>
+     <entry>query_start</entry>
+     <entry><type>timestamp with time zone</></entry>
+     <entry>
+      The time when the currently active query started, or if
+      <structfield>state</> is <literal>idle</>, when the last query
+      was started.
+     </entry>
+    </row>
+    <row>
+     <entry>state_change</entry>
+     <entry><type>timestamp with time zone</></entry>
+     <entry>The time when the <structfield>state</> was last changed.</entry>
+    </row>
+    <row>
+     <entry>waiting</entry>
+     <entry><type>boolean</></entry>
+     <entry>
+      Boolean indicating if a backend is currently waiting on a lock.
+     </entry>
+    </row>
+    <row>
+     <entry>state</entry>
+     <entry><type>text</></entry>
+     <entry>
+       The <structfield>state</> of the currently running query.
+       Can be one of:
+       <variablelist>
+         <varlistentry>
+           <term>active</term>
+           <listitem>
+             <para>
+              The backend is executing a query.
+             </para>
+           </listitem>
+           </varlistentry>
+           <varlistentry>
+             <term>idle</term>
+           <listitem>
+             <para>
+               There is no query executing in the backend.
+             </para>
+           </listitem>
+         </varlistentry>
+         <varlistentry>
+           <term>idle in transaction</term>
+           <listitem>
+             <para>
+               The backend is in a transaction, but is not currently
+               executing a query.
+             </para>
+           </listitem>
+         </varlistentry>
+         <varlistentry>
+           <term>idle in transaction (aborted)</term>
+           <listitem>
+             <para>
+               This state is similar to <literal>idle in transaction</>,
+               except one of the statements in the transaction caused an error.
+             </para>
+           </listitem>
+         </varlistentry>
+         <varlistentry>
+           <term>fastpath function call</term>
+           <listitem>
+             <para>
+               The backend is executing a fast-path function.
+             </para>
+           </listitem>
+         </varlistentry>
+         <varlistentry>
+           <term>disabled</term>
+           <listitem>
+             <para>
+               This state indicates that <xref linkend="guc-track-activities">
+               is disabled.
+             </para>
+           </listitem>
+         </varlistentry>
+       </variablelist>
+      <note>
+       <para>
+        The <structfield>waiting</> and <structfield>state</> columns are
+        independent.  If a query is in the <literal>active</> state,
+        it may or may not be <literal>waiting</>.  If a query is
+        <literal>active</> and <structfield>waiting</> is true, it means
+        that the query is being executed, but is being blocked by a lock
+        somewhere in the system.
+       </para>
+      </note>
+     </entry>
+    </row>
+    <row>
+     <entry>query</entry>
+     <entry><type>text</></entry>
+     <entry>
+      The most recent query that the backend has executed. If
+      <structfield>state</> is <literal>active</> this means the currently
+      executing query. In all other states, it means the last query that was
+      executed.
+     </entry>
+    </row>
+   </tbody>
+   </tgroup>
+  </table>
+
+ <sect3 id="monitoring-stats-functions">
+  <title>Statistics Access Functions</title>
+
   <para>
    Other ways of looking at the statistics can be set up by writing
    queries that use the same underlying statistics access functions as
@@ -1264,6 +1468,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS procpid,
 </programlisting>
   </para>
 
+ </sect3>
  </sect2>
  </sect1>
 
diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql
index 50ba20c..e25914b 100644
--- a/src/backend/catalog/system_views.sql
+++ b/src/backend/catalog/system_views.sql
@@ -520,7 +520,7 @@ CREATE VIEW pg_stat_activity AS
     SELECT
             S.datid AS datid,
             D.datname AS datname,
-            S.procpid,
+            S.pid,
             S.usesysid,
             U.rolname AS usename,
             S.application_name,
@@ -530,15 +530,17 @@ CREATE VIEW pg_stat_activity AS
             S.backend_start,
             S.xact_start,
             S.query_start,
+            S.state_change,
             S.waiting,
-            S.current_query
+            S.state,
+            S.query
     FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_authid U
     WHERE S.datid = D.oid AND
             S.usesysid = U.oid;
 
 CREATE VIEW pg_stat_replication AS
     SELECT
-            S.procpid,
+            S.pid,
             S.usesysid,
             U.rolname AS usename,
             S.application_name,
@@ -556,7 +558,7 @@ CREATE VIEW pg_stat_replication AS
     FROM pg_stat_get_activity(NULL) AS S, pg_authid U,
             pg_stat_get_wal_senders() AS W
     WHERE S.usesysid = U.oid AND
-            S.procpid = W.procpid;
+            S.pid = W.pid;
 
 CREATE VIEW pg_stat_database AS
     SELECT
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index f858a6d..e84e21c 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -2781,7 +2781,7 @@ autovac_report_activity(autovac_table *tab)
 	/* Set statement_timestamp() to current time for pg_stat_activity */
 	SetCurrentStatementStartTimestamp();
 
-	pgstat_report_activity(activity);
+	pgstat_report_activity(STATE_RUNNING, activity);
 }
 
 /*
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index 323d42b..3ab8dfe 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -2410,12 +2410,14 @@ pgstat_bestart(void)
 	beentry->st_procpid = MyProcPid;
 	beentry->st_proc_start_timestamp = proc_start_timestamp;
 	beentry->st_activity_start_timestamp = 0;
+	beentry->st_state_start_timestamp = 0;
 	beentry->st_xact_start_timestamp = 0;
 	beentry->st_databaseid = MyDatabaseId;
 	beentry->st_userid = userid;
 	beentry->st_clientaddr = clientaddr;
 	beentry->st_clienthostname[0] = '\0';
 	beentry->st_waiting = false;
+	beentry->st_state = STATE_UNDEFINED;
 	beentry->st_appname[0] = '\0';
 	beentry->st_activity[0] = '\0';
 	/* Also make sure the last byte in each string area is always 0 */
@@ -2476,39 +2478,70 @@ pgstat_beshutdown_hook(int code, Datum arg)
  *
  *	Called from tcop/postgres.c to report what the backend is actually doing
  *	(usually "<IDLE>" or the start of the query to be executed).
+ *
+ * All updates of the status entry follow the protocol of bumping
+ * st_changecount before and after.  We use a volatile pointer here to
+ * ensure the compiler doesn't try to get cute.
  * ----------
  */
 void
-pgstat_report_activity(const char *cmd_str)
+pgstat_report_activity(BackendState state, const char *cmd_str)
 {
 	volatile PgBackendStatus *beentry = MyBEEntry;
 	TimestampTz start_timestamp;
+	TimestampTz current_timestamp;
 	int			len;
 
 	TRACE_POSTGRESQL_STATEMENT_STATUS(cmd_str);
 
-	if (!pgstat_track_activities || !beentry)
+	if (!beentry)
 		return;
 
 	/*
 	 * To minimize the time spent modifying the entry, fetch all the needed
 	 * data first.
 	 */
-	start_timestamp = GetCurrentStatementStartTimestamp();
+	current_timestamp = GetCurrentTimestamp();
 
-	len = strlen(cmd_str);
-	len = pg_mbcliplen(cmd_str, len, pgstat_track_activity_query_size - 1);
+	if (!pgstat_track_activities && beentry->st_state != STATE_DISABLED)
+	{
+		/*
+		 * Track activities is disabled, but we have a non-disabled state set.
+		 * That means the status changed - so as our last update, tell the
+		 * collector that we disabled it and will no longer update.
+		 */
+		beentry->st_changecount++;
+		beentry->st_state = STATE_DISABLED;
+		beentry->st_state_start_timestamp = current_timestamp;
+		beentry->st_changecount++;
+		Assert((beentry->st_changecount & 1) == 0);
+		return;
+	}
 
 	/*
-	 * Update my status entry, following the protocol of bumping
-	 * st_changecount before and after.  We use a volatile pointer here to
-	 * ensure the compiler doesn't try to get cute.
+	 * Fetch more data before we start modifying the entry
+	 */
+	start_timestamp = GetCurrentStatementStartTimestamp();
+	if (cmd_str != NULL)
+	{
+		len = strlen(cmd_str);
+		len = pg_mbcliplen(cmd_str, len, pgstat_track_activity_query_size - 1);
+	}
+
+	/*
+	 * Now update the status entry
 	 */
 	beentry->st_changecount++;
 
-	beentry->st_activity_start_timestamp = start_timestamp;
-	memcpy((char *) beentry->st_activity, cmd_str, len);
-	beentry->st_activity[len] = '\0';
+	beentry->st_state = state;
+	beentry->st_state_start_timestamp = current_timestamp;
+
+	if (cmd_str != NULL)
+	{
+		memcpy((char *) beentry->st_activity, cmd_str, len);
+		beentry->st_activity[len] = '\0';
+		beentry->st_activity_start_timestamp = start_timestamp;
+	}
 
 	beentry->st_changecount++;
 	Assert((beentry->st_changecount & 1) == 0);
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 59a287f..49a3969 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -809,7 +809,7 @@ exec_simple_query(const char *query_string)
 	 */
 	debug_query_string = query_string;
 
-	pgstat_report_activity(query_string);
+	pgstat_report_activity(STATE_RUNNING, query_string);
 
 	TRACE_POSTGRESQL_QUERY_START(query_string);
 
@@ -1134,7 +1134,7 @@ exec_parse_message(const char *query_string,	/* string to execute */
 	 */
 	debug_query_string = query_string;
 
-	pgstat_report_activity(query_string);
+	pgstat_report_activity(STATE_RUNNING, query_string);
 
 	set_ps_display("PARSE", false);
 
@@ -1429,7 +1429,7 @@ exec_bind_message(StringInfo input_message)
 	 */
 	debug_query_string = psrc->query_string;
 
-	pgstat_report_activity(psrc->query_string);
+	pgstat_report_activity(STATE_RUNNING, psrc->query_string);
 
 	set_ps_display("BIND", false);
 
@@ -1836,7 +1836,7 @@ exec_execute_message(const char *portal_name, long max_rows)
 	 */
 	debug_query_string = sourceText;
 
-	pgstat_report_activity(sourceText);
+	pgstat_report_activity(STATE_RUNNING, sourceText);
 
 	set_ps_display(portal->commandTag, false);
 
@@ -3811,12 +3811,12 @@ PostgresMain(int argc, char *argv[], const char *username)
 			if (IsAbortedTransactionBlockState())
 			{
 				set_ps_display("idle in transaction (aborted)", false);
-				pgstat_report_activity("<IDLE> in transaction (aborted)");
+				pgstat_report_activity(STATE_IDLEINTRANSACTION_ABORTED, NULL);
 			}
 			else if (IsTransactionOrTransactionBlock())
 			{
 				set_ps_display("idle in transaction", false);
-				pgstat_report_activity("<IDLE> in transaction");
+				pgstat_report_activity(STATE_IDLEINTRANSACTION, NULL);
 			}
 			else
 			{
@@ -3824,7 +3824,7 @@ PostgresMain(int argc, char *argv[], const char *username)
 				pgstat_report_stat(false);
 
 				set_ps_display("idle", false);
-				pgstat_report_activity("<IDLE>");
+				pgstat_report_activity(STATE_IDLE, NULL);
 			}
 
 			ReadyForQuery(whereToSendOutput);
@@ -3944,7 +3944,7 @@ PostgresMain(int argc, char *argv[], const char *username)
 				SetCurrentStatementStartTimestamp();
 
 				/* Report query to various monitoring facilities. */
-				pgstat_report_activity("<FASTPATH> function call");
+				pgstat_report_activity(STATE_FASTPATH, NULL);
 				set_ps_display("<FASTPATH>", false);
 
 				/* start an xact for this function invocation */
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index b4986d8..ed39f27 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -507,31 +507,34 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
 
 		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
 
-		tupdesc = CreateTemplateTupleDesc(12, false);
+		tupdesc = CreateTemplateTupleDesc(14, false);
 		TupleDescInitEntry(tupdesc, (AttrNumber) 1, "datid",
 						   OIDOID, -1, 0);
-		/* This should have been called 'pid';  can't change it. 2011-06-11 */
-		TupleDescInitEntry(tupdesc, (AttrNumber) 2, "procpid",
+		TupleDescInitEntry(tupdesc, (AttrNumber) 2, "pid",
 						   INT4OID, -1, 0);
 		TupleDescInitEntry(tupdesc, (AttrNumber) 3, "usesysid",
 						   OIDOID, -1, 0);
 		TupleDescInitEntry(tupdesc, (AttrNumber) 4, "application_name",
 						   TEXTOID, -1, 0);
-		TupleDescInitEntry(tupdesc, (AttrNumber) 5, "current_query",
+		TupleDescInitEntry(tupdesc, (AttrNumber) 5, "state",
 						   TEXTOID, -1, 0);
-		TupleDescInitEntry(tupdesc, (AttrNumber) 6, "waiting",
+		TupleDescInitEntry(tupdesc, (AttrNumber) 6, "query",
+						   TEXTOID, -1, 0);
+		TupleDescInitEntry(tupdesc, (AttrNumber) 7, "waiting",
 						   BOOLOID, -1, 0);
-		TupleDescInitEntry(tupdesc, (AttrNumber) 7, "act_start",
+		TupleDescInitEntry(tupdesc, (AttrNumber) 8, "act_start",
+						   TIMESTAMPTZOID, -1, 0);
+		TupleDescInitEntry(tupdesc, (AttrNumber) 9, "query_start",
 						   TIMESTAMPTZOID, -1, 0);
-		TupleDescInitEntry(tupdesc, (AttrNumber) 8, "query_start",
+		TupleDescInitEntry(tupdesc, (AttrNumber) 10, "backend_start",
 						   TIMESTAMPTZOID, -1, 0);
-		TupleDescInitEntry(tupdesc, (AttrNumber) 9, "backend_start",
+		TupleDescInitEntry(tupdesc, (AttrNumber) 11, "state_change",
 						   TIMESTAMPTZOID, -1, 0);
-		TupleDescInitEntry(tupdesc, (AttrNumber) 10, "client_addr",
+		TupleDescInitEntry(tupdesc, (AttrNumber) 12, "client_addr",
 						   INETOID, -1, 0);
-		TupleDescInitEntry(tupdesc, (AttrNumber) 11, "client_hostname",
+		TupleDescInitEntry(tupdesc, (AttrNumber) 13, "client_hostname",
 						   TEXTOID, -1, 0);
-		TupleDescInitEntry(tupdesc, (AttrNumber) 12, "client_port",
+		TupleDescInitEntry(tupdesc, (AttrNumber) 14, "client_port",
 						   INT4OID, -1, 0);
 
 		funcctx->tuple_desc = BlessTupleDesc(tupdesc);
@@ -584,8 +587,8 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
 	if (funcctx->call_cntr < funcctx->max_calls)
 	{
 		/* for each row */
-		Datum		values[12];
-		bool		nulls[12];
+		Datum		values[14];
+		bool		nulls[14];
 		HeapTuple	tuple;
 		PgBackendStatus *beentry;
 		SockAddr	zero_clientaddr;
@@ -610,8 +613,8 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
 			for (i = 0; i < sizeof(nulls) / sizeof(nulls[0]); i++)
 				nulls[i] = true;
 
-			nulls[4] = false;
-			values[4] = CStringGetTextDatum("<backend information not available>");
+			nulls[5] = false;
+			values[5] = CStringGetTextDatum("<backend information not available>");
 
 			tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
 			SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
@@ -629,40 +632,69 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
 		/* Values only available to same user or superuser */
 		if (superuser() || beentry->st_userid == GetUserId())
 		{
-			if (*(beentry->st_activity) == '\0')
+			switch (beentry->st_state)
+			{
+				case STATE_IDLE:
+					values[4] = CStringGetTextDatum("idle");
+					break;
+				case STATE_RUNNING:
+					values[4] = CStringGetTextDatum("active");
+					break;
+				case STATE_IDLEINTRANSACTION:
+					values[4] = CStringGetTextDatum("idle in transaction");
+					break;
+				case STATE_FASTPATH:
+					values[4] = CStringGetTextDatum("fastpath function call");
+					break;
+				case STATE_IDLEINTRANSACTION_ABORTED:
+					values[4] = CStringGetTextDatum("idle in transaction (aborted)");
+					break;
+				case STATE_DISABLED:
+					values[4] = CStringGetTextDatum("disabled");
+					break;
+				case STATE_UNDEFINED:
+					nulls[4] = true;
+					break;
+			}
+			if (beentry->st_state == STATE_UNDEFINED ||
+				beentry->st_state == STATE_DISABLED)
 			{
-				values[4] = CStringGetTextDatum("<command string not enabled>");
+				values[5] = CStringGetTextDatum("");
 			}
 			else
 			{
-				values[4] = CStringGetTextDatum(beentry->st_activity);
+				values[5] = CStringGetTextDatum(beentry->st_activity);
 			}
-
-			values[5] = BoolGetDatum(beentry->st_waiting);
+			values[6] = BoolGetDatum(beentry->st_waiting);
 
 			if (beentry->st_xact_start_timestamp != 0)
-				values[6] = TimestampTzGetDatum(beentry->st_xact_start_timestamp);
+				values[7] = TimestampTzGetDatum(beentry->st_xact_start_timestamp);
 			else
-				nulls[6] = true;
+				nulls[7] = true;
 
 			if (beentry->st_activity_start_timestamp != 0)
-				values[7] = TimestampTzGetDatum(beentry->st_activity_start_timestamp);
+				values[8] = TimestampTzGetDatum(beentry->st_activity_start_timestamp);
 			else
-				nulls[7] = true;
+				nulls[8] = true;
 
 			if (beentry->st_proc_start_timestamp != 0)
-				values[8] = TimestampTzGetDatum(beentry->st_proc_start_timestamp);
+				values[9] = TimestampTzGetDatum(beentry->st_proc_start_timestamp);
 			else
-				nulls[8] = true;
+				nulls[9] = true;
+
+			if (beentry->st_state_start_timestamp != 0)
+				values[10] = TimestampTzGetDatum(beentry->st_state_start_timestamp);
+			else
+				nulls[10] = true;
 
 			/* A zeroed client addr means we don't know */
 			memset(&zero_clientaddr, 0, sizeof(zero_clientaddr));
 			if (memcmp(&(beentry->st_clientaddr), &zero_clientaddr,
 					   sizeof(zero_clientaddr) == 0))
 			{
-				nulls[9] = true;
-				nulls[10] = true;
 				nulls[11] = true;
+				nulls[12] = true;
+				nulls[13] = true;
 			}
 			else
 			{
@@ -686,19 +718,19 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
 					if (ret == 0)
 					{
 						clean_ipv6_addr(beentry->st_clientaddr.addr.ss_family, remote_host);
-						values[9] = DirectFunctionCall1(inet_in,
+						values[11] = DirectFunctionCall1(inet_in,
 											   CStringGetDatum(remote_host));
 						if (beentry->st_clienthostname)
-							values[10] = CStringGetTextDatum(beentry->st_clienthostname);
+							values[12] = CStringGetTextDatum(beentry->st_clienthostname);
 						else
-							nulls[10] = true;
-						values[11] = Int32GetDatum(atoi(remote_port));
+							nulls[12] = true;
+						values[13] = Int32GetDatum(atoi(remote_port));
 					}
 					else
 					{
-						nulls[9] = true;
-						nulls[10] = true;
 						nulls[11] = true;
+						nulls[12] = true;
+						nulls[13] = true;
 					}
 				}
 				else if (beentry->st_clientaddr.addr.ss_family == AF_UNIX)
@@ -709,30 +741,32 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
 					 * connections we have no permissions to view, or with
 					 * errors.
 					 */
-					nulls[9] = true;
-					nulls[10] = true;
-					values[11] = DatumGetInt32(-1);
+					nulls[11] = true;
+					nulls[12] = true;
+					values[13] = DatumGetInt32(-1);
 				}
 				else
 				{
 					/* Unknown address type, should never happen */
-					nulls[9] = true;
-					nulls[10] = true;
 					nulls[11] = true;
+					nulls[12] = true;
+					nulls[13] = true;
 				}
 			}
 		}
 		else
 		{
 			/* No permissions to view data about this session */
-			values[4] = CStringGetTextDatum("<insufficient privilege>");
-			nulls[5] = true;
+			values[5] = CStringGetTextDatum("<insufficient privilege>");
+			nulls[4] = true;
 			nulls[6] = true;
 			nulls[7] = true;
 			nulls[8] = true;
 			nulls[9] = true;
 			nulls[10] = true;
 			nulls[11] = true;
+			nulls[12] = true;
+			nulls[13] = true;
 		}
 
 		tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h
index f7af5fd..9e799c6 100644
--- a/src/include/catalog/catversion.h
+++ b/src/include/catalog/catversion.h
@@ -53,6 +53,6 @@
  */
 
 /*							yyyymmddN */
-#define CATALOG_VERSION_NO	201112241
+#define CATALOG_VERSION_NO	201201191
 
 #endif
diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h
index 355c61a..9994468 100644
--- a/src/include/catalog/pg_proc.h
+++ b/src/include/catalog/pg_proc.h
@@ -2573,9 +2573,9 @@ DATA(insert OID = 3057 ( pg_stat_get_autoanalyze_count PGNSP PGUID 12 1 0 0 0 f
 DESCR("statistics: number of auto analyzes for a table");
 DATA(insert OID = 1936 (  pg_stat_get_backend_idset		PGNSP PGUID 12 1 100 0 0 f f f t t s 0 0 23 "" _null_ _null_ _null_ _null_ pg_stat_get_backend_idset _null_ _null_ _null_ ));
 DESCR("statistics: currently active backend IDs");
-DATA(insert OID = 2022 (  pg_stat_get_activity			PGNSP PGUID 12 1 100 0 0 f f f f t s 1 0 2249 "23" "{23,26,23,26,25,25,16,1184,1184,1184,869,25,23}" "{i,o,o,o,o,o,o,o,o,o,o,o,o}" "{pid,datid,procpid,usesysid,application_name,current_query,waiting,xact_start,query_start,backend_start,client_addr,client_hostname,client_port}" _null_ pg_stat_get_activity _null_ _null_ _null_ ));
+DATA(insert OID = 2022 (  pg_stat_get_activity			PGNSP PGUID 12 1 100 0 0 f f f f t s 1 0 2249 "23" "{23,26,23,26,25,25,25,16,1184,1184,1184,1184,869,25,23}" "{i,o,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{pid,datid,pid,usesysid,application_name,state,query,waiting,xact_start,query_start,backend_start,state_change,client_addr,client_hostname,client_port}" _null_ pg_stat_get_activity _null_ _null_ _null_ ));
 DESCR("statistics: information about currently active backends");
-DATA(insert OID = 3099 (  pg_stat_get_wal_senders	PGNSP PGUID 12 1 10 0 0 f f f f t s 0 0 2249 "" "{23,25,25,25,25,25,23,25}" "{o,o,o,o,o,o,o,o}" "{procpid,state,sent_location,write_location,flush_location,replay_location,sync_priority,sync_state}" _null_ pg_stat_get_wal_senders _null_ _null_ _null_ ));
+DATA(insert OID = 3099 (  pg_stat_get_wal_senders	PGNSP PGUID 12 1 10 0 0 f f f f t s 0 0 2249 "" "{23,25,25,25,25,25,23,25}" "{o,o,o,o,o,o,o,o}" "{pid,state,sent_location,write_location,flush_location,replay_location,sync_priority,sync_state}" _null_ pg_stat_get_wal_senders _null_ _null_ _null_ ));
 DESCR("statistics: information about currently active replication");
 DATA(insert OID = 2026 (  pg_backend_pid				PGNSP PGUID 12 1 0 0 0 f f f t f s 0 0 23 "" _null_ _null_ _null_ _null_ pg_backend_pid _null_ _null_ _null_ ));
 DESCR("statistics: current backend PID");
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index b8c6d82..fa52447 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -589,10 +589,25 @@ typedef struct PgStat_GlobalStats
 
 
 /* ----------
+ * Backend states
+ * ----------
+ */
+typedef enum BackendState {
+	STATE_UNDEFINED,
+	STATE_IDLE,
+	STATE_RUNNING,
+	STATE_IDLEINTRANSACTION,
+	STATE_FASTPATH,
+	STATE_IDLEINTRANSACTION_ABORTED,
+	STATE_DISABLED
+} BackendState;
+
+/* ----------
  * Shared-memory data structures
  * ----------
  */
 
+
 /* ----------
  * PgBackendStatus
  *
@@ -622,6 +637,7 @@ typedef struct PgBackendStatus
 	TimestampTz st_proc_start_timestamp;
 	TimestampTz st_xact_start_timestamp;
 	TimestampTz st_activity_start_timestamp;
+	TimestampTz st_state_start_timestamp;
 
 	/* Database OID, owning user's OID, connection client address */
 	Oid			st_databaseid;
@@ -632,6 +648,9 @@ typedef struct PgBackendStatus
 	/* Is backend currently waiting on an lmgr lock? */
 	bool		st_waiting;
 
+	/* current state */
+	BackendState	st_state;
+
 	/* application name; MUST be null-terminated */
 	char	   *st_appname;
 
@@ -715,7 +734,7 @@ extern void pgstat_report_recovery_conflict(int reason);
 extern void pgstat_initialize(void);
 extern void pgstat_bestart(void);
 
-extern void pgstat_report_activity(const char *cmd_str);
+extern void pgstat_report_activity(BackendState state, const char *cmd_str);
 extern void pgstat_report_appname(const char *appname);
 extern void pgstat_report_xact_timestamp(TimestampTz tstamp);
 extern void pgstat_report_waiting(bool waiting);
diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out
index 454e1f9..d26881f 100644
--- a/src/test/regress/expected/rules.out
+++ b/src/test/regress/expected/rules.out
@@ -1292,13 +1292,13 @@ SELECT viewname, definition FROM pg_views WHERE schemaname <> 'information_schem
  pg_seclabels                    | ((((((((SELECT l.objoid, l.classoid, l.objsubid, CASE WHEN (rel.relkind = 'r'::"char") THEN 'table'::text WHEN (rel.relkind = 'v'::"char") THEN 'view'::text WHEN (rel.relkind = 'S'::"char") THEN 'sequence'::text WHEN (rel.relkind = 'f'::"char") THEN 'foreign table'::text ELSE NULL::text END AS objtype, rel.relnamespace AS objnamespace, CASE WHEN pg_table_is_visible(rel.oid) THEN quote_ident((rel.relname)::text) ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((rel.relname)::text)) END AS objname, l.provider, l.label FROM ((pg_seclabel l JOIN pg_class rel ON (((l.classoid = rel.tableoid) AND (l.objoid = rel.oid)))) JOIN pg_namespace nsp ON ((rel.relnamespace = nsp.oid))) WHERE (l.objsubid = 0) UNION ALL SELECT l.objoid, l.classoid, l.objsubid, 'column'::text AS objtype, rel.relnamespace AS objnamespace, ((CASE WHEN pg_table_is_visible(rel.oid) THEN quote_ident((rel.relname)::text) ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((rel.relname)::text)) END || '.'::text) || (att.attname)::text) AS objname, l.provider, l.label FROM (((pg_seclabel l JOIN pg_class rel ON (((l.classoid = rel.tableoid) AND (l.objoid = rel.oid)))) JOIN pg_attribute att ON (((rel.oid = att.attrelid) AND (l.objsubid = att.attnum)))) JOIN pg_namespace nsp ON ((rel.relnamespace = nsp.oid))) WHERE (l.objsubid <> 0)) UNION ALL SELECT l.objoid, l.classoid, l.objsubid, CASE WHEN (pro.proisagg = true) THEN 'aggregate'::text WHEN (pro.proisagg = false) THEN 'function'::text ELSE NULL::text END AS objtype, pro.pronamespace AS objnamespace, (((CASE WHEN pg_function_is_visible(pro.oid) THEN quote_ident((pro.proname)::text) ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((pro.proname)::text)) END || '('::text) || pg_get_function_arguments(pro.oid)) || ')'::text) AS objname, l.provider, l.label FROM ((pg_seclabel l JOIN pg_proc pro ON (((l.classoid = pro.tableoid) AND (l.objoid = pro.oid)))) JOIN pg_namespace nsp ON ((pro.pronamespace = nsp.oid))) WHERE (l.objsubid = 0)) UNION ALL SELECT l.objoid, l.classoid, l.objsubid, CASE WHEN (typ.typtype = 'd'::"char") THEN 'domain'::text ELSE 'type'::text END AS objtype, typ.typnamespace AS objnamespace, CASE WHEN pg_type_is_visible(typ.oid) THEN quote_ident((typ.typname)::text) ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((typ.typname)::text)) END AS objname, l.provider, l.label FROM ((pg_seclabel l JOIN pg_type typ ON (((l.classoid = typ.tableoid) AND (l.objoid = typ.oid)))) JOIN pg_namespace nsp ON ((typ.typnamespace = nsp.oid))) WHERE (l.objsubid = 0)) UNION ALL SELECT l.objoid, l.classoid, l.objsubid, 'large object'::text AS objtype, NULL::oid AS objnamespace, (l.objoid)::text AS objname, l.provider, l.label FROM (pg_seclabel l JOIN pg_largeobject_metadata lom ON ((l.objoid = lom.oid))) WHERE ((l.classoid = ('pg_largeobject'::regclass)::oid) AND (l.objsubid = 0))) UNION ALL SELECT l.objoid, l.classoid, l.objsubid, 'language'::text AS objtype, NULL::oid AS objnamespace, quote_ident((lan.lanname)::text) AS objname, l.provider, l.label FROM (pg_seclabel l JOIN pg_language lan ON (((l.classoid = lan.tableoid) AND (l.objoid = lan.oid)))) WHERE (l.objsubid = 0)) UNION ALL SELECT l.objoid, l.classoid, l.objsubid, 'schema'::text AS objtype, nsp.oid AS objnamespace, quote_ident((nsp.nspname)::text) AS objname, l.provider, l.label FROM (pg_seclabel l JOIN pg_namespace nsp ON (((l.classoid = nsp.tableoid) AND (l.objoid = nsp.oid)))) WHERE (l.objsubid = 0)) UNION ALL SELECT 
l.objoid, l.classoid, 0 AS objsubid, 'database'::text AS objtype, NULL::oid AS objnamespace, quote_ident((dat.datname)::text) AS objname, l.provider, l.label FROM (pg_shseclabel l JOIN pg_database dat ON (((l.classoid = dat.tableoid) AND (l.objoid = dat.oid))))) UNION ALL SELECT l.objoid, l.classoid, 0 AS objsubid, 'tablespace'::text AS objtype, NULL::oid AS objnamespace, quote_ident((spc.spcname)::text) AS objname, l.provider, l.label FROM (pg_shseclabel l JOIN pg_tablespace spc ON (((l.classoid = spc.tableoid) AND (l.objoid = spc.oid))))) UNION ALL SELECT l.objoid, l.classoid, 0 AS objsubid, 'role'::text AS objtype, NULL::oid AS objnamespace, quote_ident((rol.rolname)::text) AS objname, l.provider, l.label FROM (pg_shseclabel l JOIN pg_authid rol ON (((l.classoid = rol.tableoid) AND (l.objoid = rol.oid))));
  pg_settings                     | SELECT a.name, a.setting, a.unit, a.category, a.short_desc, a.extra_desc, a.context, a.vartype, a.source, a.min_val, a.max_val, a.enumvals, a.boot_val, a.reset_val, a.sourcefile, a.sourceline FROM pg_show_all_settings() a(name, setting, unit, category, short_desc, extra_desc, context, vartype, source, min_val, max_val, enumvals, boot_val, reset_val, sourcefile, sourceline);
  pg_shadow                       | SELECT pg_authid.rolname AS usename, pg_authid.oid AS usesysid, pg_authid.rolcreatedb AS usecreatedb, pg_authid.rolsuper AS usesuper, pg_authid.rolcatupdate AS usecatupd, pg_authid.rolreplication AS userepl, pg_authid.rolpassword AS passwd, (pg_authid.rolvaliduntil)::abstime AS valuntil, s.setconfig AS useconfig FROM (pg_authid LEFT JOIN pg_db_role_setting s ON (((pg_authid.oid = s.setrole) AND (s.setdatabase = (0)::oid)))) WHERE pg_authid.rolcanlogin;
- pg_stat_activity                | SELECT s.datid, d.datname, s.procpid, s.usesysid, u.rolname AS usename, s.application_name, s.client_addr, s.client_hostname, s.client_port, s.backend_start, s.xact_start, s.query_start, s.waiting, s.current_query FROM pg_database d, pg_stat_get_activity(NULL::integer) s(datid, procpid, usesysid, application_name, current_query, waiting, xact_start, query_start, backend_start, client_addr, client_hostname, client_port), pg_authid u WHERE ((s.datid = d.oid) AND (s.usesysid = u.oid));
+ pg_stat_activity                | SELECT s.datid, d.datname, s.pid, s.usesysid, u.rolname AS usename, s.application_name, s.client_addr, s.client_hostname, s.client_port, s.backend_start, s.xact_start, s.query_start, s.state_change, s.waiting, s.state, s.query FROM pg_database d, pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, waiting, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port), pg_authid u WHERE ((s.datid = d.oid) AND (s.usesysid = u.oid));
  pg_stat_all_indexes             | SELECT c.oid AS relid, i.oid AS indexrelid, n.nspname AS schemaname, c.relname, i.relname AS indexrelname, pg_stat_get_numscans(i.oid) AS idx_scan, pg_stat_get_tuples_returned(i.oid) AS idx_tup_read, pg_stat_get_tuples_fetched(i.oid) AS idx_tup_fetch FROM (((pg_class c JOIN pg_index x ON ((c.oid = x.indrelid))) JOIN pg_class i ON ((i.oid = x.indexrelid))) LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char"]));
  pg_stat_all_tables              | SELECT c.oid AS relid, n.nspname AS schemaname, c.relname, pg_stat_get_numscans(c.oid) AS seq_scan, pg_stat_get_tuples_returned(c.oid) AS seq_tup_read, (sum(pg_stat_get_numscans(i.indexrelid)))::bigint AS idx_scan, ((sum(pg_stat_get_tuples_fetched(i.indexrelid)))::bigint + pg_stat_get_tuples_fetched(c.oid)) AS idx_tup_fetch, pg_stat_get_tuples_inserted(c.oid) AS n_tup_ins, pg_stat_get_tuples_updated(c.oid) AS n_tup_upd, pg_stat_get_tuples_deleted(c.oid) AS n_tup_del, pg_stat_get_tuples_hot_updated(c.oid) AS n_tup_hot_upd, pg_stat_get_live_tuples(c.oid) AS n_live_tup, pg_stat_get_dead_tuples(c.oid) AS n_dead_tup, pg_stat_get_last_vacuum_time(c.oid) AS last_vacuum, pg_stat_get_last_autovacuum_time(c.oid) AS last_autovacuum, pg_stat_get_last_analyze_time(c.oid) AS last_analyze, pg_stat_get_last_autoanalyze_time(c.oid) AS last_autoanalyze, pg_stat_get_vacuum_count(c.oid) AS vacuum_count, pg_stat_get_autovacuum_count(c.oid) AS autovacuum_count, pg_stat_get_analyze_count(c.oid) AS analyze_count, pg_stat_get_autoanalyze_count(c.oid) AS autoanalyze_count FROM ((pg_class c LEFT JOIN pg_index i ON ((c.oid = i.indrelid))) LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char"])) GROUP BY c.oid, n.nspname, c.relname;
  pg_stat_bgwriter                | SELECT pg_stat_get_bgwriter_timed_checkpoints() AS checkpoints_timed, pg_stat_get_bgwriter_requested_checkpoints() AS checkpoints_req, pg_stat_get_bgwriter_buf_written_checkpoints() AS buffers_checkpoint, pg_stat_get_bgwriter_buf_written_clean() AS buffers_clean, pg_stat_get_bgwriter_maxwritten_clean() AS maxwritten_clean, pg_stat_get_buf_written_backend() AS buffers_backend, pg_stat_get_buf_fsync_backend() AS buffers_backend_fsync, pg_stat_get_buf_alloc() AS buffers_alloc, pg_stat_get_bgwriter_stat_reset_time() AS stats_reset;
  pg_stat_database                | SELECT d.oid AS datid, d.datname, pg_stat_get_db_numbackends(d.oid) AS numbackends, pg_stat_get_db_xact_commit(d.oid) AS xact_commit, pg_stat_get_db_xact_rollback(d.oid) AS xact_rollback, (pg_stat_get_db_blocks_fetched(d.oid) - pg_stat_get_db_blocks_hit(d.oid)) AS blks_read, pg_stat_get_db_blocks_hit(d.oid) AS blks_hit, pg_stat_get_db_tuples_returned(d.oid) AS tup_returned, pg_stat_get_db_tuples_fetched(d.oid) AS tup_fetched, pg_stat_get_db_tuples_inserted(d.oid) AS tup_inserted, pg_stat_get_db_tuples_updated(d.oid) AS tup_updated, pg_stat_get_db_tuples_deleted(d.oid) AS tup_deleted, pg_stat_get_db_conflict_all(d.oid) AS conflicts, pg_stat_get_db_stat_reset_time(d.oid) AS stats_reset FROM pg_database d;
  pg_stat_database_conflicts      | SELECT d.oid AS datid, d.datname, pg_stat_get_db_conflict_tablespace(d.oid) AS confl_tablespace, pg_stat_get_db_conflict_lock(d.oid) AS confl_lock, pg_stat_get_db_conflict_snapshot(d.oid) AS confl_snapshot, pg_stat_get_db_conflict_bufferpin(d.oid) AS confl_bufferpin, pg_stat_get_db_conflict_startup_deadlock(d.oid) AS confl_deadlock FROM pg_database d;
- pg_stat_replication             | SELECT s.procpid, s.usesysid, u.rolname AS usename, s.application_name, s.client_addr, s.client_hostname, s.client_port, s.backend_start, w.state, w.sent_location, w.write_location, w.flush_location, w.replay_location, w.sync_priority, w.sync_state FROM pg_stat_get_activity(NULL::integer) s(datid, procpid, usesysid, application_name, current_query, waiting, xact_start, query_start, backend_start, client_addr, client_hostname, client_port), pg_authid u, pg_stat_get_wal_senders() w(procpid, state, sent_location, write_location, flush_location, replay_location, sync_priority, sync_state) WHERE ((s.usesysid = u.oid) AND (s.procpid = w.procpid));
+ pg_stat_replication             | SELECT s.pid, s.usesysid, u.rolname AS usename, s.application_name, s.client_addr, s.client_hostname, s.client_port, s.backend_start, w.state, w.sent_location, w.write_location, w.flush_location, w.replay_location, w.sync_priority, w.sync_state FROM pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, waiting, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port), pg_authid u, pg_stat_get_wal_senders() w(pid, state, sent_location, write_location, flush_location, replay_location, sync_priority, sync_state) WHERE ((s.usesysid = u.oid) AND (s.pid = w.pid));
  pg_stat_sys_indexes             | SELECT pg_stat_all_indexes.relid, pg_stat_all_indexes.indexrelid, pg_stat_all_indexes.schemaname, pg_stat_all_indexes.relname, pg_stat_all_indexes.indexrelname, pg_stat_all_indexes.idx_scan, pg_stat_all_indexes.idx_tup_read, pg_stat_all_indexes.idx_tup_fetch FROM pg_stat_all_indexes WHERE ((pg_stat_all_indexes.schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (pg_stat_all_indexes.schemaname ~ '^pg_toast'::text));
  pg_stat_sys_tables              | SELECT pg_stat_all_tables.relid, pg_stat_all_tables.schemaname, pg_stat_all_tables.relname, pg_stat_all_tables.seq_scan, pg_stat_all_tables.seq_tup_read, pg_stat_all_tables.idx_scan, pg_stat_all_tables.idx_tup_fetch, pg_stat_all_tables.n_tup_ins, pg_stat_all_tables.n_tup_upd, pg_stat_all_tables.n_tup_del, pg_stat_all_tables.n_tup_hot_upd, pg_stat_all_tables.n_live_tup, pg_stat_all_tables.n_dead_tup, pg_stat_all_tables.last_vacuum, pg_stat_all_tables.last_autovacuum, pg_stat_all_tables.last_analyze, pg_stat_all_tables.last_autoanalyze, pg_stat_all_tables.vacuum_count, pg_stat_all_tables.autovacuum_count, pg_stat_all_tables.analyze_count, pg_stat_all_tables.autoanalyze_count FROM pg_stat_all_tables WHERE ((pg_stat_all_tables.schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (pg_stat_all_tables.schemaname ~ '^pg_toast'::text));
  pg_stat_user_functions          | SELECT p.oid AS funcid, n.nspname AS schemaname, p.proname AS funcname, pg_stat_get_function_calls(p.oid) AS calls, (pg_stat_get_function_time(p.oid) / 1000) AS total_time, (pg_stat_get_function_self_time(p.oid) / 1000) AS self_time FROM (pg_proc p LEFT JOIN pg_namespace n ON ((n.oid = p.pronamespace))) WHERE ((p.prolang <> (12)::oid) AND (pg_stat_get_function_calls(p.oid) IS NOT NULL));
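
For illustration only (not part of this commit; assumes a server built with
this patch, with column names and state labels taken from the view definition
and pgstatfuncs.c changes above):

    -- state is one of: active, idle, idle in transaction,
    -- idle in transaction (aborted), fastpath function call, disabled
    SELECT pid, state, state_change, query
      FROM pg_stat_activity
     WHERE state <> 'idle';

With track_activities turned off, state reads 'disabled' and query is empty,
replacing the old '<command string not enabled>' placeholder.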

commit fa352d662e57fa150158b9cb0a8f127250f8c97f
Author: Heikki Linnakangas <heikki.linnakangas@iki.fi>
Date:   Thu Jan 19 13:06:30 2012 +0200

    Make pg_relation_size() and friends return NULL if the object doesn't exist.
    
    That avoids errors when the functions are used in queries like "SELECT
    pg_relation_size(oid) FROM pg_class", and a table is dropped concurrently.
    
    Phil Sorber
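
For illustration only (not part of this commit; the COALESCE wrapper is an
optional convenience, not something the patch adds): a catalog-wide size
report no longer aborts if a table is dropped while the query runs, because
the dropped relation simply yields NULL:

    SELECT relname, COALESCE(pg_relation_size(oid), 0) AS bytes
      FROM pg_class;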

diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml
index 7d7aba7..ff9b8b0 100644
--- a/doc/src/sgml/func.sgml
+++ b/doc/src/sgml/func.sgml
@@ -14980,6 +14980,11 @@ postgres=# SELECT * FROM pg_xlogfile_name_offset(pg_stop_backup());
    </para>
 
    <para>
+    If an OID that does not represent an existing object is passed as
+    argument to one of the above functions, NULL is returned.
+   </para>
+
+   <para>
     The functions shown in <xref linkend="functions-admin-dblocation"> assist
     in identifying the specific disk files associated with database objects.
    </para>
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index cff061c..26a8c01 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -120,12 +120,6 @@ calculate_database_size(Oid dbOid)
 
 	FreeDir(dirdesc);
 
-	/* Complain if we found no trace of the DB at all */
-	if (!totalsize)
-		ereport(ERROR,
-				(ERRCODE_UNDEFINED_DATABASE,
-				 errmsg("database with OID %u does not exist", dbOid)));
-
 	return totalsize;
 }
 
@@ -133,8 +127,14 @@ Datum
 pg_database_size_oid(PG_FUNCTION_ARGS)
 {
 	Oid			dbOid = PG_GETARG_OID(0);
+	int64		size;
 
-	PG_RETURN_INT64(calculate_database_size(dbOid));
+	size = calculate_database_size(dbOid);
+
+	if (size == 0)
+		PG_RETURN_NULL();
+
+	PG_RETURN_INT64(size);
 }
 
 Datum
@@ -142,13 +142,20 @@ pg_database_size_name(PG_FUNCTION_ARGS)
 {
 	Name		dbName = PG_GETARG_NAME(0);
 	Oid			dbOid = get_database_oid(NameStr(*dbName), false);
+	int64		size;
+
+	size = calculate_database_size(dbOid);
 
-	PG_RETURN_INT64(calculate_database_size(dbOid));
+	if (size == 0)
+		PG_RETURN_NULL();
+
+	PG_RETURN_INT64(size);
 }
 
 
 /*
- * calculate total size of tablespace
+ * Calculate total size of tablespace. Returns -1 if the tablespace directory
+ * cannot be found.
  */
 static int64
 calculate_tablespace_size(Oid tblspcOid)
@@ -184,10 +191,7 @@ calculate_tablespace_size(Oid tblspcOid)
 	dirdesc = AllocateDir(tblspcPath);
 
 	if (!dirdesc)
-		ereport(ERROR,
-				(errcode_for_file_access(),
-				 errmsg("could not open tablespace directory \"%s\": %m",
-						tblspcPath)));
+		return -1;
 
 	while ((direntry = ReadDir(dirdesc, tblspcPath)) != NULL)
 	{
@@ -226,8 +230,14 @@ Datum
 pg_tablespace_size_oid(PG_FUNCTION_ARGS)
 {
 	Oid			tblspcOid = PG_GETARG_OID(0);
+	int64		size;
+
+	size = calculate_tablespace_size(tblspcOid);
 
-	PG_RETURN_INT64(calculate_tablespace_size(tblspcOid));
+	if (size < 0)
+		PG_RETURN_NULL();
+
+	PG_RETURN_INT64(size);
 }
 
 Datum
@@ -235,8 +245,14 @@ pg_tablespace_size_name(PG_FUNCTION_ARGS)
 {
 	Name		tblspcName = PG_GETARG_NAME(0);
 	Oid			tblspcOid = get_tablespace_oid(NameStr(*tblspcName), false);
+	int64		size;
 
-	PG_RETURN_INT64(calculate_tablespace_size(tblspcOid));
+	size = calculate_tablespace_size(tblspcOid);
+
+	if (size < 0)
+		PG_RETURN_NULL();
+
+	PG_RETURN_INT64(size);
 }
 
 
@@ -289,7 +305,17 @@ pg_relation_size(PG_FUNCTION_ARGS)
 	Relation	rel;
 	int64		size;
 
-	rel = relation_open(relOid, AccessShareLock);
+	rel = try_relation_open(relOid, AccessShareLock);
+
+	/*
+	 * Before 9.2, we used to throw an error if the relation didn't exist, but
+	 * that makes queries like "SELECT pg_relation_size(oid) FROM pg_class"
+	 * less robust, because while we scan pg_class with an MVCC snapshot,
+	 * someone else might drop the table. It's better to return NULL for
+	 * already-dropped tables than throw an error and abort the whole query.
+	 */
+	if (rel == NULL)
+		PG_RETURN_NULL();
 
 	size = calculate_relation_size(&(rel->rd_node), rel->rd_backend,
 							  forkname_to_number(text_to_cstring(forkName)));
@@ -339,14 +365,11 @@ calculate_toast_table_size(Oid toastrelid)
  * those won't have attached toast tables, but they can have multiple forks.
  */
 static int64
-calculate_table_size(Oid relOid)
+calculate_table_size(Relation rel)
 {
 	int64		size = 0;
-	Relation	rel;
 	ForkNumber	forkNum;
 
-	rel = relation_open(relOid, AccessShareLock);
-
 	/*
 	 * heap size, including FSM and VM
 	 */
@@ -360,8 +383,6 @@ calculate_table_size(Oid relOid)
 	if (OidIsValid(rel->rd_rel->reltoastrelid))
 		size += calculate_toast_table_size(rel->rd_rel->reltoastrelid);
 
-	relation_close(rel, AccessShareLock);
-
 	return size;
 }
 
@@ -371,12 +392,9 @@ calculate_table_size(Oid relOid)
  * Can be applied safely to an index, but you'll just get zero.
  */
 static int64
-calculate_indexes_size(Oid relOid)
+calculate_indexes_size(Relation rel)
 {
 	int64		size = 0;
-	Relation	rel;
-
-	rel = relation_open(relOid, AccessShareLock);
 
 	/*
 	 * Aggregate all indexes on the given relation
@@ -405,8 +423,6 @@ calculate_indexes_size(Oid relOid)
 		list_free(index_oids);
 	}
 
-	relation_close(rel, AccessShareLock);
-
 	return size;
 }
 
@@ -414,16 +430,38 @@ Datum
 pg_table_size(PG_FUNCTION_ARGS)
 {
 	Oid			relOid = PG_GETARG_OID(0);
+	Relation	rel;
+	int64		size;
+
+	rel = try_relation_open(relOid, AccessShareLock);
+
+	if (rel == NULL)
+		PG_RETURN_NULL();
 
-	PG_RETURN_INT64(calculate_table_size(relOid));
+	size = calculate_table_size(rel);
+
+	relation_close(rel, AccessShareLock);
+
+	PG_RETURN_INT64(size);
 }
 
 Datum
 pg_indexes_size(PG_FUNCTION_ARGS)
 {
 	Oid			relOid = PG_GETARG_OID(0);
+	Relation	rel;
+	int64		size;
 
-	PG_RETURN_INT64(calculate_indexes_size(relOid));
+	rel = try_relation_open(relOid, AccessShareLock);
+
+	if (rel == NULL)
+		PG_RETURN_NULL();
+
+	size = calculate_indexes_size(rel);
+
+	relation_close(rel, AccessShareLock);
+
+	PG_RETURN_INT64(size);
 }
 
 /*
@@ -431,7 +469,7 @@ pg_indexes_size(PG_FUNCTION_ARGS)
  *	including heap data, index data, toast data, FSM, VM.
  */
 static int64
-calculate_total_relation_size(Oid Relid)
+calculate_total_relation_size(Relation rel)
 {
 	int64		size;
 
@@ -439,12 +477,12 @@ calculate_total_relation_size(Oid Relid)
 	 * Aggregate the table size, this includes size of the heap, toast and
 	 * toast index with free space and visibility map
 	 */
-	size = calculate_table_size(Relid);
+	size = calculate_table_size(rel);
 
 	/*
 	 * Add size of all attached indexes as well
 	 */
-	size += calculate_indexes_size(Relid);
+	size += calculate_indexes_size(rel);
 
 	return size;
 }
@@ -452,9 +490,20 @@ calculate_total_relation_size(Oid Relid)
 Datum
 pg_total_relation_size(PG_FUNCTION_ARGS)
 {
-	Oid			relid = PG_GETARG_OID(0);
+	Oid			relOid = PG_GETARG_OID(0);
+	Relation	rel;
+	int64		size;
+
+	rel = try_relation_open(relOid, AccessShareLock);
+
+	if (rel == NULL)
+		PG_RETURN_NULL();
 
-	PG_RETURN_INT64(calculate_total_relation_size(relid));
+	size = calculate_total_relation_size(rel);
+
+	relation_close(rel, AccessShareLock);
+
+	PG_RETURN_INT64(size);
 }
 
 /*

commit 6f6b46c9c0ca3d96acbebc5499c32ee6369e1eec
Author: Peter Eisentraut <peter_e@gmx.net>
Date:   Wed Jan 18 21:02:09 2012 +0200

    PL/Python: Update example
    
    Change the usesavedplan() example to use a more modern Python style
    using the .setdefault() function.

diff --git a/doc/src/sgml/plpython.sgml b/doc/src/sgml/plpython.sgml
index 618f8d0..5a3c8ca 100644
--- a/doc/src/sgml/plpython.sgml
+++ b/doc/src/sgml/plpython.sgml
@@ -955,11 +955,7 @@ rv = plpy.execute(plan, [ "name" ], 5)
    <xref linkend="plpython-sharing">). For example:
 <programlisting>
 CREATE FUNCTION usesavedplan() RETURNS trigger AS $$
-    if SD.has_key("plan"):
-        plan = SD["plan"]
-    else:
-        plan = plpy.prepare("SELECT 1")
-        SD["plan"] = plan
+    plan = SD.setdefault("plan", plpy.prepare("SELECT 1"))
     # rest of function
 $$ LANGUAGE plpythonu;
 </programlisting>

commit 326b922e8b2d65257a635b5f80e5de0f15dffd3a
Author: Heikki Linnakangas <heikki.linnakangas@iki.fi>
Date:   Wed Jan 18 17:09:44 2012 +0200

    Fix corner case in cleanup of transactions using SSI.
    
    When the only remaining active transactions are READ ONLY, we do a "partial
    cleanup" of committed transactions because certain types of conflicts
    aren't possible anymore. For committed r/w transactions, we release the
    SIREAD locks but keep the SERIALIZABLEXACT. However, for committed r/o
    transactions, we can go further and release the SERIALIZABLEXACT too. The
    problem was with the latter case: we were returning the SERIALIZABLEXACT to
    the free list without removing it from the finished list.
    
    The only real change in the patch is the SHMQueueDelete line, but I also
    reworked some of the surrounding code to make it obvious that r/o and r/w
    transactions are handled differently -- the existing code felt a bit too
    clever.
    
    Dan Ports

diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 821328b..9e927f8 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -3528,10 +3528,29 @@ ClearOldPredicateLocks(void)
 		else if (finishedSxact->commitSeqNo > PredXact->HavePartialClearedThrough
 		   && finishedSxact->commitSeqNo <= PredXact->CanPartialClearThrough)
 		{
+			/*
+			 * Any active transactions that took their snapshot before this
+			 * transaction committed are read-only, so we can clear part of
+			 * its state.
+			 */
 			LWLockRelease(SerializableXactHashLock);
-			ReleaseOneSerializableXact(finishedSxact,
-									   !SxactIsReadOnly(finishedSxact),
-									   false);
+
+			if (SxactIsReadOnly(finishedSxact))
+			{
+				/* A read-only transaction can be removed entirely */
+				SHMQueueDelete(&(finishedSxact->finishedLink));
+				ReleaseOneSerializableXact(finishedSxact, false, false);
+			}
+			else
+			{
+				/*
+				 * A read-write transaction can only be partially
+				 * cleared. We need to keep the SERIALIZABLEXACT but
+				 * can release the SIREAD locks and conflicts in.
+				 */
+				ReleaseOneSerializableXact(finishedSxact, true, false);
+			}
+
 			PredXact->HavePartialClearedThrough = finishedSxact->commitSeqNo;
 			LWLockAcquire(SerializableXactHashLock, LW_SHARED);
 		}
@@ -3637,6 +3656,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
 
 	Assert(sxact != NULL);
 	Assert(SxactIsRolledBack(sxact) || SxactIsCommitted(sxact));
+	Assert(partial || !SxactIsOnFinishedList(sxact));
 	Assert(LWLockHeldByMe(SerializableFinishedListLock));
 
 	/*

commit 2106c55ac8dacc52bf6483925529fd3ab99b94c4
Author: Magnus Hagander <magnus@hagander.net>
Date:   Wed Jan 18 10:32:54 2012 +0100

    Show psql timing output for failed queries as well as successful ones
    
    This is useful, for example, when a long-running statement such as CREATE
    INDEX fails after a long time.
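
For illustration only (not part of this commit; the statement and timing value
are placeholders): with \timing enabled, a failing command now still reports
its elapsed time, e.g.

    \timing on
    CREATE INDEX ...;
    ERROR:  ...
    Time: 123456.789 ms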

diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index 889c157..29389d0 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -942,7 +942,7 @@ SendQuery(const char *query)
 	PQclear(results);
 
 	/* Possible microtiming output */
-	if (OK && pset.timing)
+	if (pset.timing)
 		printf(_("Time: %.3f ms\n"), elapsed_msec);
 
 	/* check for events that may occur during query execution */

commit ae137bcaab7dd0b1ee58020d93ce8f07e36d4d49
Author: Magnus Hagander <magnus@hagander.net>
Date:   Wed Jan 18 10:22:54 2012 +0100

    Fix warning about unused variable

diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index c3520ae..f895488 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -102,7 +102,6 @@ DefineVirtualRelation(RangeVar *relation, List *tlist, bool replace,
 					  List *options)
 {
 	Oid			viewOid;
-	Oid			namespaceId;
 	LOCKMODE	lockmode;
 	CreateStmt *createStmt = makeNode(CreateStmt);
 	List	   *attrList;
@@ -167,8 +166,7 @@ DefineVirtualRelation(RangeVar *relation, List *tlist, bool replace,
 	 * namespace is temporary.
 	 */
 	lockmode = replace ? AccessExclusiveLock : NoLock;
-	namespaceId =
-		RangeVarGetAndCheckCreationNamespace(relation, lockmode, &viewOid);
+	(void) RangeVarGetAndCheckCreationNamespace(relation, lockmode, &viewOid);
 
 	if (OidIsValid(viewOid) && replace)
 	{

commit 504f0c5d5d2955c05458e1a8d5f4fbba4cac07cd
Author: Robert Haas <rhaas@postgresql.org>
Date:   Tue Jan 17 22:07:24 2012 -0500

    Regression tests for security_barrier views.
    
    KaiGai Kohei

diff --git a/src/test/regress/expected/select_views.out b/src/test/regress/expected/select_views.out
index 6cd317c..89dfcb1 100644
--- a/src/test/regress/expected/select_views.out
+++ b/src/test/regress/expected/select_views.out
@@ -1247,3 +1247,258 @@ SELECT * FROM toyemp WHERE name = 'sharon';
  sharon |  25 | (15,12)  |     12000
 (1 row)
 
+--
+-- Test for Leaky view scenario
+--
+CREATE USER alice;
+CREATE FUNCTION f_leak (text)
+       RETURNS bool LANGUAGE 'plpgsql' COST 0.0000001
+       AS 'BEGIN RAISE NOTICE ''f_leak => %'', $1; RETURN true; END';
+CREATE TABLE customer (
+       cid      int primary key,
+       name     text not null,
+       tel      text,
+       passwd	text
+);
+NOTICE:  CREATE TABLE / PRIMARY KEY will create implicit index "customer_pkey" for table "customer"
+CREATE TABLE credit_card (
+       cid      int references customer(cid),
+       cnum     text,
+       climit   int
+);
+CREATE TABLE credit_usage (
+       cid      int references customer(cid),
+       ymd      date,
+       usage    int
+);
+INSERT INTO customer
+       VALUES (101, 'alice', '+81-12-3456-7890', 'passwd123'),
+              (102, 'bob',   '+01-234-567-8901', 'beafsteak'),
+              (103, 'eve',   '+49-8765-43210',   'hamburger');
+INSERT INTO credit_card
+       VALUES (101, '1111-2222-3333-4444', 4000),
+              (102, '5555-6666-7777-8888', 3000),
+              (103, '9801-2345-6789-0123', 2000);
+INSERT INTO credit_usage
+       VALUES (101, '2011-09-15', 120),
+       	      (101, '2011-10-05',  90),
+	      (101, '2011-10-18', 110),
+	      (101, '2011-10-21', 200),
+	      (101, '2011-11-10',  80),
+	      (102, '2011-09-22', 300),
+	      (102, '2011-10-12', 120),
+	      (102, '2011-10-28', 200),
+	      (103, '2011-10-15', 480);
+CREATE VIEW my_property_normal AS
+       SELECT * FROM customer WHERE name = current_user;
+CREATE VIEW my_property_secure WITH (security_barrier) AS
+       SELECT * FROM customer WHERE name = current_user;
+CREATE VIEW my_credit_card_normal AS
+       SELECT * FROM customer l NATURAL JOIN credit_card r
+       WHERE l.name = current_user;
+CREATE VIEW my_credit_card_secure WITH (security_barrier) AS
+       SELECT * FROM customer l NATURAL JOIN credit_card r
+       WHERE l.name = current_user;
+CREATE VIEW my_credit_card_usage_normal AS
+       SELECT * FROM my_credit_card_secure l NATURAL JOIN credit_usage r;
+CREATE VIEW my_credit_card_usage_secure WITH (security_barrier) AS
+       SELECT * FROM my_credit_card_secure l NATURAL JOIN credit_usage r;
+GRANT SELECT ON my_property_normal TO public;
+GRANT SELECT ON my_property_secure TO public;
+GRANT SELECT ON my_credit_card_normal TO public;
+GRANT SELECT ON my_credit_card_secure TO public;
+GRANT SELECT ON my_credit_card_usage_normal TO public;
+GRANT SELECT ON my_credit_card_usage_secure TO public;
+--
+-- Run leaky view scenarios
+--
+SET SESSION AUTHORIZATION alice;
+--
+-- scenario: if a qualifier with tiny-cost is given, it shall be launched
+--           prior to the security policy of the view.
+--
+SELECT * FROM my_property_normal WHERE f_leak(passwd);
+NOTICE:  f_leak => passwd123
+NOTICE:  f_leak => beafsteak
+NOTICE:  f_leak => hamburger
+ cid | name  |       tel        |  passwd   
+-----+-------+------------------+-----------
+ 101 | alice | +81-12-3456-7890 | passwd123
+(1 row)
+
+EXPLAIN (COSTS OFF) SELECT * FROM my_property_normal WHERE f_leak(passwd);
+                            QUERY PLAN                            
+------------------------------------------------------------------
+ Seq Scan on customer
+   Filter: (f_leak(passwd) AND (name = ("current_user"())::text))
+(2 rows)
+
+SELECT * FROM my_property_secure WHERE f_leak(passwd);
+NOTICE:  f_leak => passwd123
+ cid | name  |       tel        |  passwd   
+-----+-------+------------------+-----------
+ 101 | alice | +81-12-3456-7890 | passwd123
+(1 row)
+
+EXPLAIN (COSTS OFF) SELECT * FROM my_property_secure WHERE f_leak(passwd);
+                    QUERY PLAN                     
+---------------------------------------------------
+ Subquery Scan on my_property_secure
+   Filter: f_leak(my_property_secure.passwd)
+   ->  Seq Scan on customer
+         Filter: (name = ("current_user"())::text)
+(4 rows)
+
+--
+-- scenario: if a qualifier references only one-side of a particular join-
+--           tree, it shall be distributed to the most deep scan plan as
+--           possible as we can.
+--
+SELECT * FROM my_credit_card_normal WHERE f_leak(cnum);
+NOTICE:  f_leak => 1111-2222-3333-4444
+NOTICE:  f_leak => 5555-6666-7777-8888
+NOTICE:  f_leak => 9801-2345-6789-0123
+ cid | name  |       tel        |  passwd   |        cnum         | climit 
+-----+-------+------------------+-----------+---------------------+--------
+ 101 | alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 |   4000
+(1 row)
+
+EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_normal WHERE f_leak(cnum);
+                       QUERY PLAN                        
+---------------------------------------------------------
+ Hash Join
+   Hash Cond: (r.cid = l.cid)
+   ->  Seq Scan on credit_card r
+         Filter: f_leak(cnum)
+   ->  Hash
+         ->  Seq Scan on customer l
+               Filter: (name = ("current_user"())::text)
+(7 rows)
+
+SELECT * FROM my_credit_card_secure WHERE f_leak(cnum);
+NOTICE:  f_leak => 1111-2222-3333-4444
+ cid | name  |       tel        |  passwd   |        cnum         | climit 
+-----+-------+------------------+-----------+---------------------+--------
+ 101 | alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 |   4000
+(1 row)
+
+EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_secure WHERE f_leak(cnum);
+                          QUERY PLAN                           
+---------------------------------------------------------------
+ Subquery Scan on my_credit_card_secure
+   Filter: f_leak(my_credit_card_secure.cnum)
+   ->  Hash Join
+         Hash Cond: (r.cid = l.cid)
+         ->  Seq Scan on credit_card r
+         ->  Hash
+               ->  Seq Scan on customer l
+                     Filter: (name = ("current_user"())::text)
+(8 rows)
+
+--
+-- scenario: an external qualifier can be pushed-down by in-front-of the
+--           views with "security_barrier" attribute
+--
+SELECT * FROM my_credit_card_usage_normal
+       WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
+NOTICE:  f_leak => 1111-2222-3333-4444
+ cid | name  |       tel        |  passwd   |        cnum         | climit |    ymd     | usage 
+-----+-------+------------------+-----------+---------------------+--------+------------+-------
+ 101 | alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 |   4000 | 10-05-2011 |    90
+ 101 | alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 |   4000 | 10-18-2011 |   110
+ 101 | alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 |   4000 | 10-21-2011 |   200
+(3 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_usage_normal
+       WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
+                                  QUERY PLAN                                  
+------------------------------------------------------------------------------
+ Nested Loop
+   Join Filter: (l.cid = r.cid)
+   ->  Seq Scan on credit_usage r
+         Filter: ((ymd >= '10-01-2011'::date) AND (ymd < '11-01-2011'::date))
+   ->  Materialize
+         ->  Subquery Scan on l
+               Filter: f_leak(l.cnum)
+               ->  Hash Join
+                     Hash Cond: (r.cid = l.cid)
+                     ->  Seq Scan on credit_card r
+                     ->  Hash
+                           ->  Seq Scan on customer l
+                                 Filter: (name = ("current_user"())::text)
+(13 rows)
+
+SELECT * FROM my_credit_card_usage_secure
+       WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
+NOTICE:  f_leak => 1111-2222-3333-4444
+NOTICE:  f_leak => 1111-2222-3333-4444
+NOTICE:  f_leak => 1111-2222-3333-4444
+NOTICE:  f_leak => 1111-2222-3333-4444
+NOTICE:  f_leak => 1111-2222-3333-4444
+ cid | name  |       tel        |  passwd   |        cnum         | climit |    ymd     | usage 
+-----+-------+------------------+-----------+---------------------+--------+------------+-------
+ 101 | alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 |   4000 | 10-05-2011 |    90
+ 101 | alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 |   4000 | 10-18-2011 |   110
+ 101 | alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 |   4000 | 10-21-2011 |   200
+(3 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_usage_secure
+       WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
+                                                                                 QUERY PLAN                                                                                  
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Subquery Scan on my_credit_card_usage_secure
+   Filter: (f_leak(my_credit_card_usage_secure.cnum) AND (my_credit_card_usage_secure.ymd >= '10-01-2011'::date) AND (my_credit_card_usage_secure.ymd < '11-01-2011'::date))
+   ->  Hash Join
+         Hash Cond: (r.cid = l.cid)
+         ->  Seq Scan on credit_usage r
+         ->  Hash
+               ->  Hash Join
+                     Hash Cond: (r.cid = l.cid)
+                     ->  Seq Scan on credit_card r
+                     ->  Hash
+                           ->  Seq Scan on customer l
+                                 Filter: (name = ("current_user"())::text)
+(12 rows)
+
+--
+-- Test for the case when security_barrier gets changed between rewriter
+-- and planner stage.
+--
+PREPARE p1 AS SELECT * FROM my_property_normal WHERE f_leak(passwd);
+PREPARE p2 AS SELECT * FROM my_property_secure WHERE f_leak(passwd);
+EXECUTE p1;
+NOTICE:  f_leak => passwd123
+NOTICE:  f_leak => beafsteak
+NOTICE:  f_leak => hamburger
+ cid | name  |       tel        |  passwd   
+-----+-------+------------------+-----------
+ 101 | alice | +81-12-3456-7890 | passwd123
+(1 row)
+
+EXECUTE p2;
+NOTICE:  f_leak => passwd123
+ cid | name  |       tel        |  passwd   
+-----+-------+------------------+-----------
+ 101 | alice | +81-12-3456-7890 | passwd123
+(1 row)
+
+RESET SESSION AUTHORIZATION;
+ALTER VIEW my_property_normal SET (security_barrier=true);
+ALTER VIEW my_property_secure SET (security_barrier=false);
+SET SESSION AUTHORIZATION alice;
+EXECUTE p1;		-- To be perform as a view with security-barrier
+NOTICE:  f_leak => passwd123
+ cid | name  |       tel        |  passwd   
+-----+-------+------------------+-----------
+ 101 | alice | +81-12-3456-7890 | passwd123
+(1 row)
+
+EXECUTE p2;		-- To be perform as a view without security-barrier
+NOTICE:  f_leak => passwd123
+NOTICE:  f_leak => beafsteak
+NOTICE:  f_leak => hamburger
+ cid | name  |       tel        |  passwd   
+-----+-------+------------------+-----------
+ 101 | alice | +81-12-3456-7890 | passwd123
+(1 row)
+
diff --git a/src/test/regress/expected/select_views_1.out b/src/test/regress/expected/select_views_1.out
index 9a972cf..c6f75af 100644
--- a/src/test/regress/expected/select_views_1.out
+++ b/src/test/regress/expected/select_views_1.out
@@ -1247,3 +1247,258 @@ SELECT * FROM toyemp WHERE name = 'sharon';
  sharon |  25 | (15,12)  |     12000
 (1 row)
 
+--
+-- Test for Leaky view scenario
+--
+CREATE USER alice;
+CREATE FUNCTION f_leak (text)
+       RETURNS bool LANGUAGE 'plpgsql' COST 0.0000001
+       AS 'BEGIN RAISE NOTICE ''f_leak => %'', $1; RETURN true; END';
+CREATE TABLE customer (
+       cid      int primary key,
+       name     text not null,
+       tel      text,
+       passwd	text
+);
+NOTICE:  CREATE TABLE / PRIMARY KEY will create implicit index "customer_pkey" for table "customer"
+CREATE TABLE credit_card (
+       cid      int references customer(cid),
+       cnum     text,
+       climit   int
+);
+CREATE TABLE credit_usage (
+       cid      int references customer(cid),
+       ymd      date,
+       usage    int
+);
+INSERT INTO customer
+       VALUES (101, 'alice', '+81-12-3456-7890', 'passwd123'),
+              (102, 'bob',   '+01-234-567-8901', 'beafsteak'),
+              (103, 'eve',   '+49-8765-43210',   'hamburger');
+INSERT INTO credit_card
+       VALUES (101, '1111-2222-3333-4444', 4000),
+              (102, '5555-6666-7777-8888', 3000),
+              (103, '9801-2345-6789-0123', 2000);
+INSERT INTO credit_usage
+       VALUES (101, '2011-09-15', 120),
+       	      (101, '2011-10-05',  90),
+	      (101, '2011-10-18', 110),
+	      (101, '2011-10-21', 200),
+	      (101, '2011-11-10',  80),
+	      (102, '2011-09-22', 300),
+	      (102, '2011-10-12', 120),
+	      (102, '2011-10-28', 200),
+	      (103, '2011-10-15', 480);
+CREATE VIEW my_property_normal AS
+       SELECT * FROM customer WHERE name = current_user;
+CREATE VIEW my_property_secure WITH (security_barrier) AS
+       SELECT * FROM customer WHERE name = current_user;
+CREATE VIEW my_credit_card_normal AS
+       SELECT * FROM customer l NATURAL JOIN credit_card r
+       WHERE l.name = current_user;
+CREATE VIEW my_credit_card_secure WITH (security_barrier) AS
+       SELECT * FROM customer l NATURAL JOIN credit_card r
+       WHERE l.name = current_user;
+CREATE VIEW my_credit_card_usage_normal AS
+       SELECT * FROM my_credit_card_secure l NATURAL JOIN credit_usage r;
+CREATE VIEW my_credit_card_usage_secure WITH (security_barrier) AS
+       SELECT * FROM my_credit_card_secure l NATURAL JOIN credit_usage r;
+GRANT SELECT ON my_property_normal TO public;
+GRANT SELECT ON my_property_secure TO public;
+GRANT SELECT ON my_credit_card_normal TO public;
+GRANT SELECT ON my_credit_card_secure TO public;
+GRANT SELECT ON my_credit_card_usage_normal TO public;
+GRANT SELECT ON my_credit_card_usage_secure TO public;
+--
+-- Run leaky view scenarios
+--
+SET SESSION AUTHORIZATION alice;
+--
+-- scenario: if a qualifier with tiny-cost is given, it shall be launched
+--           prior to the security policy of the view.
+--
+SELECT * FROM my_property_normal WHERE f_leak(passwd);
+NOTICE:  f_leak => passwd123
+NOTICE:  f_leak => beafsteak
+NOTICE:  f_leak => hamburger
+ cid | name  |       tel        |  passwd   
+-----+-------+------------------+-----------
+ 101 | alice | +81-12-3456-7890 | passwd123
+(1 row)
+
+EXPLAIN (COSTS OFF) SELECT * FROM my_property_normal WHERE f_leak(passwd);
+                            QUERY PLAN                            
+------------------------------------------------------------------
+ Seq Scan on customer
+   Filter: (f_leak(passwd) AND (name = ("current_user"())::text))
+(2 rows)
+
+SELECT * FROM my_property_secure WHERE f_leak(passwd);
+NOTICE:  f_leak => passwd123
+ cid | name  |       tel        |  passwd   
+-----+-------+------------------+-----------
+ 101 | alice | +81-12-3456-7890 | passwd123
+(1 row)
+
+EXPLAIN (COSTS OFF) SELECT * FROM my_property_secure WHERE f_leak(passwd);
+                    QUERY PLAN                     
+---------------------------------------------------
+ Subquery Scan on my_property_secure
+   Filter: f_leak(my_property_secure.passwd)
+   ->  Seq Scan on customer
+         Filter: (name = ("current_user"())::text)
+(4 rows)
+
+--
+-- scenario: if a qualifier references only one-side of a particular join-
+--           tree, it shall be distributed to the most deep scan plan as
+--           possible as we can.
+--
+SELECT * FROM my_credit_card_normal WHERE f_leak(cnum);
+NOTICE:  f_leak => 1111-2222-3333-4444
+NOTICE:  f_leak => 5555-6666-7777-8888
+NOTICE:  f_leak => 9801-2345-6789-0123
+ cid | name  |       tel        |  passwd   |        cnum         | climit 
+-----+-------+------------------+-----------+---------------------+--------
+ 101 | alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 |   4000
+(1 row)
+
+EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_normal WHERE f_leak(cnum);
+                       QUERY PLAN                        
+---------------------------------------------------------
+ Hash Join
+   Hash Cond: (r.cid = l.cid)
+   ->  Seq Scan on credit_card r
+         Filter: f_leak(cnum)
+   ->  Hash
+         ->  Seq Scan on customer l
+               Filter: (name = ("current_user"())::text)
+(7 rows)
+
+SELECT * FROM my_credit_card_secure WHERE f_leak(cnum);
+NOTICE:  f_leak => 1111-2222-3333-4444
+ cid | name  |       tel        |  passwd   |        cnum         | climit 
+-----+-------+------------------+-----------+---------------------+--------
+ 101 | alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 |   4000
+(1 row)
+
+EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_secure WHERE f_leak(cnum);
+                          QUERY PLAN                           
+---------------------------------------------------------------
+ Subquery Scan on my_credit_card_secure
+   Filter: f_leak(my_credit_card_secure.cnum)
+   ->  Hash Join
+         Hash Cond: (r.cid = l.cid)
+         ->  Seq Scan on credit_card r
+         ->  Hash
+               ->  Seq Scan on customer l
+                     Filter: (name = ("current_user"())::text)
+(8 rows)
+
+--
+-- scenario: an external qualifier can be pushed-down by in-front-of the
+--           views with "security_barrier" attribute
+--
+SELECT * FROM my_credit_card_usage_normal
+       WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
+NOTICE:  f_leak => 1111-2222-3333-4444
+ cid | name  |       tel        |  passwd   |        cnum         | climit |    ymd     | usage 
+-----+-------+------------------+-----------+---------------------+--------+------------+-------
+ 101 | alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 |   4000 | 10-05-2011 |    90
+ 101 | alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 |   4000 | 10-18-2011 |   110
+ 101 | alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 |   4000 | 10-21-2011 |   200
+(3 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_usage_normal
+       WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
+                                  QUERY PLAN                                  
+------------------------------------------------------------------------------
+ Nested Loop
+   Join Filter: (l.cid = r.cid)
+   ->  Seq Scan on credit_usage r
+         Filter: ((ymd >= '10-01-2011'::date) AND (ymd < '11-01-2011'::date))
+   ->  Materialize
+         ->  Subquery Scan on l
+               Filter: f_leak(l.cnum)
+               ->  Hash Join
+                     Hash Cond: (r.cid = l.cid)
+                     ->  Seq Scan on credit_card r
+                     ->  Hash
+                           ->  Seq Scan on customer l
+                                 Filter: (name = ("current_user"())::text)
+(13 rows)
+
+SELECT * FROM my_credit_card_usage_secure
+       WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
+NOTICE:  f_leak => 1111-2222-3333-4444
+NOTICE:  f_leak => 1111-2222-3333-4444
+NOTICE:  f_leak => 1111-2222-3333-4444
+NOTICE:  f_leak => 1111-2222-3333-4444
+NOTICE:  f_leak => 1111-2222-3333-4444
+ cid | name  |       tel        |  passwd   |        cnum         | climit |    ymd     | usage 
+-----+-------+------------------+-----------+---------------------+--------+------------+-------
+ 101 | alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 |   4000 | 10-05-2011 |    90
+ 101 | alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 |   4000 | 10-18-2011 |   110
+ 101 | alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 |   4000 | 10-21-2011 |   200
+(3 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_usage_secure
+       WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
+                                                                                 QUERY PLAN                                                                                  
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Subquery Scan on my_credit_card_usage_secure
+   Filter: (f_leak(my_credit_card_usage_secure.cnum) AND (my_credit_card_usage_secure.ymd >= '10-01-2011'::date) AND (my_credit_card_usage_secure.ymd < '11-01-2011'::date))
+   ->  Hash Join
+         Hash Cond: (r.cid = l.cid)
+         ->  Seq Scan on credit_usage r
+         ->  Hash
+               ->  Hash Join
+                     Hash Cond: (r.cid = l.cid)
+                     ->  Seq Scan on credit_card r
+                     ->  Hash
+                           ->  Seq Scan on customer l
+                                 Filter: (name = ("current_user"())::text)
+(12 rows)
+
+--
+-- Test for the case when security_barrier gets changed between rewriter
+-- and planner stage.
+--
+PREPARE p1 AS SELECT * FROM my_property_normal WHERE f_leak(passwd);
+PREPARE p2 AS SELECT * FROM my_property_secure WHERE f_leak(passwd);
+EXECUTE p1;
+NOTICE:  f_leak => passwd123
+NOTICE:  f_leak => beafsteak
+NOTICE:  f_leak => hamburger
+ cid | name  |       tel        |  passwd   
+-----+-------+------------------+-----------
+ 101 | alice | +81-12-3456-7890 | passwd123
+(1 row)
+
+EXECUTE p2;
+NOTICE:  f_leak => passwd123
+ cid | name  |       tel        |  passwd   
+-----+-------+------------------+-----------
+ 101 | alice | +81-12-3456-7890 | passwd123
+(1 row)
+
+RESET SESSION AUTHORIZATION;
+ALTER VIEW my_property_normal SET (security_barrier=true);
+ALTER VIEW my_property_secure SET (security_barrier=false);
+SET SESSION AUTHORIZATION alice;
+EXECUTE p1;		-- To be performed as a view with a security barrier
+NOTICE:  f_leak => passwd123
+ cid | name  |       tel        |  passwd   
+-----+-------+------------------+-----------
+ 101 | alice | +81-12-3456-7890 | passwd123
+(1 row)
+
+EXECUTE p2;		-- To be performed as a view without a security barrier
+NOTICE:  f_leak => passwd123
+NOTICE:  f_leak => beafsteak
+NOTICE:  f_leak => hamburger
+ cid | name  |       tel        |  passwd   
+-----+-------+------------------+-----------
+ 101 | alice | +81-12-3456-7890 | passwd123
+(1 row)
+
diff --git a/src/test/regress/sql/select_views.sql b/src/test/regress/sql/select_views.sql
index 14f1be8..4b2dac9 100644
--- a/src/test/regress/sql/select_views.sql
+++ b/src/test/regress/sql/select_views.sql
@@ -8,3 +8,129 @@ SELECT * FROM street;
 SELECT name, #thepath FROM iexit ORDER BY 1, 2;
 
 SELECT * FROM toyemp WHERE name = 'sharon';
+
+--
+-- Test for Leaky view scenario
+--
+CREATE USER alice;
+
+CREATE FUNCTION f_leak (text)
+       RETURNS bool LANGUAGE 'plpgsql' COST 0.0000001
+       AS 'BEGIN RAISE NOTICE ''f_leak => %'', $1; RETURN true; END';
+
+CREATE TABLE customer (
+       cid      int primary key,
+       name     text not null,
+       tel      text,
+       passwd	text
+);
+
+CREATE TABLE credit_card (
+       cid      int references customer(cid),
+       cnum     text,
+       climit   int
+);
+
+CREATE TABLE credit_usage (
+       cid      int references customer(cid),
+       ymd      date,
+       usage    int
+);
+
+INSERT INTO customer
+       VALUES (101, 'alice', '+81-12-3456-7890', 'passwd123'),
+              (102, 'bob',   '+01-234-567-8901', 'beafsteak'),
+              (103, 'eve',   '+49-8765-43210',   'hamburger');
+INSERT INTO credit_card
+       VALUES (101, '1111-2222-3333-4444', 4000),
+              (102, '5555-6666-7777-8888', 3000),
+              (103, '9801-2345-6789-0123', 2000);
+INSERT INTO credit_usage
+       VALUES (101, '2011-09-15', 120),
+       	      (101, '2011-10-05',  90),
+	      (101, '2011-10-18', 110),
+	      (101, '2011-10-21', 200),
+	      (101, '2011-11-10',  80),
+	      (102, '2011-09-22', 300),
+	      (102, '2011-10-12', 120),
+	      (102, '2011-10-28', 200),
+	      (103, '2011-10-15', 480);
+
+CREATE VIEW my_property_normal AS
+       SELECT * FROM customer WHERE name = current_user;
+CREATE VIEW my_property_secure WITH (security_barrier) AS
+       SELECT * FROM customer WHERE name = current_user;
+
+CREATE VIEW my_credit_card_normal AS
+       SELECT * FROM customer l NATURAL JOIN credit_card r
+       WHERE l.name = current_user;
+CREATE VIEW my_credit_card_secure WITH (security_barrier) AS
+       SELECT * FROM customer l NATURAL JOIN credit_card r
+       WHERE l.name = current_user;
+
+CREATE VIEW my_credit_card_usage_normal AS
+       SELECT * FROM my_credit_card_secure l NATURAL JOIN credit_usage r;
+CREATE VIEW my_credit_card_usage_secure WITH (security_barrier) AS
+       SELECT * FROM my_credit_card_secure l NATURAL JOIN credit_usage r;
+
+GRANT SELECT ON my_property_normal TO public;
+GRANT SELECT ON my_property_secure TO public;
+GRANT SELECT ON my_credit_card_normal TO public;
+GRANT SELECT ON my_credit_card_secure TO public;
+GRANT SELECT ON my_credit_card_usage_normal TO public;
+GRANT SELECT ON my_credit_card_usage_secure TO public;
+
+--
+-- Run leaky view scenarios
+--
+SET SESSION AUTHORIZATION alice;
+
+--
+-- scenario: if a qualifier with tiny-cost is given, it shall be launched
+--           prior to the security policy of the view.
+--
+SELECT * FROM my_property_normal WHERE f_leak(passwd);
+EXPLAIN (COSTS OFF) SELECT * FROM my_property_normal WHERE f_leak(passwd);
+
+SELECT * FROM my_property_secure WHERE f_leak(passwd);
+EXPLAIN (COSTS OFF) SELECT * FROM my_property_secure WHERE f_leak(passwd);
+
+--
+-- scenario: if a qualifier references only one side of a particular join
+--           tree, it should be pushed down to the deepest scan plan
+--           possible.
+--
+SELECT * FROM my_credit_card_normal WHERE f_leak(cnum);
+EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_normal WHERE f_leak(cnum);
+
+SELECT * FROM my_credit_card_secure WHERE f_leak(cnum);
+EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_secure WHERE f_leak(cnum);
+
+--
+-- scenario: an external qualifier can be pushed down, but only as far as
+--           views with the "security_barrier" attribute
+--
+SELECT * FROM my_credit_card_usage_normal
+       WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
+EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_usage_normal
+       WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
+
+SELECT * FROM my_credit_card_usage_secure
+       WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
+EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_usage_secure
+       WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
+
+--
+-- Test for the case when security_barrier gets changed between rewriter
+-- and planner stage.
+--
+PREPARE p1 AS SELECT * FROM my_property_normal WHERE f_leak(passwd);
+PREPARE p2 AS SELECT * FROM my_property_secure WHERE f_leak(passwd);
+EXECUTE p1;
+EXECUTE p2;
+RESET SESSION AUTHORIZATION;
+ALTER VIEW my_property_normal SET (security_barrier=true);
+ALTER VIEW my_property_secure SET (security_barrier=false);
+SET SESSION AUTHORIZATION alice;
+EXECUTE p1;		-- To be performed as a view with a security barrier
+EXECUTE p2;		-- To be performed as a view without a security barrier

commit 4b496a3583ecb3f70bb4d13f8275dbb7e5b26100
Author: Robert Haas <rhaas@postgresql.org>
Date:   Tue Jan 17 20:51:38 2012 -0500

    Catch fatal flex errors in the GUC file lexer.
    
    This prevents the postmaster from unexpectedly croaking if postgresql.conf
    contains something like:
    
    include 'invalid_directory_name'
    
    Noah Misch. Reviewed by Tom Lane and myself.

diff --git a/src/backend/utils/misc/guc-file.l b/src/backend/utils/misc/guc-file.l
index 6fc2165..c6c1558 100644
--- a/src/backend/utils/misc/guc-file.l
+++ b/src/backend/utils/misc/guc-file.l
@@ -20,9 +20,14 @@
 #include "utils/guc.h"
 
 
-/* Avoid exit() on fatal scanner errors (a bit ugly -- see yy_fatal_error) */
+/*
+ * flex emits a yy_fatal_error() function that it calls in response to
+ * critical errors like malloc failure, file I/O errors, and detection of
+ * internal inconsistency.  That function prints a message and calls exit().
+ * Mutate it to instead call our handler, which jumps out of the parser.
+ */
 #undef fprintf
-#define fprintf(file, fmt, msg)  ereport(ERROR, (errmsg_internal("%s", msg)))
+#define fprintf(file, fmt, msg) GUC_flex_fatal(msg)
 
 enum {
 	GUC_ID = 1,
@@ -37,10 +42,13 @@ enum {
 };
 
 static unsigned int ConfigFileLineno;
+static const char *GUC_flex_fatal_errmsg;
+static sigjmp_buf *GUC_flex_fatal_jmp;
 
 /* flex fails to supply a prototype for yylex, so provide one */
 int GUC_yylex(void);
 
+static int GUC_flex_fatal(const char *msg);
 static char *GUC_scanstr(const char *s);
 
 %}
@@ -437,6 +445,22 @@ ParseConfigFile(const char *config_file, const char *calling_file, bool strict,
 }
 
 /*
+ * Flex fatal errors bring us here.  Stash the error message and jump back to
+ * ParseConfigFp().  Assume all msg arguments point to string constants; this
+ * holds for flex 2.5.31 (earliest we support) and flex 2.5.35 (latest as of
+ * this writing).  Otherwise, we would need to copy the message.
+ *
+ * We return "int" since this takes the place of calls to fprintf().
+ */
+static int
+GUC_flex_fatal(const char *msg)
+{
+	GUC_flex_fatal_errmsg = msg;
+	siglongjmp(*GUC_flex_fatal_jmp, 1);
+	return 0;	/* keep compiler quiet */
+}
+
+/*
  * Read and parse a single configuration file.  This function recurses
  * to handle "include" directives.
  *
@@ -464,19 +488,38 @@ ParseConfigFp(FILE *fp, const char *config_file, int depth, int elevel,
 			  ConfigVariable **head_p, ConfigVariable **tail_p)
 {
 	bool		OK = true;
-	YY_BUFFER_STATE lex_buffer;
+	unsigned int save_ConfigFileLineno = ConfigFileLineno;
+	sigjmp_buf *save_GUC_flex_fatal_jmp = GUC_flex_fatal_jmp;
+	sigjmp_buf	flex_fatal_jmp;
+	volatile YY_BUFFER_STATE lex_buffer = NULL;
 	int			errorcount;
 	int			token;
 
+	if (sigsetjmp(flex_fatal_jmp, 1) == 0)
+		GUC_flex_fatal_jmp = &flex_fatal_jmp;
+	else
+	{
+		/*
+		 * Regain control after a fatal, internal flex error.  It may have
+		 * corrupted parser state.  Consequently, abandon the file, but trust
+		 * that the state remains sane enough for yy_delete_buffer().
+		 */
+		elog(elevel, "%s at file \"%s\" line %u",
+			 GUC_flex_fatal_errmsg, config_file, ConfigFileLineno);
+
+		OK = false;
+		goto cleanup;
+	}
+
 	/*
 	 * Parse
 	 */
-	lex_buffer = yy_create_buffer(fp, YY_BUF_SIZE);
-	yy_switch_to_buffer(lex_buffer);
-
 	ConfigFileLineno = 1;
 	errorcount = 0;
 
+	lex_buffer = yy_create_buffer(fp, YY_BUF_SIZE);
+	yy_switch_to_buffer(lex_buffer);
+
 	/* This loop iterates once per logical line */
 	while ((token = yylex()))
 	{
@@ -526,14 +569,11 @@ ParseConfigFp(FILE *fp, const char *config_file, int depth, int elevel,
 			 * An include_if_exists directive isn't a variable and should be
 			 * processed immediately.
 			 */
-			unsigned int save_ConfigFileLineno = ConfigFileLineno;
-
 			if (!ParseConfigFile(opt_value, config_file, false,
 								 depth + 1, elevel,
 								 head_p, tail_p))
 				OK = false;
 			yy_switch_to_buffer(lex_buffer);
-			ConfigFileLineno = save_ConfigFileLineno;
 			pfree(opt_name);
 			pfree(opt_value);
 		}
@@ -543,14 +583,11 @@ ParseConfigFp(FILE *fp, const char *config_file, int depth, int elevel,
 			 * An include directive isn't a variable and should be processed
 			 * immediately.
 			 */
-			unsigned int save_ConfigFileLineno = ConfigFileLineno;
-
 			if (!ParseConfigFile(opt_value, config_file, true,
 								 depth + 1, elevel,
 								 head_p, tail_p))
 				OK = false;
 			yy_switch_to_buffer(lex_buffer);
-			ConfigFileLineno = save_ConfigFileLineno;
 			pfree(opt_name);
 			pfree(opt_value);
 		}
@@ -620,7 +657,11 @@ ParseConfigFp(FILE *fp, const char *config_file, int depth, int elevel,
 			break;
 	}
 
+cleanup:
 	yy_delete_buffer(lex_buffer);
+	/* Each recursion level must save and restore these static variables. */
+	ConfigFileLineno = save_ConfigFileLineno;
+	GUC_flex_fatal_jmp = save_GUC_flex_fatal_jmp;
 	return OK;
 }
 

commit 754b8140a1a5ceb12343fd89423da5cc86ce5328
Author: Robert Haas <rhaas@postgresql.org>
Date:   Mon Jan 16 20:37:01 2012 -0500

    fastgetattr is in access/htup.h, not access/heapam.h
    
    Noted by Peter Geoghegan

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 5f6ac2e..99a431a 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -833,7 +833,7 @@ heapgettup_pagemode(HeapScanDesc scan,
 #if defined(DISABLE_COMPLEX_MACRO)
 /*
  * This is formatted so oddly so that the correspondence to the macro
- * definition in access/heapam.h is maintained.
+ * definition in access/htup.h is maintained.
  */
 Datum
 fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,

commit 3b11247aadf857bbcbfc765191273973d9ca9dd7
Author: Alvaro Herrera <alvherre@alvh.no-ip.org>
Date:   Mon Jan 16 19:19:42 2012 -0300

    Disallow merging ONLY constraints in children tables
    
    When creating a child table, or when attaching an existing table as
    child of another, we must not allow inheritable constraints to be
    merged with non-inheritable ones, because then grandchildren would not
    properly get the constraint.  This would violate the grandparent's
    expectations.
    
    Bugs noted by Robert Haas.
    
    Author: Nikhil Sontakke

diff --git a/doc/src/sgml/ref/alter_table.sgml b/doc/src/sgml/ref/alter_table.sgml
index 1976f6d..6f1917f 100644
--- a/doc/src/sgml/ref/alter_table.sgml
+++ b/doc/src/sgml/ref/alter_table.sgml
@@ -482,7 +482,11 @@ ALTER TABLE <replaceable class="PARAMETER">name</replaceable>
 
      <para>
       There must also be matching child-table constraints for all
-      <literal>CHECK</literal> constraints of the parent. Currently
+      <literal>CHECK</literal> constraints of the parent, except those
+      marked non-inheritable (that is, created with <literal>ALTER TABLE ONLY</literal>)
+      in the parent, which are ignored; none of the matching child-table
+      constraints may be marked non-inheritable.
+      Currently
       <literal>UNIQUE</literal>, <literal>PRIMARY KEY</literal>, and
       <literal>FOREIGN KEY</literal> constraints are not considered, but
       this might change in the future.
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index dc801ae..204236f 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -2251,6 +2251,8 @@ AddRelationNewConstraints(Relation rel,
  *
  * Returns TRUE if merged (constraint is a duplicate), or FALSE if it's
  * got a so-far-unique name, or throws error if conflict.
+ *
+ * XXX See MergeConstraintsIntoExisting too if you change this code.
  */
 static bool
 MergeWithExistingConstraint(Relation rel, char *ccname, Node *expr,
@@ -2307,12 +2309,17 @@ MergeWithExistingConstraint(Relation rel, char *ccname, Node *expr,
 						(errcode(ERRCODE_DUPLICATE_OBJECT),
 				errmsg("constraint \"%s\" for relation \"%s\" already exists",
 					   ccname, RelationGetRelationName(rel))));
-			/* OK to update the tuple */
-			ereport(NOTICE,
-			   (errmsg("merging constraint \"%s\" with inherited definition",
-					   ccname)));
+
 			tup = heap_copytuple(tup);
 			con = (Form_pg_constraint) GETSTRUCT(tup);
+
+			/* If the constraint is "only" then we cannot merge it */
+			if (con->conisonly)
+				ereport(ERROR,
+						(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+						 errmsg("constraint \"%s\" conflicts with non-inherited constraint on relation \"%s\"",
+								ccname, RelationGetRelationName(rel))));
+
 			if (is_local)
 				con->conislocal = true;
 			else
@@ -2322,6 +2329,10 @@ MergeWithExistingConstraint(Relation rel, char *ccname, Node *expr,
 				Assert(is_local);
 				con->conisonly = true;
 			}
+			/* OK to update the tuple */
+			ereport(NOTICE,
+					(errmsg("merging constraint \"%s\" with inherited definition",
+							ccname)));
 			simple_heap_update(conDesc, &tup->t_self, tup);
 			CatalogUpdateIndexes(conDesc, tup);
 			break;
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index d0843b2..cc210f0 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -8818,18 +8818,18 @@ MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel)
  * Check constraints in child table match up with constraints in parent,
  * and increment their coninhcount.
  *
+ * Constraints that are marked ONLY in the parent are ignored.
+ *
  * Called by ATExecAddInherit
  *
  * Currently all constraints in parent must be present in the child. One day we
- * may consider adding new constraints like CREATE TABLE does. We may also want
- * to allow an optional flag on parent table constraints indicating they are
- * intended to ONLY apply to the master table, not to the children. That would
- * make it possible to ensure no records are mistakenly inserted into the
- * master in partitioned tables rather than the appropriate child.
+ * may consider adding new constraints like CREATE TABLE does.
  *
  * XXX This is O(N^2) which may be an issue with tables with hundreds of
  * constraints. As long as tables have more like 10 constraints it shouldn't be
  * a problem though. Even 100 constraints ought not be the end of the world.
+ *
+ * XXX See MergeWithExistingConstraint too if you change this code.
  */
 static void
 MergeConstraintsIntoExisting(Relation child_rel, Relation parent_rel)
@@ -8862,6 +8862,10 @@ MergeConstraintsIntoExisting(Relation child_rel, Relation parent_rel)
 		if (parent_con->contype != CONSTRAINT_CHECK)
 			continue;
 
+		/* if the parent's constraint is marked ONLY, it's not inherited */
+		if (parent_con->conisonly)
+			continue;
+
 		/* Search for a child constraint matching this one */
 		ScanKeyInit(&child_key,
 					Anum_pg_constraint_conrelid,
@@ -8889,6 +8893,14 @@ MergeConstraintsIntoExisting(Relation child_rel, Relation parent_rel)
 								RelationGetRelationName(child_rel),
 								NameStr(parent_con->conname))));
 
+			/* If the constraint is "only" then we cannot merge it */
+			if (child_con->conisonly)
+				ereport(ERROR,
+						(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+						 errmsg("constraint \"%s\" conflicts with non-inherited constraint on child table \"%s\"",
+								NameStr(child_con->conname),
+								RelationGetRelationName(child_rel))));
+
 			/*
 			 * OK, bump the child constraint's inheritance count.  (If we fail
 			 * later on, this change will just roll back.)

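A minimal sketch of the new behavior, using hypothetical table and constraint names (and assuming the 9.2-development-era convention that ALTER TABLE ONLY ... ADD CONSTRAINT creates a non-inherited CHECK constraint): attaching a child whose same-named constraint is non-inheritable now fails instead of silently merging, since grandchildren of that child would otherwise miss the constraint.

    CREATE TABLE parent (a int, CONSTRAINT a_positive CHECK (a > 0));
    CREATE TABLE child (a int);
    -- Non-inherited constraint on the child only.
    ALTER TABLE ONLY child ADD CONSTRAINT a_positive CHECK (a > 0);
    -- Now rejected rather than merged; the error text is roughly:
    --   constraint "a_positive" conflicts with non-inherited constraint
    --   on child table "child"
    ALTER TABLE child INHERIT parent;
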
commit 1b9f774090d58a950ef0535b51bc377ab62b795c
Author: Peter Eisentraut <peter_e@gmx.net>
Date:   Mon Jan 16 20:08:33 2012 +0200

    psql: Fix memory leak
    
    The command
    
    \password username
    
    leaked memory.

diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index 69fac83..6c3f0aa 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -953,6 +953,9 @@ exec_command(const char *cmd,
 					PQclear(res);
 				PQfreemem(encrypted_password);
 			}
+
+			if (opt0)
+				free(opt0);
 		}
 
 		free(pw1);

commit 1575fbcb795fc331f46588b4520c4bca7e854d5c
Author: Robert Haas <rhaas@postgresql.org>
Date:   Mon Jan 16 09:34:21 2012 -0500

    Prevent adding relations to a concurrently dropped schema.
    
    In the previous coding, it was possible for a relation to be created
    via CREATE TABLE, CREATE VIEW, CREATE SEQUENCE, CREATE FOREIGN TABLE,
    etc.  in a schema while that schema was meanwhile being concurrently
    dropped.  This led to a pg_class entry with an invalid relnamespace
    value.  The same problem could occur if a relation was moved using
    ALTER .. SET SCHEMA while the target schema was being concurrently
    dropped.  This patch prevents both of those scenarios by locking the
    schema to which the relation is being added using AccessShareLock,
    which conflicts with the AccessExclusiveLock taken by DROP.
    
    As a desirable side effect, this also prevents the use of CREATE OR
    REPLACE VIEW to queue for an AccessExclusiveLock on a relation on which
    you have no rights: that will now fail immediately with a permissions
    error, before trying to obtain a lock.
    
    We need similar protection for all other object types, but as everything
    other than relations uses a slightly different set of code paths, I'm
    leaving that for a separate commit.
    
    Original complaint (as far as I could find) about CREATE by Nikhil
    Sontakke; risk for ALTER .. SET SCHEMA pointed out by Tom Lane;
    further details by Dan Farina; patch by me; review by Hitoshi Harada.

diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index a9a64fe..80d6fc7 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -480,31 +480,131 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
 
 /*
  * RangeVarGetAndCheckCreationNamespace
- *		As RangeVarGetCreationNamespace, but with a permissions check.
+ *
+ * This function returns the OID of the namespace in which a new relation
+ * with a given name should be created.  If the user does not have CREATE
+ * permission on the target namespace, this function will instead signal
+ * an ERROR.
+ *
+ * If existing_relation_id is not NULL, *existing_relation_id is set to the
+ * OID of any relation that already exists in that namespace with the same
+ * name, or to InvalidOid if no such relation exists.
+ *
+ * If lockmode != NoLock, the specified lock mode is acquired on the existing
+ * relation, if any, provided that the current user owns the target relation.
+ * However, if lockmode != NoLock and the user does not own the target
+ * relation, we throw an ERROR, as we must not try to lock relations the
+ * user does not have permissions on.
+ *
+ * As a side effect, this function acquires AccessShareLock on the target
+ * namespace.  Without this, the namespace could be dropped before our
+ * transaction commits, leaving behind relations with relnamespace pointing
+ * to a no-longer-extant namespace.
+ *
+ * As a further side effect, if the selected namespace is a temporary namespace,
+ * we mark the RangeVar as RELPERSISTENCE_TEMP.
  */
 Oid
-RangeVarGetAndCheckCreationNamespace(const RangeVar *newRelation)
+RangeVarGetAndCheckCreationNamespace(RangeVar *relation,
+									 LOCKMODE lockmode,
+									 Oid *existing_relation_id)
 {
-	Oid			namespaceId;
+	uint64		inval_count;
+	Oid			relid;
+	Oid			oldrelid = InvalidOid;
+	Oid			nspid;
+	Oid			oldnspid = InvalidOid;
+	bool		retry = false;
 
-	namespaceId = RangeVarGetCreationNamespace(newRelation);
+	/*
+	 * We check the catalog name and then ignore it.
+	 */
+	if (relation->catalogname)
+	{
+		if (strcmp(relation->catalogname, get_database_name(MyDatabaseId)) != 0)
+			ereport(ERROR,
+					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+					 errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
+							relation->catalogname, relation->schemaname,
+							relation->relname)));
+	}
 
 	/*
-	 * Check we have permission to create there. Skip check if bootstrapping,
-	 * since permissions machinery may not be working yet.
+	 * As in RangeVarGetRelidExtended(), we guard against concurrent DDL
+	 * operations by tracking whether any invalidation messages are processed
+	 * while we're doing the name lookups and acquiring locks.  See comments
+	 * in that function for a more detailed explanation of this logic.
 	 */
-	if (!IsBootstrapProcessingMode())
+	for (;;)
 	{
 		AclResult	aclresult;
 
-		aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
-										  ACL_CREATE);
+		inval_count = SharedInvalidMessageCounter;
+
+		/* Look up creation namespace and check for existing relation. */
+		nspid = RangeVarGetCreationNamespace(relation);
+		Assert(OidIsValid(nspid));
+		if (existing_relation_id != NULL)
+			relid = get_relname_relid(relation->relname, nspid);
+		else
+			relid = InvalidOid;
+
+		/*
+		 * In bootstrap processing mode, we don't bother with permissions
+		 * or locking.  Permissions might not be working yet, and locking is
+		 * unnecessary.
+		 */
+		if (IsBootstrapProcessingMode())
+			break;
+
+		/* Check namespace permissions. */
+		aclresult = pg_namespace_aclcheck(nspid, GetUserId(), ACL_CREATE);
 		if (aclresult != ACLCHECK_OK)
 			aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
-						   get_namespace_name(namespaceId));
+						   get_namespace_name(nspid));
+
+		if (retry)
+		{
+			/* If nothing changed, we're done. */
+			if (relid == oldrelid && nspid == oldnspid)
+				break;
+			/* If creation namespace has changed, give up old lock. */
+			if (nspid != oldnspid)
+				UnlockDatabaseObject(NamespaceRelationId, oldnspid, 0,
+									 AccessShareLock);
+			/* If name points to something different, give up old lock. */
+			if (relid != oldrelid && OidIsValid(oldrelid) && lockmode != NoLock)
+				UnlockRelationOid(oldrelid, lockmode);
+		}
+
+		/* Lock namespace. */
+		if (nspid != oldnspid)
+			LockDatabaseObject(NamespaceRelationId, nspid, 0, AccessShareLock);
+
+		/* Lock relation, if required and if we have permission. */
+		if (lockmode != NoLock && OidIsValid(relid))
+		{
+			if (!pg_class_ownercheck(relid, GetUserId()))
+				aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
+							   relation->relname);
+			if (relid != oldrelid)
+				LockRelationOid(relid, lockmode);
+		}
+
+		/* If no invalidation messages were processed, we're done! */
+		if (inval_count == SharedInvalidMessageCounter)
+			break;
+
+		/* Something may have changed, so recheck our work. */
+		retry = true;
+		oldrelid = relid;
+		oldnspid = nspid;
 	}
 
-	return namespaceId;
+	RangeVarAdjustRelationPersistence(relation, nspid);
+	if (existing_relation_id != NULL)
+		*existing_relation_id = relid;
+	return nspid;
 }
 
 /*
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index c373016..d0843b2 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -451,10 +451,12 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId)
 
 	/*
 	 * Look up the namespace in which we are supposed to create the relation,
-	 * and check we have permission to create there.
+	 * check we have permission to create there, lock it against concurrent
+	 * drop, and mark stmt->relation as RELPERSISTENCE_TEMP if a temporary
+	 * namespace is selected.
 	 */
-	namespaceId = RangeVarGetAndCheckCreationNamespace(stmt->relation);
-	RangeVarAdjustRelationPersistence(stmt->relation, namespaceId);
+	namespaceId =
+		RangeVarGetAndCheckCreationNamespace(stmt->relation, NoLock, NULL);
 
 	/*
 	 * Security check: disallow creating temp tables from security-restricted
@@ -9417,6 +9419,7 @@ AlterTableNamespace(AlterObjectSchemaStmt *stmt)
 	Oid			oldNspOid;
 	Oid			nspOid;
 	Relation	classRel;
+	RangeVar   *newrv;
 
 	relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
 									 false, false,
@@ -9441,8 +9444,9 @@ AlterTableNamespace(AlterObjectSchemaStmt *stmt)
 						get_rel_name(tableId))));
 	}
 
-	/* get schema OID and check its permissions */
-	nspOid = LookupCreationNamespace(stmt->newschema);
+	/* Get and lock schema OID and check its permissions. */
+	newrv = makeRangeVar(stmt->newschema, RelationGetRelationName(rel), -1);
+	nspOid = RangeVarGetAndCheckCreationNamespace(newrv, NoLock, NULL);
 
 	/* common checks on switching namespaces */
 	CheckSetNamespace(oldNspOid, nspOid, RelationRelationId, relid);
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 0f8af31..0043bf1 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -2005,7 +2005,8 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist)
 	 * check is here mainly to get a better error message about a "type"
 	 * instead of below about a "relation".
 	 */
-	typeNamespace = RangeVarGetCreationNamespace(createStmt->relation);
+	typeNamespace = RangeVarGetAndCheckCreationNamespace(createStmt->relation,
+														 NoLock, NULL);
 	RangeVarAdjustRelationPersistence(createStmt->relation, typeNamespace);
 	old_type_oid =
 		GetSysCacheOid2(TYPENAMENSP,
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index ff9c449..c3520ae 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -98,10 +98,12 @@ isViewOnTempTable_walker(Node *node, void *context)
  *---------------------------------------------------------------------
  */
 static Oid
-DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace,
-					  Oid namespaceId, List *options)
+DefineVirtualRelation(RangeVar *relation, List *tlist, bool replace,
+					  List *options)
 {
 	Oid			viewOid;
+	Oid			namespaceId;
+	LOCKMODE	lockmode;
 	CreateStmt *createStmt = makeNode(CreateStmt);
 	List	   *attrList;
 	ListCell   *t;
@@ -159,9 +161,14 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace,
 				 errmsg("view must have at least one column")));
 
 	/*
-	 * Check to see if we want to replace an existing view.
+	 * Look up, check permissions on, and lock the creation namespace; also
+	 * check for a preexisting view with the same name.  This will also set
+	 * relation->relpersistence to RELPERSISTENCE_TEMP if the selected
+	 * namespace is temporary.
 	 */
-	viewOid = get_relname_relid(relation->relname, namespaceId);
+	lockmode = replace ? AccessExclusiveLock : NoLock;
+	namespaceId =
+		RangeVarGetAndCheckCreationNamespace(relation, lockmode, &viewOid);
 
 	if (OidIsValid(viewOid) && replace)
 	{
@@ -170,24 +177,16 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace,
 		List	   *atcmds = NIL;
 		AlterTableCmd *atcmd;
 
-		/*
-		 * Yes.  Get exclusive lock on the existing view ...
-		 */
-		rel = relation_open(viewOid, AccessExclusiveLock);
+		/* Relation is already locked, but we must build a relcache entry. */
+		rel = relation_open(viewOid, NoLock);
 
-		/*
-		 * Make sure it *is* a view, and do permissions checks.
-		 */
+		/* Make sure it *is* a view. */
 		if (rel->rd_rel->relkind != RELKIND_VIEW)
 			ereport(ERROR,
 					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
 					 errmsg("\"%s\" is not a view",
 							RelationGetRelationName(rel))));
 
-		if (!pg_class_ownercheck(viewOid, GetUserId()))
-			aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
-						   RelationGetRelationName(rel));
-
 		/* Also check it's not in use already */
 		CheckTableNotInUse(rel, "CREATE OR REPLACE VIEW");
 
@@ -428,7 +427,6 @@ DefineView(ViewStmt *stmt, const char *queryString)
 {
 	Query	   *viewParse;
 	Oid			viewOid;
-	Oid			namespaceId;
 	RangeVar   *view;
 
 	/*
@@ -514,10 +512,6 @@ DefineView(ViewStmt *stmt, const char *queryString)
 						view->relname)));
 	}
 
-	/* Might also need to make it temporary if placed in temp schema. */
-	namespaceId = RangeVarGetCreationNamespace(view);
-	RangeVarAdjustRelationPersistence(view, namespaceId);
-
 	/*
 	 * Create the view relation
 	 *
@@ -525,7 +519,7 @@ DefineView(ViewStmt *stmt, const char *queryString)
 	 * aborted.
 	 */
 	viewOid = DefineVirtualRelation(view, viewParse->targetList,
-									stmt->replace, namespaceId, stmt->options);
+									stmt->replace, stmt->options);
 
 	/*
 	 * The relation we have just created is not visible to any other commands
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 569d0ba..422f737 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -2532,11 +2532,13 @@ OpenIntoRel(QueryDesc *queryDesc)
 	}
 
 	/*
-	 * Find namespace to create in, check its permissions
+	 * Find namespace to create in, check its permissions, lock it against
+	 * concurrent drop, and mark into->rel as RELPERSISTENCE_TEMP if the
+	 * selected namespace is temporary.
 	 */
 	intoName = into->rel->relname;
-	namespaceId = RangeVarGetAndCheckCreationNamespace(into->rel);
-	RangeVarAdjustRelationPersistence(into->rel, namespaceId);
+	namespaceId = RangeVarGetAndCheckCreationNamespace(into->rel, NoLock,
+													   NULL);
 
 	/*
 	 * Security check: disallow creating temp tables from security-restricted
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 335bdc6..99157c5 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -146,6 +146,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
 	List	   *save_alist;
 	ListCell   *elements;
 	Oid			namespaceid;
+	Oid			existing_relid;
 
 	/*
 	 * We must not scribble on the passed-in CreateStmt, so copy it.  (This is
@@ -155,30 +156,25 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
 
 	/*
 	 * Look up the creation namespace.	This also checks permissions on the
-	 * target namespace, so that we throw any permissions error as early as
-	 * possible.
+	 * target namespace, locks it against concurrent drops, checks for a
+	 * preexisting relation in that namespace with the same name, and updates
+	 * stmt->relation->relpersistence if the selected namespace is temporary.
 	 */
-	namespaceid = RangeVarGetAndCheckCreationNamespace(stmt->relation);
-	RangeVarAdjustRelationPersistence(stmt->relation, namespaceid);
+	namespaceid =
+		RangeVarGetAndCheckCreationNamespace(stmt->relation, NoLock,
+											 &existing_relid);
 
 	/*
 	 * If the relation already exists and the user specified "IF NOT EXISTS",
 	 * bail out with a NOTICE.
 	 */
-	if (stmt->if_not_exists)
+	if (stmt->if_not_exists && OidIsValid(existing_relid))
 	{
-		Oid			existing_relid;
-
-		existing_relid = get_relname_relid(stmt->relation->relname,
-										   namespaceid);
-		if (existing_relid != InvalidOid)
-		{
-			ereport(NOTICE,
-					(errcode(ERRCODE_DUPLICATE_TABLE),
-					 errmsg("relation \"%s\" already exists, skipping",
-							stmt->relation->relname)));
-			return NIL;
-		}
+		ereport(NOTICE,
+				(errcode(ERRCODE_DUPLICATE_TABLE),
+				 errmsg("relation \"%s\" already exists, skipping",
+						stmt->relation->relname)));
+		return NIL;
 	}
 
 	/*
diff --git a/src/include/catalog/namespace.h b/src/include/catalog/namespace.h
index 37b259d..fa3ba5b 100644
--- a/src/include/catalog/namespace.h
+++ b/src/include/catalog/namespace.h
@@ -58,7 +58,9 @@ extern Oid	RangeVarGetRelidExtended(const RangeVar *relation,
 						 RangeVarGetRelidCallback callback,
 						 void *callback_arg);
 extern Oid	RangeVarGetCreationNamespace(const RangeVar *newRelation);
-extern Oid	RangeVarGetAndCheckCreationNamespace(const RangeVar *newRelation);
+extern Oid	RangeVarGetAndCheckCreationNamespace(RangeVar *newRelation,
+									 LOCKMODE lockmode,
+									 Oid *existing_relation_id);
 extern void RangeVarAdjustRelationPersistence(RangeVar *newRelation, Oid nspid);
 extern Oid	RelnameGetRelid(const char *relname);
 extern bool RelationIsVisible(Oid relid);

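A rough illustration of the CREATE OR REPLACE VIEW side effect noted above, with invented role, schema, and view names, and assuming the unprivileged role holds CREATE on the schema but does not own the view: the ownership check now fails up front, before any lock on the existing view is requested.

    -- As the owning role:
    CREATE SCHEMA app;
    GRANT USAGE, CREATE ON SCHEMA app TO intruder;
    CREATE VIEW app.report AS SELECT 1 AS x;

    -- As "intruder", who does not own app.report:
    SET SESSION AUTHORIZATION intruder;
    CREATE OR REPLACE VIEW app.report AS SELECT 2 AS x;
    -- fails immediately with an ownership error instead of first
    -- queueing for an AccessExclusiveLock on app.report
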
commit 01d83ffdcae92f75dbfd41de0b4213d241edd394
Author: Andrew Dunstan <andrew@dunslane.net>
Date:   Sun Jan 15 16:15:04 2012 -0500

    Improve efficiency of recent changes to plperl's sv2cstr().
    
    Along the way, add a missing dependency in the GNUmakefile.
    
    Alex Hunsaker, with a slight adjustment by me.

diff --git a/src/pl/plperl/GNUmakefile b/src/pl/plperl/GNUmakefile
index 0f3bd99..188d7d2 100644
--- a/src/pl/plperl/GNUmakefile
+++ b/src/pl/plperl/GNUmakefile
@@ -72,11 +72,11 @@ perlchunks.h: $(PERLCHUNKS)
 
 all: all-lib
 
-SPI.c: SPI.xs
+SPI.c: SPI.xs plperl_helpers.h
 	@if [ x"$(perl_privlibexp)" = x"" ]; then echo "configure switch --with-perl was not specified."; exit 1; fi
 	$(PERL) $(XSUBPPDIR)/ExtUtils/xsubpp -typemap $(perl_privlibexp)/ExtUtils/typemap $< >$@
 
-Util.c: Util.xs
+Util.c: Util.xs plperl_helpers.h
 	@if [ x"$(perl_privlibexp)" = x"" ]; then echo "configure switch --with-perl was not specified."; exit 1; fi
 	$(PERL) $(XSUBPPDIR)/ExtUtils/xsubpp -typemap $(perl_privlibexp)/ExtUtils/typemap $< >$@
 
diff --git a/src/pl/plperl/expected/plperl_elog.out b/src/pl/plperl/expected/plperl_elog.out
index 02497d9..60eade8 100644
--- a/src/pl/plperl/expected/plperl_elog.out
+++ b/src/pl/plperl/expected/plperl_elog.out
@@ -58,3 +58,7 @@ select uses_global();
  uses_global worked
 (1 row)
 
+-- make sure we don't choke on readonly values
+do language plperl $$ elog(NOTICE, ${^TAINT}); $$;
+NOTICE:  0
+CONTEXT:  PL/Perl anonymous code block
diff --git a/src/pl/plperl/plperl_helpers.h b/src/pl/plperl/plperl_helpers.h
index 800a408..35e1257 100644
--- a/src/pl/plperl/plperl_helpers.h
+++ b/src/pl/plperl/plperl_helpers.h
@@ -47,28 +47,35 @@ sv2cstr(SV *sv)
 {
 	char	   *val, *res;
 	STRLEN		len;
-	SV         *nsv;
 
 	/*
 	 * get a utf8 encoded char * out of perl. *note* it may not be valid utf8!
 	 *
 	 * SvPVutf8() croaks nastily on certain things, like typeglobs and
 	 * readonly objects such as $^V. That's a perl bug - it's not supposed to
-	 * happen. To avoid crashing the backend, we make a copy of the
-	 * sv before passing it to SvPVutf8(). The copy is garbage collected 
+	 * happen. To avoid crashing the backend, we make a copy of the sv before
+	 * passing it to SvPVutf8(). The copy is garbage collected 
 	 * when we're done with it.
 	 */
-	nsv = newSVsv(sv);
-	val = SvPVutf8(nsv, len);
+	if (SvREADONLY(sv) ||
+		isGV_with_GP(sv) ||
+		(SvTYPE(sv) > SVt_PVLV && SvTYPE(sv) != SVt_PVFM))
+		sv = newSVsv(sv);
+	else
+		/* increase the reference count so we can just SvREFCNT_dec() it when
+		 * we are done */
+		SvREFCNT_inc(sv);
+
+	val = SvPVutf8(sv, len);
 
 	/*
 	 * we use perl's length in the event we had an embedded null byte to ensure
 	 * we error out properly
 	 */
-	res =  utf_u2e(val, len);
+	res = utf_u2e(val, len);
 
 	/* safe now to garbage collect the new SV */
-	SvREFCNT_dec(nsv);
+	SvREFCNT_dec(sv);
 
 	return res;
 }
diff --git a/src/pl/plperl/sql/plperl_elog.sql b/src/pl/plperl/sql/plperl_elog.sql
index 4f1c014..40896a4 100644
--- a/src/pl/plperl/sql/plperl_elog.sql
+++ b/src/pl/plperl/sql/plperl_elog.sql
@@ -43,3 +43,6 @@ create or replace function uses_global() returns text language plperl as $$
 $$;
 
 select uses_global();
+
+-- make sure we don't choke on readonly values
+do language plperl $$ elog(NOTICE, ${^TAINT}); $$;

commit b2b4af535eb733ba0c2ea6eeb2b14cac7f1ca4be
Author: Heikki Linnakangas <heikki.linnakangas@iki.fi>
Date:   Sun Jan 15 22:03:09 2012 +0200

    Fix poll() implementation of WaitLatchOrSocket to notice postmaster death.
    
    When the remote end of the pipe is closed, select() reports the fd as
    readable, but poll() has a separate POLLHUP return code for that.
    
    Spotted by Peter Geoghegan.

diff --git a/src/backend/port/unix_latch.c b/src/backend/port/unix_latch.c
index fc1a579..10bf2db 100644
--- a/src/backend/port/unix_latch.c
+++ b/src/backend/port/unix_latch.c
@@ -310,8 +310,13 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
 		{
 			result |= WL_SOCKET_WRITEABLE;
 		}
+		/*
+		 * We expect a POLLHUP when the remote end is closed, but because we
+		 * don't expect the pipe to become readable or to have any errors
+		 * either, treat those as postmaster death, too.
+		 */
 		if ((wakeEvents & WL_POSTMASTER_DEATH) &&
-			(pfds[nfds - 1].revents & POLLIN))
+			(pfds[nfds - 1].revents & (POLLHUP | POLLIN | POLLERR | POLLNVAL)))
 		{
 			result |= WL_POSTMASTER_DEATH;
 		}

commit 0495aaad8b337642830a4d4e82f8b8c02b27b1be
Author: Magnus Hagander <magnus@hagander.net>
Date:   Sun Jan 15 15:34:40 2012 +0100

    Allow a user to kill his own queries using pg_cancel_backend()
    
    Allows a user to use pg_cancel_backend() to cancel queries in
    other backends if they are running under the same role.
    pg_terminate_backend() still requires superuser permissions.
    
    Short patch, many authors working on the bikeshed: Magnus Hagander,
    Josh Kupershmidt, Edward Muller, Greg Smith.

diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml
index 2e06346..7d7aba7 100644
--- a/doc/src/sgml/func.sgml
+++ b/doc/src/sgml/func.sgml
@@ -14262,8 +14262,8 @@ SELECT set_config('log_statement_stats', 'off', false);
    <para>
     The functions shown in <xref
     linkend="functions-admin-signal-table"> send control signals to
-    other server processes.  Use of these functions is restricted
-    to superusers.
+    other server processes.  Use of these functions is usually restricted
+    to superusers, with noted exceptions.
    </para>
 
    <table id="functions-admin-signal-table">
@@ -14280,7 +14280,10 @@ SELECT set_config('log_statement_stats', 'off', false);
         <literal><function>pg_cancel_backend(<parameter>pid</parameter> <type>int</>)</function></literal>
         </entry>
        <entry><type>boolean</type></entry>
-       <entry>Cancel a backend's current query</entry>
+       <entry>Cancel a backend's current query.  You can execute this against
+        another backend that has exactly the same role as the user calling the
+        function.  In all other cases, you must be a superuser.
+        </entry>
       </row>
       <row>
        <entry>
@@ -14322,6 +14325,10 @@ SELECT set_config('log_statement_stats', 'off', false);
     <command>postgres</command> processes on the server (using
     <application>ps</> on Unix or the <application>Task
     Manager</> on <productname>Windows</>).
+    For the less restrictive <function>pg_cancel_backend</>, the role of an
+    active backend can be found from
+    the <structfield>usename</structfield> column of the
+    <structname>pg_stat_activity</structname> view.
    </para>
 
    <para>
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index 9715bdd..3de6a5c 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -30,6 +30,7 @@
 #include "postmaster/syslogger.h"
 #include "storage/fd.h"
 #include "storage/pmsignal.h"
+#include "storage/proc.h"
 #include "storage/procarray.h"
 #include "tcop/tcopprot.h"
 #include "utils/builtins.h"
@@ -70,15 +71,42 @@ current_query(PG_FUNCTION_ARGS)
 }
 
 /*
- * Functions to send signals to other backends.
+ * Send a signal to another backend.
+ * The signal is delivered if the user is either a superuser or the same
+ * role as the backend being signaled. For "dangerous" signals, an explicit
+ * check for superuser needs to be done prior to calling this function.
+ *
+ * Returns 0 on success, 1 on general failure, and 2 on permission error.
+ * In the event of a general failure (returncode 1), a warning message will
+ * be emitted. For permission errors, doing that is the responsibility of
+ * the caller.
  */
-static bool
+#define SIGNAL_BACKEND_SUCCESS 0
+#define SIGNAL_BACKEND_ERROR 1
+#define SIGNAL_BACKEND_NOPERMISSION 2
+static int
 pg_signal_backend(int pid, int sig)
 {
+	PGPROC	   *proc;
+
 	if (!superuser())
-		ereport(ERROR,
-				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-			(errmsg("must be superuser to signal other server processes"))));
+	{
+		/*
+		 * Since the user is not superuser, check for matching roles. Trust
+		 * that BackendPidGetProc will return NULL if the pid isn't valid,
+		 * even though the check for whether it's a backend process is below.
+		 * The IsBackendPid check can't be relied on as definitive even if it
+		 * was first. The process might end between successive checks
+		 * regardless of their order. There's no way to acquire a lock on an
+		 * arbitrary process to prevent that. But since so far all the callers
+		 * of this mechanism involve some request for ending the process
+		 * anyway, that it might end on its own first is not a problem.
+		 */
+		proc = BackendPidGetProc(pid);
+
+		if (proc == NULL || proc->roleId != GetUserId())
+			return SIGNAL_BACKEND_NOPERMISSION;
+	}
 
 	if (!IsBackendPid(pid))
 	{
@@ -88,9 +116,18 @@ pg_signal_backend(int pid, int sig)
 		 */
 		ereport(WARNING,
 				(errmsg("PID %d is not a PostgreSQL server process", pid)));
-		return false;
+		return SIGNAL_BACKEND_ERROR;
 	}
 
+	/*
+	 * Can the process we just validated above end, followed by the pid being
+	 * recycled for a new process, before reaching here?  Then we'd be trying
+	 * to kill the wrong thing.  Seems near impossible when sequential pid
+	 * assignment and wraparound is used.  Perhaps it could happen on a system
+	 * where pid re-use is randomized.	That race condition possibility seems
+	 * too unlikely to worry about.
+	 */
+
 	/* If we have setsid(), signal the backend's whole process group */
 #ifdef HAVE_SETSID
 	if (kill(-pid, sig))
@@ -101,23 +138,46 @@ pg_signal_backend(int pid, int sig)
 		/* Again, just a warning to allow loops */
 		ereport(WARNING,
 				(errmsg("could not send signal to process %d: %m", pid)));
-		return false;
+		return SIGNAL_BACKEND_ERROR;
 	}
-	return true;
+	return SIGNAL_BACKEND_SUCCESS;
 }
 
+/*
+ * Signal to cancel a backend process.	This is allowed if you are superuser or
+ * have the same role as the process being canceled.
+ */
 Datum
 pg_cancel_backend(PG_FUNCTION_ARGS)
 {
-	PG_RETURN_BOOL(pg_signal_backend(PG_GETARG_INT32(0), SIGINT));
+	int			r = pg_signal_backend(PG_GETARG_INT32(0), SIGINT);
+
+	if (r == SIGNAL_BACKEND_NOPERMISSION)
+		ereport(ERROR,
+				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+				 (errmsg("must be superuser or have the same role to cancel queries running in other server processes"))));
+
+	PG_RETURN_BOOL(r == SIGNAL_BACKEND_SUCCESS);
 }
 
+/*
+ * Signal to terminate a backend process.  Only allowed by superuser.
+ */
 Datum
 pg_terminate_backend(PG_FUNCTION_ARGS)
 {
-	PG_RETURN_BOOL(pg_signal_backend(PG_GETARG_INT32(0), SIGTERM));
+	if (!superuser())
+		ereport(ERROR,
+				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+			 errmsg("must be superuser to terminate other server processes"),
+				 errhint("You can cancel your own processes with pg_cancel_backend().")));
+
+	PG_RETURN_BOOL(pg_signal_backend(PG_GETARG_INT32(0), SIGTERM) == SIGNAL_BACKEND_SUCCESS);
 }
 
+/*
+ * Signal to reload the database configuration
+ */
 Datum
 pg_reload_conf(PG_FUNCTION_ARGS)
 {

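To sketch how the relaxed permission check can be used (column names assume the 9.2-era pg_stat_activity layout, where the backend process ID column is pid; older releases call it procpid), a non-superuser can now cancel only backends running under its own role:

    -- Cancel this role's other sessions; a pid owned by a different role
    -- would instead raise "must be superuser or have the same role ...".
    SELECT pg_cancel_backend(pid)
      FROM pg_stat_activity
     WHERE usename = current_user
       AND pid <> pg_backend_pid();
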
commit 652300f5392f2d951b368c38f1cf123003e5e21e
Author: Peter Eisentraut <peter_e@gmx.net>
Date:   Sun Jan 15 14:00:52 2012 +0200

    pgcrypto: Remove inappropriate const qualifier
    
    The function in question does not in fact ensure that the passed
    argument is not changed, and the callers don't care much either.

diff --git a/contrib/pgcrypto/mbuf.c b/contrib/pgcrypto/mbuf.c
index 6246900..a2c5293 100644
--- a/contrib/pgcrypto/mbuf.c
+++ b/contrib/pgcrypto/mbuf.c
@@ -136,7 +136,7 @@ mbuf_create(int len)
 }
 
 MBuf *
-mbuf_create_from_data(const uint8 *data, int len)
+mbuf_create_from_data(uint8 *data, int len)
 {
 	MBuf	   *mbuf;
 
diff --git a/contrib/pgcrypto/mbuf.h b/contrib/pgcrypto/mbuf.h
index 37d2db5..da016c0 100644
--- a/contrib/pgcrypto/mbuf.h
+++ b/contrib/pgcrypto/mbuf.h
@@ -77,7 +77,7 @@ struct PullFilterOps
  * Memory buffer
  */
 MBuf	   *mbuf_create(int len);
-MBuf	   *mbuf_create_from_data(const uint8 *data, int len);
+MBuf	   *mbuf_create_from_data(uint8 *data, int len);
 int			mbuf_tell(MBuf *mbuf);
 int			mbuf_avail(MBuf *mbuf);
 int			mbuf_size(MBuf *mbuf);

commit 7064fd06489e069d38a9d67c5322265cb8f7ceec
Author: Alvaro Herrera <alvherre@alvh.no-ip.org>
Date:   Sat Jan 14 19:36:39 2012 -0300

    Detect invalid permutations in isolationtester
    
    isolationtester is now able to continue running other permutations when
    it detects that one of them is invalid, which is useful during initial
    development of spec files.
    
    Author: Alexander Shulgin

diff --git a/src/test/isolation/isolationtester.c b/src/test/isolation/isolationtester.c
index b35e533..ab1ef03 100644
--- a/src/test/isolation/isolationtester.c
+++ b/src/test/isolation/isolationtester.c
@@ -550,8 +550,53 @@ run_permutation(TestSpec * testspec, int nsteps, Step ** steps)
 	for (i = 0; i < nsteps; i++)
 	{
 		Step *step = steps[i];
+		PGconn *conn = conns[1 + step->session];
 
-		if (!PQsendQuery(conns[1 + step->session], step->sql))
+		if (waiting != NULL && step->session == waiting->session)
+		{
+			PGcancel *cancel;
+			PGresult *res;
+			int j;
+
+			/*
+			 * This permutation is invalid: it can never happen in real life.
+			 *
+			 * A session is blocked on an earlier step (waiting) and no further
+			 * steps from this session can run until it is unblocked, but it
+			 * can only be unblocked by running steps from other sessions.
+			 */
+			fprintf(stderr, "invalid permutation detected\n");
+
+			/* Cancel the waiting statement from this session. */
+			cancel = PQgetCancel(conn);
+			if (cancel != NULL)
+			{
+				char buf[256];
+
+				PQcancel(cancel, buf, sizeof(buf));
+
+				/* Be sure to consume the error message. */
+				while ((res = PQgetResult(conn)) != NULL)
+					PQclear(res);
+
+				PQfreeCancel(cancel);
+			}
+
+			/*
+			 * Now we really have to complete all the running transactions to
+			 * make sure teardown doesn't block.
+			 */
+			for (j = 1; j < nconns; j++)
+			{
+				res = PQexec(conns[j], "ROLLBACK");
+				if (res != NULL)
+					PQclear(res);
+			}
+
+			goto teardown;
+		}
+
+		if (!PQsendQuery(conn, step->sql))
 		{
 			fprintf(stdout, "failed to send query for step %s: %s\n",
 					step->name, PQerrorMessage(conns[1 + step->session]));
@@ -590,6 +635,7 @@ run_permutation(TestSpec * testspec, int nsteps, Step ** steps)
 		report_error_message(waiting);
 	}
 
+teardown:
 	/* Perform per-session teardown */
 	for (i = 0; i < testspec->nsessions; i++)
 	{

commit d2a75837ccaa3b0da996969674b631dc3f778838
Author: Alvaro Herrera <alvherre@alvh.no-ip.org>
Date:   Sat Jan 14 18:58:49 2012 -0300

    Avoid NULL pointer dereference in isolationtester

diff --git a/src/test/isolation/isolationtester.c b/src/test/isolation/isolationtester.c
index 1d339e9..b35e533 100644
--- a/src/test/isolation/isolationtester.c
+++ b/src/test/isolation/isolationtester.c
@@ -406,14 +406,16 @@ run_named_permutations(TestSpec * testspec)
 		/* Find all the named steps from the lookup table */
 		for (j = 0; j < p->nsteps; j++)
 		{
-			steps[j] = *((Step **) bsearch(p->stepnames[j], allsteps, nallsteps,
-										 sizeof(Step *), &step_bsearch_cmp));
-			if (steps[j] == NULL)
+			Step	**this = (Step **) bsearch(p->stepnames[j], allsteps,
+											   nallsteps, sizeof(Step *),
+											   &step_bsearch_cmp);
+			if (this == NULL)
 			{
 				fprintf(stderr, "undefined step \"%s\" specified in permutation\n",
 						p->stepnames[j]);
 				exit_nicely();
 			}
+			steps[j] = *this;
 		}
 
 		run_permutation(testspec, p->nsteps, steps);

commit 00c5f55061df52ccfd82eae16f054e08818ad0ff
Author: Heikki Linnakangas <heikki.linnakangas@iki.fi>
Date:   Sat Jan 14 18:22:16 2012 +0200

    Make superuser imply replication privilege. The idea of a privilege that
    a superuser doesn't have doesn't make much sense, as a superuser can do
    whatever he wants through other means anyway. So instead of granting the
    replication privilege to superusers at CREATE USER time by default, allow
    replication connections from superusers whether or not they have the
    replication privilege.
    
    Patch by Noah Misch, per discussion on bug report #6264

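As a brief, hypothetical illustration of the practice still recommended after this change (the role name is invented): create a dedicated non-superuser role for streaming replication, even though a superuser can now make replication connections whether or not it has the REPLICATION attribute.

    CREATE ROLE standby_rep WITH LOGIN REPLICATION PASSWORD 'secret';
    -- pg_hba.conf must still allow "replication" connections for this role.
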
diff --git a/doc/src/sgml/high-availability.sgml b/doc/src/sgml/high-availability.sgml
index 86c2729..c5db6ef 100644
--- a/doc/src/sgml/high-availability.sgml
+++ b/doc/src/sgml/high-availability.sgml
@@ -797,23 +797,14 @@ archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r'
      It is very important that the access privileges for replication be set up
      so that only trusted users can read the WAL stream, because it is
      easy to extract privileged information from it.  Standby servers must
-     authenticate to the primary as an account that has the
-     <literal>REPLICATION</> privilege. So a role with the
-     <literal>REPLICATION</> and <literal>LOGIN</> privileges needs to be
-     created on the primary.
+     authenticate to the primary as a superuser or an account that has the
+     <literal>REPLICATION</> privilege. It is recommended to create a
+     dedicated user account with <literal>REPLICATION</> and <literal>LOGIN</>
+     privileges for replication. While <literal>REPLICATION</> privilege gives
+     very high permissions, it does not allow the user to modify any data on
+     the primary system, which the <literal>SUPERUSER</> privilege does.
     </para>
 
-    <note>
-     <para>
-      It is recommended that a dedicated user account is used for replication.
-      While the <literal>REPLICATION</> privilege is granted to superuser
-      accounts by default, it is not recommended to use superuser accounts
-      for replication. While <literal>REPLICATION</> privilege gives very high
-      permissions, it does not allow the user to modify any data on the
-      primary system, which the <literal>SUPERUSER</> privilege does.
-     </para>
-    </note>
-
     <para>
      Client authentication for replication is controlled by a
      <filename>pg_hba.conf</> record specifying <literal>replication</> in the
diff --git a/doc/src/sgml/recovery-config.sgml b/doc/src/sgml/recovery-config.sgml
index 8647024..7e39c0d 100644
--- a/doc/src/sgml/recovery-config.sgml
+++ b/doc/src/sgml/recovery-config.sgml
@@ -325,9 +325,8 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"'  # Windows
           The connection string should specify the host name (or address)
           of the primary server, as well as the port number if it is not
           the same as the standby server's default.
-          Also specify a user name corresponding to a role that has the
-          <literal>REPLICATION</> and <literal>LOGIN</> privileges on the
-          primary (see
+          Also specify a user name corresponding to a suitably-privileged role
+          on the primary (see
           <xref linkend="streaming-replication-authentication">).
           A password needs to be provided too, if the primary demands password
           authentication.  It can be provided in the
diff --git a/doc/src/sgml/ref/create_role.sgml b/doc/src/sgml/ref/create_role.sgml
index 4953df6..7ec4d0a 100644
--- a/doc/src/sgml/ref/create_role.sgml
+++ b/doc/src/sgml/ref/create_role.sgml
@@ -185,8 +185,7 @@ CREATE ROLE <replaceable class="PARAMETER">name</replaceable> [ [ WITH ] <replac
         A role having the <literal>REPLICATION</> attribute is a very
         highly privileged role, and should only be used on roles actually
         used for replication. If not specified,
-        <literal>NOREPLICATION</literal> is the default for all roles except
-        superusers.
+        <literal>NOREPLICATION</literal> is the default.
        </para>
       </listitem>
      </varlistentry>
diff --git a/doc/src/sgml/ref/pg_basebackup.sgml b/doc/src/sgml/ref/pg_basebackup.sgml
index 8c8c78f..05d5bed 100644
--- a/doc/src/sgml/ref/pg_basebackup.sgml
+++ b/doc/src/sgml/ref/pg_basebackup.sgml
@@ -50,12 +50,13 @@ PostgreSQL documentation
 
   <para>
    The backup is made over a regular <productname>PostgreSQL</productname>
-   connection, and uses the replication protocol. The connection must be
-   made with a user having <literal>REPLICATION</literal> permissions (see
-   <xref linkend="role-attributes">), and the user must be granted explicit
-   permissions in <filename>pg_hba.conf</filename>. The server must also
-   be configured with <xref linkend="guc-max-wal-senders"> set high enough
-   to leave at least one session available for the backup.
+   connection, and uses the replication protocol. The connection must be made
+   with a superuser or a user having <literal>REPLICATION</literal>
+   permissions (see <xref linkend="role-attributes">),
+   and <filename>pg_hba.conf</filename> must explicitly permit the replication
+   connection. The server must also be configured
+   with <xref linkend="guc-max-wal-senders"> set high enough to leave at least
+   one session available for the backup.
   </para>
 
   <para>
diff --git a/doc/src/sgml/ref/pg_receivexlog.sgml b/doc/src/sgml/ref/pg_receivexlog.sgml
index 9a2a24b..fad7470 100644
--- a/doc/src/sgml/ref/pg_receivexlog.sgml
+++ b/doc/src/sgml/ref/pg_receivexlog.sgml
@@ -50,13 +50,13 @@ PostgreSQL documentation
 
   <para>
    The transaction log is streamed over a regular
-   <productname>PostgreSQL</productname> connection, and uses the
-   replication protocol. The connection must be
-   made with a user having <literal>REPLICATION</literal> permissions (see
-   <xref linkend="role-attributes">), and the user must be granted explicit
-   permissions in <filename>pg_hba.conf</filename>. The server must also
-   be configured with <xref linkend="guc-max-wal-senders"> set high enough
-   to leave at least one session available for the stream.
+   <productname>PostgreSQL</productname> connection, and uses the replication
+   protocol. The connection must be made with a superuser or a user
+   having <literal>REPLICATION</literal> permissions (see
+   <xref linkend="role-attributes">), and <filename>pg_hba.conf</filename>
+   must explicitly permit the replication connection. The server must also be
+   configured with <xref linkend="guc-max-wal-senders"> set high enough to
+   leave at least one session available for the stream.
   </para>
  </refsect1>
 
diff --git a/doc/src/sgml/user-manag.sgml b/doc/src/sgml/user-manag.sgml
index 0a4f82d..177ac7a 100644
--- a/doc/src/sgml/user-manag.sgml
+++ b/doc/src/sgml/user-manag.sgml
@@ -169,16 +169,11 @@ CREATE USER <replaceable>name</replaceable>;
       <listitem>
        <para>
         A database superuser bypasses all permission checks, except the right
-        to log in or the right to initiate replication.  This is a
-        dangerous privilege and should not be used carelessly; it is best
-        to do most of your work as a role that is not a superuser.
-        To create a new database superuser, use <literal>CREATE ROLE
-        <replaceable>name</replaceable> SUPERUSER</literal>.  You must do
-        this as a role that is already a superuser. Creating a superuser
-        will by default also grant permissions to initiate streaming
-        replication. For increased security this can be disallowed using
-        <literal>CREATE ROLE <replaceable>name</replaceable> SUPERUSER
-        NOREPLICATION</literal>.
+        to log in.  This is a dangerous privilege and should not be used
+        carelessly; it is best to do most of your work as a role that is not a
+        superuser.  To create a new database superuser, use <literal>CREATE
+        ROLE <replaceable>name</replaceable> SUPERUSER</literal>.  You must do
+        this as a role that is already a superuser.
        </para>
       </listitem>
      </varlistentry>
@@ -217,7 +212,8 @@ CREATE USER <replaceable>name</replaceable>;
       <listitem>
        <para>
         A role must explicitly be given permission to initiate streaming
-        replication. A role used for streaming replication must always
+        replication (except for superusers, since those bypass all permission
+        checks). A role used for streaming replication must always
         have <literal>LOGIN</> permission as well. To create such a role, use
         <literal>CREATE ROLE <replaceable>name</replaceable> REPLICATION
         LOGIN</literal>.
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index a90f0b1..9a88c90 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -239,16 +239,7 @@ CreateRole(CreateRoleStmt *stmt)
 	if (dpassword && dpassword->arg)
 		password = strVal(dpassword->arg);
 	if (dissuper)
-	{
 		issuper = intVal(dissuper->arg) != 0;
-
-		/*
-		 * Superusers get replication by default, but only if NOREPLICATION
-		 * wasn't explicitly mentioned
-		 */
-		if (issuper && !(disreplication && intVal(disreplication->arg) == 0))
-			isreplication = 1;
-	}
 	if (dinherit)
 		inherit = intVal(dinherit->arg) != 0;
 	if (dcreaterole)
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index f9e4dbc..1baa67d 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -659,11 +659,10 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
 	{
 		Assert(!bootstrap);
 
-		/* must have authenticated as a replication role */
-		if (!is_authenticated_user_replication_role())
+		if (!superuser() && !is_authenticated_user_replication_role())
 			ereport(FATAL,
 					(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-					 errmsg("must be replication role to start walsender")));
+					 errmsg("must be superuser or replication role to start walsender")));
 
 		/* process any options passed in the startup packet */
 		if (MyProcPort != NULL)
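
For illustration of the setup the revised documentation above recommends, a
dedicated non-superuser account for streaming replication could be created as
follows; the role name, password, and pg_hba.conf entry are placeholders, not
part of the patch:

    -- on the primary: a role with only the privileges replication needs
    CREATE ROLE replicator REPLICATION LOGIN PASSWORD 'secret';

    -- pg_hba.conf must still explicitly permit the connection, using the
    -- special "replication" keyword in the database column, for example:
    --   host  replication  replicator  192.168.1.0/24  md5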

commit ea038d65c2460408296a5708da8bb0bd8f3d00bc
Author: Peter Eisentraut <peter_e@gmx.net>
Date:   Sat Jan 14 15:01:24 2012 +0200

    initdb: Remove support for crypt authentication method
    
    This was removed from the backend a long time ago, but initdb still
    thought that it was OK to use in the -A option.

diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index e0474f7..9df2656 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -2675,7 +2675,6 @@ main(int argc, char *argv[])
 		strcmp(authmethod, "pam") != 0 &&
 		strncmp(authmethod, "pam ", 4) != 0 &&		/* pam with space = param */
 #endif
-		strcmp(authmethod, "crypt") != 0 &&
 		strcmp(authmethod, "password") != 0
 		)
 
@@ -2690,7 +2689,6 @@ main(int argc, char *argv[])
 	}
 
 	if ((strcmp(authmethod, "md5") == 0 ||
-		 strcmp(authmethod, "crypt") == 0 ||
 		 strcmp(authmethod, "password") == 0) &&
 		!(pwprompt || pwfilename))
 	{

commit d0dcb315db0043f10073a9a244cea138e9e60edd
Author: Robert Haas <rhaas@postgresql.org>
Date:   Fri Jan 13 08:22:31 2012 -0500

    Fix broken logic in lazy_vacuum_heap.
    
    As noted by Tom Lane, the previous coding in this area, which I
    introduced in commit bbb6e559c4ea0fb4c346beda76736451dc24eb4e, was
    poorly tested and caused the vacuum's second heap to go into what would
    have been an infinite loop but for the fact that it eventually caused a
    memory allocation failure.  This version seems to work better.

diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 846d305..2fc749e 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -991,7 +991,11 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
 								 vac_strategy);
 		if (!ConditionalLockBufferForCleanup(buf))
+		{
+			ReleaseBuffer(buf);
+			++tupindex;
 			continue;
+		}
 		tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats);
 
 		/* Now that we've compacted the page, record its available space */

commit 4d0b11a0ca347e5b0304004625b7eb6752e32ee7
Author: Robert Haas <rhaas@postgresql.org>
Date:   Fri Jan 13 08:21:45 2012 -0500

    Typo fix.

diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c
index ee8ba5e..d63ff29 100644
--- a/src/backend/replication/walreceiver.c
+++ b/src/backend/replication/walreceiver.c
@@ -750,7 +750,7 @@ ProcessWalSndrMessage(XLogRecPtr walEnd, TimestampTz sendTime)
 	walrcv->lastMsgReceiptTime = lastMsgReceiptTime;
 	SpinLockRelease(&walrcv->mutex);
 
-	if (log_min_mesages <= DEBUG2)
+	if (log_min_messages <= DEBUG2)
 		elog(DEBUG2, "sendtime %s receipttime %s replication apply delay %d ms transfer latency %d ms",
 					timestamptz_to_str(sendTime),
 					timestamptz_to_str(lastMsgReceiptTime),

commit 5530623d0326e96e40b8d54275da256ca0fb6856
Author: Simon Riggs <simon@2ndQuadrant.com>
Date:   Fri Jan 13 13:02:44 2012 +0000

    Correctly initialise shared recoveryLastRecPtr in recovery.
    Previously we used ReadRecPtr rather than EndRecPtr, which was
    not a serious error but caused pg_stat_replication to report
    incorrect replay_location until at least one WAL record is replayed.
    
    Fujii Masao

diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 19ef66b..ce659ec 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -6407,7 +6407,7 @@ StartupXLOG(void)
 		 */
 		SpinLockAcquire(&xlogctl->info_lck);
 		xlogctl->replayEndRecPtr = ReadRecPtr;
-		xlogctl->recoveryLastRecPtr = ReadRecPtr;
+		xlogctl->recoveryLastRecPtr = EndRecPtr;
 		xlogctl->recoveryLastXTime = 0;
 		xlogctl->currentChunkStartTime = 0;
 		xlogctl->recoveryPause = false;
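
The symptom being fixed is visible from the primary; a minimal check, assuming
a 9.1/9.2-era standby is connected (column names as in those releases):

    -- run on the primary just after the standby has started up
    SELECT application_name, sent_location, replay_location
      FROM pg_stat_replication;
    -- before this fix, replay_location reported a stale value until the
    -- standby had replayed at least one WAL record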

commit 3f1787c253967617a0e34fa4bfb7b2ab184ad484
Author: Simon Riggs <simon@2ndQuadrant.com>
Date:   Fri Jan 13 12:59:08 2012 +0000

    Minor but necessary improvements to WAL keepalives
    
    Fujii Masao

diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c
index ee59571..ee8ba5e 100644
--- a/src/backend/replication/walreceiver.c
+++ b/src/backend/replication/walreceiver.c
@@ -750,7 +750,8 @@ ProcessWalSndrMessage(XLogRecPtr walEnd, TimestampTz sendTime)
 	walrcv->lastMsgReceiptTime = lastMsgReceiptTime;
 	SpinLockRelease(&walrcv->mutex);
 
-	elog(DEBUG2, "sendtime %s receipttime %s replication apply delay %d transfer latency %d",
+	if (log_min_mesages <= DEBUG2)
+		elog(DEBUG2, "sendtime %s receipttime %s replication apply delay %d ms transfer latency %d ms",
 					timestamptz_to_str(sendTime),
 					timestamptz_to_str(lastMsgReceiptTime),
 					GetReplicationApplyDelay(),
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index 3598e56..3611713 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -834,7 +834,12 @@ WalSndLoop(void)
 			if (pq_is_send_pending())
 				wakeEvents |= WL_SOCKET_WRITEABLE;
 			else
+			{
 				WalSndKeepalive(output_message);
+				/* Try to flush pending output to the client */
+				if (pq_flush_if_writable() != 0)
+					break;
+			}
 
 			/* Determine time until replication timeout */
 			if (replication_timeout > 0)

commit 21b446dd0927f8f2a187d9461a0d3f11db836f77
Author: Tom Lane <tgl@sss.pgh.pa.us>
Date:   Thu Jan 12 16:40:14 2012 -0500

    Fix CLUSTER/VACUUM FULL for toast values owned by recently-updated rows.
    
    In commit 7b0d0e9356963d5c3e4d329a917f5fbb82a2ef05, I made CLUSTER and
    VACUUM FULL try to preserve toast value OIDs from the original toast table
    to the new one.  However, if we have to copy both live and recently-dead
    versions of a row that has a toasted column, those versions may well
    reference the same toast value with the same OID.  The patch then led to
    duplicate-key failures as we tried to insert the toast value twice with the
    same OID.  (The previous behavior was not very desirable either, since it
    would have silently inserted the same value twice with different OIDs.
    That wastes space, but what's worse is that the toast values inserted for
    already-dead heap rows would not be reclaimed by subsequent ordinary
    VACUUMs, since they go into the new toast table marked live not deleted.)
    
    To fix, check if the copied OID already exists in the new toast table, and
    if so, assume that it stores the desired value.  This is reasonably safe
    since the only case where we will copy an OID from a previous toast pointer
    is when toast_insert_or_update was given that toast pointer and so we just
    pulled the data from the old table; if we got two different values that way
    then we have big problems anyway.  We do have to assume that no other
    backend is inserting items into the new toast table concurrently, but
    that's surely safe for CLUSTER and VACUUM FULL.
    
    Per bug #6393 from Maxim Boguk.  Back-patch to 9.0, same as the previous
    patch.
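
The failure scenario described above can be sketched at the SQL level; the
table, column, and value below are illustrative only, assuming the text value
is large enough to be stored out of line in the TOAST table:

    -- session 1: hold an old snapshot so the dead row version stays recently dead
    BEGIN ISOLATION LEVEL REPEATABLE READ;
    SELECT txid_current();

    -- session 2: both row versions end up referencing the same toast value
    CREATE TABLE t (id int, big text);
    INSERT INTO t VALUES (1, repeat('x', 1000000));
    UPDATE t SET id = 2;    -- big is unchanged, so its toast pointer is reused
    VACUUM FULL t;          -- previously could fail with a duplicate-key
                            -- error on the new toast table's index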

diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index 3ba37f6..28b5a20 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -76,7 +76,8 @@ do { \
 static void toast_delete_datum(Relation rel, Datum value);
 static Datum toast_save_datum(Relation rel, Datum value,
 				 struct varlena *oldexternal, int options);
-static bool toast_valueid_exists(Oid toastrelid, Oid valueid);
+static bool toastrel_valueid_exists(Relation toastrel, Oid valueid);
+static bool toastid_valueid_exists(Oid toastrelid, Oid valueid);
 static struct varlena *toast_fetch_datum(struct varlena * attr);
 static struct varlena *toast_fetch_datum_slice(struct varlena * attr,
 						int32 sliceoffset, int32 length);
@@ -1342,7 +1343,34 @@ toast_save_datum(Relation rel, Datum value,
 			/* Must copy to access aligned fields */
 			VARATT_EXTERNAL_GET_POINTER(old_toast_pointer, oldexternal);
 			if (old_toast_pointer.va_toastrelid == rel->rd_toastoid)
+			{
+				/* This value came from the old toast table; reuse its OID */
 				toast_pointer.va_valueid = old_toast_pointer.va_valueid;
+
+				/*
+				 * There is a corner case here: the table rewrite might have
+				 * to copy both live and recently-dead versions of a row, and
+				 * those versions could easily reference the same toast value.
+				 * When we copy the second or later version of such a row,
+				 * reusing the OID will mean we select an OID that's already
+				 * in the new toast table.  Check for that, and if so, just
+				 * fall through without writing the data again.
+				 *
+				 * While annoying and ugly-looking, this is a good thing
+				 * because it ensures that we wind up with only one copy of
+				 * the toast value when there is only one copy in the old
+				 * toast table.  Before we detected this case, we'd have made
+				 * multiple copies, wasting space; and what's worse, the
+				 * copies belonging to already-deleted heap tuples would not
+				 * be reclaimed by VACUUM.
+				 */
+				if (toastrel_valueid_exists(toastrel,
+											toast_pointer.va_valueid))
+				{
+					/* Match, so short-circuit the data storage loop below */
+					data_todo = 0;
+				}
+			}
 		}
 		if (toast_pointer.va_valueid == InvalidOid)
 		{
@@ -1356,8 +1384,8 @@ toast_save_datum(Relation rel, Datum value,
 					GetNewOidWithIndex(toastrel,
 									   RelationGetRelid(toastidx),
 									   (AttrNumber) 1);
-			} while (toast_valueid_exists(rel->rd_toastoid,
-										  toast_pointer.va_valueid));
+			} while (toastid_valueid_exists(rel->rd_toastoid,
+											toast_pointer.va_valueid));
 		}
 	}
 
@@ -1495,25 +1523,19 @@ toast_delete_datum(Relation rel, Datum value)
 
 
 /* ----------
- * toast_valueid_exists -
+ * toastrel_valueid_exists -
  *
  *	Test whether a toast value with the given ID exists in the toast relation
  * ----------
  */
 static bool
-toast_valueid_exists(Oid toastrelid, Oid valueid)
+toastrel_valueid_exists(Relation toastrel, Oid valueid)
 {
 	bool		result = false;
-	Relation	toastrel;
 	ScanKeyData toastkey;
 	SysScanDesc toastscan;
 
 	/*
-	 * Open the toast relation
-	 */
-	toastrel = heap_open(toastrelid, AccessShareLock);
-
-	/*
 	 * Setup a scan key to find chunks with matching va_valueid
 	 */
 	ScanKeyInit(&toastkey,
@@ -1530,10 +1552,27 @@ toast_valueid_exists(Oid toastrelid, Oid valueid)
 	if (systable_getnext(toastscan) != NULL)
 		result = true;
 
-	/*
-	 * End scan and close relations
-	 */
 	systable_endscan(toastscan);
+
+	return result;
+}
+
+/* ----------
+ * toastid_valueid_exists -
+ *
+ *	As above, but work from toast rel's OID not an open relation
+ * ----------
+ */
+static bool
+toastid_valueid_exists(Oid toastrelid, Oid valueid)
+{
+	bool		result;
+	Relation	toastrel;
+
+	toastrel = heap_open(toastrelid, AccessShareLock);
+
+	result = toastrel_valueid_exists(toastrel, valueid);
+
 	heap_close(toastrel, AccessShareLock);
 
 	return result;
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 997449e..9408f25 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -787,16 +787,19 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
 		 * When doing swap by content, any toast pointers written into NewHeap
 		 * must use the old toast table's OID, because that's where the toast
 		 * data will eventually be found.  Set this up by setting rd_toastoid.
-		 * This also tells tuptoaster.c to preserve the toast value OIDs,
-		 * which we want so as not to invalidate toast pointers in system
-		 * catalog caches.
+		 * This also tells toast_save_datum() to preserve the toast value
+		 * OIDs, which we want so as not to invalidate toast pointers in
+		 * system catalog caches, and to avoid making multiple copies of a
+		 * single toast value.
 		 *
 		 * Note that we must hold NewHeap open until we are done writing data,
 		 * since the relcache will not guarantee to remember this setting once
 		 * the relation is closed.	Also, this technique depends on the fact
 		 * that no one will try to read from the NewHeap until after we've
 		 * finished writing it and swapping the rels --- otherwise they could
-		 * follow the toast pointers to the wrong place.
+		 * follow the toast pointers to the wrong place.  (It would actually
+		 * work for values copied over from the old toast table, but not for
+		 * any values that we toast which were previously not toasted.)
 		 */
 		NewHeap->rd_toastoid = OldHeap->rd_rel->reltoastrelid;
 	}
diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h
index 0e6286b..d404c2a 100644
--- a/src/include/utils/rel.h
+++ b/src/include/utils/rel.h
@@ -159,7 +159,8 @@ typedef struct RelationData
 	 * have the existing toast table's OID, not the OID of the transient toast
 	 * table.  If rd_toastoid isn't InvalidOid, it is the OID to place in
 	 * toast pointers inserted into this rel.  (Note it's set on the new
-	 * version of the main heap, not the toast table itself.)
+	 * version of the main heap, not the toast table itself.)  This also
+	 * causes toast_save_datum() to try to preserve toast value OIDs.
 	 */
 	Oid			rd_toastoid;	/* Real TOAST table's OID, or InvalidOid */
 

commit de5a08c59de39df07599723cb212ae8297903f48
Author: Tom Lane <tgl@sss.pgh.pa.us>
Date:   Thu Jan 12 14:18:08 2012 -0500

    Tweak duplicate-index-column regression test to avoid locale sensitivity.
    
    The originally-chosen test case gives different results in es_EC locale
    because of an unusual rule for sorting strings beginning with "LL".  Adjust
    the comparison value to avoid that, while hopefully not introducing new
    locale dependencies elsewhere.  Per report from Jaime Casanova.

diff --git a/src/test/regress/expected/create_index.out b/src/test/regress/expected/create_index.out
index 3be2016..b1fcada 100644
--- a/src/test/regress/expected/create_index.out
+++ b/src/test/regress/expected/create_index.out
@@ -2468,18 +2468,18 @@ CREATE INDEX dupindexcols_i ON dupindexcols (f1, id, f1 text_pattern_ops);
 VACUUM ANALYZE dupindexcols;
 EXPLAIN (COSTS OFF)
   SELECT count(*) FROM dupindexcols
-    WHERE f1 > 'LX' and id < 1000 and f1 ~<~ 'YX';
+    WHERE f1 > 'MA' and id < 1000 and f1 ~<~ 'YX';
                                    QUERY PLAN                                    
 ---------------------------------------------------------------------------------
  Aggregate
    ->  Index Only Scan using dupindexcols_i on dupindexcols
-         Index Cond: ((f1 > 'LX'::text) AND (id < 1000) AND (f1 ~<~ 'YX'::text))
+         Index Cond: ((f1 > 'MA'::text) AND (id < 1000) AND (f1 ~<~ 'YX'::text))
 (3 rows)
 
 SELECT count(*) FROM dupindexcols
-  WHERE f1 > 'LX' and id < 1000 and f1 ~<~ 'YX';
+  WHERE f1 > 'MA' and id < 1000 and f1 ~<~ 'YX';
  count 
 -------
-   500
+   497
 (1 row)
 
diff --git a/src/test/regress/sql/create_index.sql b/src/test/regress/sql/create_index.sql
index 6d25ff9..5e5fc22 100644
--- a/src/test/regress/sql/create_index.sql
+++ b/src/test/regress/sql/create_index.sql
@@ -816,6 +816,6 @@ VACUUM ANALYZE dupindexcols;
 
 EXPLAIN (COSTS OFF)
   SELECT count(*) FROM dupindexcols
-    WHERE f1 > 'LX' and id < 1000 and f1 ~<~ 'YX';
+    WHERE f1 > 'MA' and id < 1000 and f1 ~<~ 'YX';
 SELECT count(*) FROM dupindexcols
-  WHERE f1 > 'LX' and id < 1000 and f1 ~<~ 'YX';
+  WHERE f1 > 'MA' and id < 1000 and f1 ~<~ 'YX';

commit 50363c8f86f89fe611ba417575218978917f1ac0
Author: Alvaro Herrera <alvherre@alvh.no-ip.org>
Date:   Wed Jan 11 18:46:18 2012 -0300

    Validate number of steps specified in permutation
    
    A permutation that specifies more steps than defined causes
    isolationtester to crash, so avoid that.  Using fewer steps than defined
    should probably not be a problem, but no spec currently does that.

diff --git a/src/test/isolation/isolationtester.c b/src/test/isolation/isolationtester.c
index 38f1e78..1d339e9 100644
--- a/src/test/isolation/isolationtester.c
+++ b/src/test/isolation/isolationtester.c
@@ -395,6 +395,12 @@ run_named_permutations(TestSpec * testspec)
 		Permutation *p = testspec->permutations[i];
 		Step	  **steps;
 
+		if (p->nsteps != nallsteps)
+		{
+			fprintf(stderr, "invalid number of steps in permutation %d\n", i + 1);
+			exit_nicely();
+		}
+
 		steps = malloc(p->nsteps * sizeof(Step *));
 
 		/* Find all the named steps from the lookup table */
@@ -404,7 +410,8 @@ run_named_permutations(TestSpec * testspec)
 										 sizeof(Step *), &step_bsearch_cmp));
 			if (steps[j] == NULL)
 			{
-				fprintf(stderr, "undefined step \"%s\" specified in permutation\n", p->stepnames[j]);
+				fprintf(stderr, "undefined step \"%s\" specified in permutation\n",
+						p->stepnames[j]);
 				exit_nicely();
 			}
 		}

commit 1b9dea04b5cd8b1b2f8041e4aece9d573f007eb1
Author: Heikki Linnakangas <heikki.linnakangas@iki.fi>
Date:   Wed Jan 11 11:00:53 2012 +0200

    Remove useless 'needlock' argument from GetXLogInsertRecPtr. It was always
    passed as 'true'.

diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index db7d993..19ef66b 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -9411,16 +9411,14 @@ GetStandbyFlushRecPtr(void)
  * Get latest WAL insert pointer
  */
 XLogRecPtr
-GetXLogInsertRecPtr(bool needlock)
+GetXLogInsertRecPtr(void)
 {
 	XLogCtlInsert *Insert = &XLogCtl->Insert;
 	XLogRecPtr	current_recptr;
 
-	if (needlock)
-		LWLockAcquire(WALInsertLock, LW_SHARED);
+	LWLockAcquire(WALInsertLock, LW_SHARED);
 	INSERT_RECPTR(current_recptr, Insert, Insert->curridx);
-	if (needlock)
-		LWLockRelease(WALInsertLock);
+	LWLockRelease(WALInsertLock);
 
 	return current_recptr;
 }
diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c
index 1ca279d..2e10d4d 100644
--- a/src/backend/access/transam/xlogfuncs.c
+++ b/src/backend/access/transam/xlogfuncs.c
@@ -200,7 +200,7 @@ pg_current_xlog_insert_location(PG_FUNCTION_ARGS)
 				 errmsg("recovery is in progress"),
 				 errhint("WAL control functions cannot be executed during recovery.")));
 
-	current_recptr = GetXLogInsertRecPtr(true);
+	current_recptr = GetXLogInsertRecPtr();
 
 	snprintf(location, sizeof(location), "%X/%X",
 			 current_recptr.xlogid, current_recptr.xrecoff);
diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h
index 93622c4..1ddf4bf 100644
--- a/src/include/access/xlog.h
+++ b/src/include/access/xlog.h
@@ -288,7 +288,7 @@ extern bool XLogInsertAllowed(void);
 extern void GetXLogReceiptTime(TimestampTz *rtime, bool *fromStream);
 extern XLogRecPtr GetXLogReplayRecPtr(XLogRecPtr *restoreLastRecPtr);
 extern XLogRecPtr GetStandbyFlushRecPtr(void);
-extern XLogRecPtr GetXLogInsertRecPtr(bool needlock);
+extern XLogRecPtr GetXLogInsertRecPtr(void);
 extern XLogRecPtr GetXLogWriteRecPtr(void);
 extern bool RecoveryIsPaused(void);
 extern void SetRecoveryPause(bool recoveryPause);
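
For context, the refactored function is reachable from SQL through
pg_current_xlog_insert_location(), shown in the xlogfuncs.c hunk above:

    -- returns the current WAL insert location, e.g. 0/16B3D80 (value illustrative);
    -- on a standby it errors out with "recovery is in progress"
    SELECT pg_current_xlog_insert_location();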

commit 9c808f89c2841dd847555898a8db45fcd69e913b
Author: Heikki Linnakangas <heikki.linnakangas@iki.fi>
Date:   Wed Jan 11 09:46:18 2012 +0200

    Refactor XLogInsert a bit. The rdata entries for backup blocks are now
    constructed before acquiring WALInsertLock, which slightly reduces the time
    the lock is held. Although I could not measure any benefit in benchmarks,
    the code is more readable this way.

diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 8e65962..db7d993 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -694,6 +694,7 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
 	uint32		freespace;
 	int			curridx;
 	XLogRecData *rdt;
+	XLogRecData *rdt_lastnormal;
 	Buffer		dtbuf[XLR_MAX_BKP_BLOCKS];
 	bool		dtbuf_bkp[XLR_MAX_BKP_BLOCKS];
 	BkpBlock	dtbuf_xlg[XLR_MAX_BKP_BLOCKS];
@@ -708,6 +709,7 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
 	bool		updrqst;
 	bool		doPageWrites;
 	bool		isLogSwitch = (rmid == RM_XLOG_ID && info == XLOG_SWITCH);
+	uint8		info_orig = info;
 
 	/* cross-check on whether we should be here or not */
 	if (!XLogInsertAllowed())
@@ -731,23 +733,18 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
 	}
 
 	/*
-	 * Here we scan the rdata chain, determine which buffers must be backed
-	 * up, and compute the CRC values for the data.  Note that the record
-	 * header isn't added into the CRC initially since we don't know the final
-	 * length or info bits quite yet.  Thus, the CRC will represent the CRC of
-	 * the whole record in the order "rdata, then backup blocks, then record
-	 * header".
+	 * Here we scan the rdata chain, to determine which buffers must be backed
+	 * up.
 	 *
 	 * We may have to loop back to here if a race condition is detected below.
 	 * We could prevent the race by doing all this work while holding the
 	 * insert lock, but it seems better to avoid doing CRC calculations while
-	 * holding the lock.  This means we have to be careful about modifying the
-	 * rdata chain until we know we aren't going to loop back again.  The only
-	 * change we allow ourselves to make earlier is to set rdt->data = NULL in
-	 * chain items we have decided we will have to back up the whole buffer
-	 * for.  This is OK because we will certainly decide the same thing again
-	 * for those items if we do it over; doing it here saves an extra pass
-	 * over the chain later.
+	 * holding the lock.
+	 *
+	 * We add entries for backup blocks to the chain, so that they don't
+	 * need any special treatment in the critical section where the chunks are
+	 * copied into the WAL buffers. Those entries have to be unlinked from the
+	 * chain if we have to loop back here.
 	 */
 begin:;
 	for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@@ -764,7 +761,6 @@ begin:;
 	 */
 	doPageWrites = fullPageWrites || Insert->forcePageWrites;
 
-	INIT_CRC32(rdata_crc);
 	len = 0;
 	for (rdt = rdata;;)
 	{
@@ -772,7 +768,6 @@ begin:;
 		{
 			/* Simple data, just include it */
 			len += rdt->len;
-			COMP_CRC32(rdata_crc, rdt->data, rdt->len);
 		}
 		else
 		{
@@ -783,12 +778,12 @@ begin:;
 				{
 					/* Buffer already referenced by earlier chain item */
 					if (dtbuf_bkp[i])
+					{
 						rdt->data = NULL;
+						rdt->len = 0;
+					}
 					else if (rdt->data)
-					{
 						len += rdt->len;
-						COMP_CRC32(rdata_crc, rdt->data, rdt->len);
-					}
 					break;
 				}
 				if (dtbuf[i] == InvalidBuffer)
@@ -800,12 +795,10 @@ begin:;
 					{
 						dtbuf_bkp[i] = true;
 						rdt->data = NULL;
+						rdt->len = 0;
 					}
 					else if (rdt->data)
-					{
 						len += rdt->len;
-						COMP_CRC32(rdata_crc, rdt->data, rdt->len);
-					}
 					break;
 				}
 			}
@@ -820,39 +813,6 @@ begin:;
 	}
 
 	/*
-	 * Now add the backup block headers and data into the CRC
-	 */
-	for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
-	{
-		if (dtbuf_bkp[i])
-		{
-			BkpBlock   *bkpb = &(dtbuf_xlg[i]);
-			char	   *page;
-
-			COMP_CRC32(rdata_crc,
-					   (char *) bkpb,
-					   sizeof(BkpBlock));
-			page = (char *) BufferGetBlock(dtbuf[i]);
-			if (bkpb->hole_length == 0)
-			{
-				COMP_CRC32(rdata_crc,
-						   page,
-						   BLCKSZ);
-			}
-			else
-			{
-				/* must skip the hole */
-				COMP_CRC32(rdata_crc,
-						   page,
-						   bkpb->hole_offset);
-				COMP_CRC32(rdata_crc,
-						   page + (bkpb->hole_offset + bkpb->hole_length),
-						   BLCKSZ - (bkpb->hole_offset + bkpb->hole_length));
-			}
-		}
-	}
-
-	/*
 	 * NOTE: We disallow len == 0 because it provides a useful bit of extra
 	 * error checking in ReadRecord.  This means that all callers of
 	 * XLogInsert must supply at least some not-in-a-buffer data.  However, we
@@ -862,70 +822,20 @@ begin:;
 	if (len == 0 && !isLogSwitch)
 		elog(PANIC, "invalid xlog record length %u", len);
 
-	START_CRIT_SECTION();
-
-	/* Now wait to get insert lock */
-	LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
-
-	/*
-	 * Check to see if my RedoRecPtr is out of date.  If so, may have to go
-	 * back and recompute everything.  This can only happen just after a
-	 * checkpoint, so it's better to be slow in this case and fast otherwise.
-	 *
-	 * If we aren't doing full-page writes then RedoRecPtr doesn't actually
-	 * affect the contents of the XLOG record, so we'll update our local copy
-	 * but not force a recomputation.
-	 */
-	if (!XLByteEQ(RedoRecPtr, Insert->RedoRecPtr))
-	{
-		Assert(XLByteLT(RedoRecPtr, Insert->RedoRecPtr));
-		RedoRecPtr = Insert->RedoRecPtr;
-
-		if (doPageWrites)
-		{
-			for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
-			{
-				if (dtbuf[i] == InvalidBuffer)
-					continue;
-				if (dtbuf_bkp[i] == false &&
-					XLByteLE(dtbuf_lsn[i], RedoRecPtr))
-				{
-					/*
-					 * Oops, this buffer now needs to be backed up, but we
-					 * didn't think so above.  Start over.
-					 */
-					LWLockRelease(WALInsertLock);
-					END_CRIT_SECTION();
-					goto begin;
-				}
-			}
-		}
-	}
-
-	/*
-	 * Also check to see if forcePageWrites was just turned on; if we weren't
-	 * already doing full-page writes then go back and recompute. (If it was
-	 * just turned off, we could recompute the record without full pages, but
-	 * we choose not to bother.)
-	 */
-	if (Insert->forcePageWrites && !doPageWrites)
-	{
-		/* Oops, must redo it with full-page data */
-		LWLockRelease(WALInsertLock);
-		END_CRIT_SECTION();
-		goto begin;
-	}
-
 	/*
 	 * Make additional rdata chain entries for the backup blocks, so that we
-	 * don't need to special-case them in the write loop.  Note that we have
-	 * now irrevocably changed the input rdata chain.  At the exit of this
-	 * loop, write_len includes the backup block data.
+	 * don't need to special-case them in the write loop.  This modifies the
+	 * original rdata chain, but we keep a pointer to the last regular entry,
+	 * rdt_lastnormal, so that we can undo this if we have to loop back to the
+	 * beginning.
+	 *
+	 * At the exit of this loop, write_len includes the backup block data.
 	 *
 	 * Also set the appropriate info bits to show which buffers were backed
 	 * up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th distinct
 	 * buffer value (ignoring InvalidBuffer) appearing in the rdata chain.
 	 */
+	rdt_lastnormal = rdt;
 	write_len = len;
 	for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
 	{
@@ -975,6 +885,76 @@ begin:;
 	}
 
 	/*
+	 * Calculate CRC of the data, including all the backup blocks
+	 *
+	 * Note that the record header isn't added into the CRC initially since
+	 * we don't know the prev-link yet.  Thus, the CRC will represent the CRC
+	 * of the whole record in the order: rdata, then backup blocks, then
+	 * record header.
+	 */
+	INIT_CRC32(rdata_crc);
+	for (rdt = rdata; rdt != NULL; rdt = rdt->next)
+		COMP_CRC32(rdata_crc, rdt->data, rdt->len);
+
+	START_CRIT_SECTION();
+
+	/* Now wait to get insert lock */
+	LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
+
+	/*
+	 * Check to see if my RedoRecPtr is out of date.  If so, may have to go
+	 * back and recompute everything.  This can only happen just after a
+	 * checkpoint, so it's better to be slow in this case and fast otherwise.
+	 *
+	 * If we aren't doing full-page writes then RedoRecPtr doesn't actually
+	 * affect the contents of the XLOG record, so we'll update our local copy
+	 * but not force a recomputation.
+	 */
+	if (!XLByteEQ(RedoRecPtr, Insert->RedoRecPtr))
+	{
+		Assert(XLByteLT(RedoRecPtr, Insert->RedoRecPtr));
+		RedoRecPtr = Insert->RedoRecPtr;
+
+		if (doPageWrites)
+		{
+			for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
+			{
+				if (dtbuf[i] == InvalidBuffer)
+					continue;
+				if (dtbuf_bkp[i] == false &&
+					XLByteLE(dtbuf_lsn[i], RedoRecPtr))
+				{
+					/*
+					 * Oops, this buffer now needs to be backed up, but we
+					 * didn't think so above.  Start over.
+					 */
+					LWLockRelease(WALInsertLock);
+					END_CRIT_SECTION();
+					rdt_lastnormal->next = NULL;
+					info = info_orig;
+					goto begin;
+				}
+			}
+		}
+	}
+
+	/*
+	 * Also check to see if forcePageWrites was just turned on; if we weren't
+	 * already doing full-page writes then go back and recompute. (If it was
+	 * just turned off, we could recompute the record without full pages, but
+	 * we choose not to bother.)
+	 */
+	if (Insert->forcePageWrites && !doPageWrites)
+	{
+		/* Oops, must redo it with full-page data. */
+		LWLockRelease(WALInsertLock);
+		END_CRIT_SECTION();
+		rdt_lastnormal->next = NULL;
+		info = info_orig;
+		goto begin;
+	}
+
+	/*
 	 * If there isn't enough space on the current XLOG page for a record
 	 * header, advance to the next page (leaving the unused space as zeroes).
 	 */

commit 26e89e7f23194e390ec4326cd0198a1992c2adf0
Author: Peter Eisentraut <peter_e@gmx.net>
Date:   Tue Jan 10 22:49:17 2012 +0200

    Fix typos

diff --git a/src/pl/plpython/plpy_cursorobject.c b/src/pl/plpython/plpy_cursorobject.c
index b1ef7d7..4226dc7 100644
--- a/src/pl/plpython/plpy_cursorobject.c
+++ b/src/pl/plpython/plpy_cursorobject.c
@@ -134,7 +134,7 @@ PLy_cursor_query(const char *query)
 		SPI_freeplan(plan);
 
 		if (portal == NULL)
-			elog(ERROR, "SPI_cursor_open() failed:%s",
+			elog(ERROR, "SPI_cursor_open() failed: %s",
 				 SPI_result_code_string(SPI_result));
 
 		cursor->portalname = PLy_strdup(portal->name);
@@ -255,7 +255,7 @@ PLy_cursor_plan(PyObject *ob, PyObject *args)
 		portal = SPI_cursor_open(NULL, plan->plan, plan->values, nulls,
 								 PLy_curr_procedure->fn_readonly);
 		if (portal == NULL)
-			elog(ERROR, "SPI_cursor_open() failed:%s",
+			elog(ERROR, "SPI_cursor_open() failed: %s",
 				 SPI_result_code_string(SPI_result));
 
 		cursor->portalname = PLy_strdup(portal->name);

commit a9f2e31cf653bad72debae616521130065e55077
Author: Peter Eisentraut <peter_e@gmx.net>
Date:   Tue Jan 10 21:46:29 2012 +0200

    Support CREATE TABLE (LIKE ...) with foreign tables and views
    
    Composite types are not yet supported, because parserOpenTable()
    rejects them.

diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml
index 97968bb..f55a001 100644
--- a/doc/src/sgml/ref/create_table.sgml
+++ b/doc/src/sgml/ref/create_table.sgml
@@ -368,6 +368,11 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI
       If the same name is specified explicitly or in another
       <literal>LIKE</literal> clause, an error is signalled.
      </para>
+     <para>
+      The <literal>LIKE</literal> clause can also be used to copy columns from
+      views or foreign tables.  Inapplicable options (e.g., <literal>INCLUDING
+      INDEXES</literal> from a view) are ignored.
+     </para>
     </listitem>
    </varlistentry>
 
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index e14ae09..335bdc6 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -644,10 +644,12 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
 	relation = parserOpenTable(cxt->pstate, table_like_clause->relation,
 							   AccessShareLock);
 
-	if (relation->rd_rel->relkind != RELKIND_RELATION)
+	if (relation->rd_rel->relkind != RELKIND_RELATION
+		&& relation->rd_rel->relkind != RELKIND_VIEW
+		&& relation->rd_rel->relkind != RELKIND_FOREIGN_TABLE)
 		ereport(ERROR,
 				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-				 errmsg("LIKE source relation \"%s\" is not a table",
+				 errmsg("LIKE source relation \"%s\" is not a table, view, or foreign table",
 						table_like_clause->relation->relname)));
 
 	/*
diff --git a/src/test/regress/expected/create_table_like.out b/src/test/regress/expected/create_table_like.out
index 9ff632e..40b6766 100644
--- a/src/test/regress/expected/create_table_like.out
+++ b/src/test/regress/expected/create_table_like.out
@@ -220,3 +220,22 @@ ERROR:  column "a" has a storage parameter conflict
 DETAIL:  MAIN versus EXTENDED
 DROP TABLE ctlt1, ctlt2, ctlt3, ctlt4, ctlt12_storage, ctlt12_comments, ctlt1_inh, ctlt13_inh, ctlt13_like, ctlt_all, ctla, ctlb CASCADE;
 NOTICE:  drop cascades to table inhe
+/* LIKE with other relation kinds */
+CREATE TABLE ctlt4 (a int, b text);
+CREATE SEQUENCE ctlseq1;
+CREATE TABLE ctlt10 (LIKE ctlseq1);  -- fail
+ERROR:  LIKE source relation "ctlseq1" is not a table, view, or foreign table
+CREATE VIEW ctlv1 AS SELECT * FROM ctlt4;
+CREATE TABLE ctlt11 (LIKE ctlv1);
+CREATE TABLE ctlt11a (LIKE ctlv1 INCLUDING ALL);
+CREATE TYPE ctlty1 AS (a int, b text);
+CREATE TABLE ctlt12 (LIKE ctlty1);  -- currently fails
+ERROR:  "ctlty1" is a composite type
+LINE 1: CREATE TABLE ctlt12 (LIKE ctlty1);
+                                  ^
+DROP SEQUENCE ctlseq1;
+DROP TYPE ctlty1;
+DROP VIEW ctlv1;
+DROP TABLE IF EXISTS ctlt4, ctlt10, ctlt11, ctlt11a, ctlt12;
+NOTICE:  table "ctlt10" does not exist, skipping
+NOTICE:  table "ctlt12" does not exist, skipping
diff --git a/src/test/regress/sql/create_table_like.sql b/src/test/regress/sql/create_table_like.sql
index 58cea44..db66e48 100644
--- a/src/test/regress/sql/create_table_like.sql
+++ b/src/test/regress/sql/create_table_like.sql
@@ -97,3 +97,23 @@ CREATE TABLE inh_error1 () INHERITS (ctlt1, ctlt4);
 CREATE TABLE inh_error2 (LIKE ctlt4 INCLUDING STORAGE) INHERITS (ctlt1);
 
 DROP TABLE ctlt1, ctlt2, ctlt3, ctlt4, ctlt12_storage, ctlt12_comments, ctlt1_inh, ctlt13_inh, ctlt13_like, ctlt_all, ctla, ctlb CASCADE;
+
+
+/* LIKE with other relation kinds */
+
+CREATE TABLE ctlt4 (a int, b text);
+
+CREATE SEQUENCE ctlseq1;
+CREATE TABLE ctlt10 (LIKE ctlseq1);  -- fail
+
+CREATE VIEW ctlv1 AS SELECT * FROM ctlt4;
+CREATE TABLE ctlt11 (LIKE ctlv1);
+CREATE TABLE ctlt11a (LIKE ctlv1 INCLUDING ALL);
+
+CREATE TYPE ctlty1 AS (a int, b text);
+CREATE TABLE ctlt12 (LIKE ctlty1);  -- currently fails
+
+DROP SEQUENCE ctlseq1;
+DROP TYPE ctlty1;
+DROP VIEW ctlv1;
+DROP TABLE IF EXISTS ctlt4, ctlt10, ctlt11, ctlt11a, ctlt12;

commit 07123dff778389382f08d2152742bb061d351c21
Author: Peter Eisentraut <peter_e@gmx.net>
Date:   Tue Jan 10 20:58:16 2012 +0200

    pg_dump: Dump foreign options in sorted order

diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index d1598ea..13fc667 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -5733,7 +5733,8 @@ getTableAttrs(TableInfo *tblinfo, int numTables)
 							  "pg_catalog.array_to_string(ARRAY("
 							  "SELECT pg_catalog.quote_ident(option_name) || "
 							  "' ' || pg_catalog.quote_literal(option_value) "
-							  "FROM pg_catalog.pg_options_to_table(attfdwoptions)"
+							  "FROM pg_catalog.pg_options_to_table(attfdwoptions) "
+							  "ORDER BY option_name"
 							  "), E',\n    ') AS attfdwoptions "
 			 "FROM pg_catalog.pg_attribute a LEFT JOIN pg_catalog.pg_type t "
 							  "ON a.atttypid = t.oid "
@@ -6564,7 +6565,8 @@ getForeignDataWrappers(int *numForeignDataWrappers)
 						  "array_to_string(ARRAY("
 						  "SELECT quote_ident(option_name) || ' ' || "
 						  "quote_literal(option_value) "
-						  "FROM pg_options_to_table(fdwoptions)"
+						  "FROM pg_options_to_table(fdwoptions) "
+						  "ORDER BY option_name"
 						  "), E',\n    ') AS fdwoptions "
 						  "FROM pg_foreign_data_wrapper",
 						  username_subquery);
@@ -6578,7 +6580,8 @@ getForeignDataWrappers(int *numForeignDataWrappers)
 						  "array_to_string(ARRAY("
 						  "SELECT quote_ident(option_name) || ' ' || "
 						  "quote_literal(option_value) "
-						  "FROM pg_options_to_table(fdwoptions)"
+						  "FROM pg_options_to_table(fdwoptions) "
+						  "ORDER BY option_name"
 						  "), E',\n    ') AS fdwoptions "
 						  "FROM pg_foreign_data_wrapper",
 						  username_subquery);
@@ -6667,7 +6670,8 @@ getForeignServers(int *numForeignServers)
 					  "array_to_string(ARRAY("
 					  "SELECT quote_ident(option_name) || ' ' || "
 					  "quote_literal(option_value) "
-					  "FROM pg_options_to_table(srvoptions)"
+					  "FROM pg_options_to_table(srvoptions) "
+					  "ORDER BY option_name"
 					  "), E',\n    ') AS srvoptions "
 					  "FROM pg_foreign_server",
 					  username_subquery);
@@ -11777,7 +11781,8 @@ dumpUserMappings(Archive *fout,
 					  "array_to_string(ARRAY("
 					  "SELECT quote_ident(option_name) || ' ' || "
 					  "quote_literal(option_value) "
-					  "FROM pg_options_to_table(umoptions)"
+					  "FROM pg_options_to_table(umoptions) "
+					  "ORDER BY option_name"
 					  "), E',\n    ') AS umoptions "
 					  "FROM pg_user_mappings "
 					  "WHERE srvid = '%u' "
@@ -12438,7 +12443,8 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
 							  "pg_catalog.array_to_string(ARRAY("
 							  "SELECT pg_catalog.quote_ident(option_name) || "
 							  "' ' || pg_catalog.quote_literal(option_value) "
-							  "FROM pg_catalog.pg_options_to_table(ftoptions)"
+							  "FROM pg_catalog.pg_options_to_table(ftoptions) "
+							  "ORDER BY option_name"
 							  "), E',\n    ') AS ftoptions "
 							  "FROM pg_catalog.pg_foreign_table ft "
 							  "JOIN pg_catalog.pg_foreign_server fs "

commit 89b3c6cc8b560f7f46a6a25b270aed5330c09a0e
Author: Tom Lane <tgl@sss.pgh.pa.us>
Date:   Mon Jan 9 19:56:27 2012 -0500

    Fix one-byte buffer overrun in contrib/test_parser.
    
    The original coding examined the next character before verifying that
    there *is* a next character.  In the worst case with the input buffer
    right up against the end of memory, this would result in a segfault.
    
    Problem spotted by Paul Guyot; this commit extends his patch to fix an
    additional case.  In addition, make the code a tad more readable by not
    overloading the usage of *tlen.

diff --git a/contrib/test_parser/test_parser.c b/contrib/test_parser/test_parser.c
index c27d7d3..da7f04c 100644
--- a/contrib/test_parser/test_parser.c
+++ b/contrib/test_parser/test_parser.c
@@ -73,31 +73,32 @@ testprs_getlexeme(PG_FUNCTION_ARGS)
 	ParserState *pst = (ParserState *) PG_GETARG_POINTER(0);
 	char	  **t = (char **) PG_GETARG_POINTER(1);
 	int		   *tlen = (int *) PG_GETARG_POINTER(2);
+	int			startpos = pst->pos;
 	int			type;
 
-	*tlen = pst->pos;
 	*t = pst->buffer + pst->pos;
 
-	if ((pst->buffer)[pst->pos] == ' ')
+	if (pst->pos < pst->len &&
+		(pst->buffer)[pst->pos] == ' ')
 	{
 		/* blank type */
 		type = 12;
-		/* go to the next non-white-space character */
-		while ((pst->buffer)[pst->pos] == ' ' &&
-			   pst->pos < pst->len)
+		/* go to the next non-space character */
+		while (pst->pos < pst->len &&
+			   (pst->buffer)[pst->pos] == ' ')
 			(pst->pos)++;
 	}
 	else
 	{
 		/* word type */
 		type = 3;
-		/* go to the next white-space character */
-		while ((pst->buffer)[pst->pos] != ' ' &&
-			   pst->pos < pst->len)
+		/* go to the next space character */
+		while (pst->pos < pst->len &&
+			   (pst->buffer)[pst->pos] != ' ')
 			(pst->pos)++;
 	}
 
-	*tlen = pst->pos - *tlen;
+	*tlen = pst->pos - startpos;
 
 	/* we are finished if (*tlen == 0) */
 	if (*tlen == 0)

commit 743ed082accbc542294a4408e2e45a6ffb8ec966
Author: Peter Eisentraut <peter_e@gmx.net>
Date:   Mon Jan 9 21:10:50 2012 +0200

    Add compatibility note about grant options on GRANT reference page
    
    Point out in the compatibility section that granting grant options to
    PUBLIC is not supported by PostgreSQL.  This is already mentioned
    earlier, but since it concerns the information schema, it might be
    worth pointing out explicitly as a compatibility issue.

diff --git a/doc/src/sgml/ref/grant.sgml b/doc/src/sgml/ref/grant.sgml
index 51dd2e0..c5edaed 100644
--- a/doc/src/sgml/ref/grant.sgml
+++ b/doc/src/sgml/ref/grant.sgml
@@ -631,6 +631,12 @@ GRANT admins TO joe;
    </para>
 
    <para>
+    According to the SQL standard, grant options can be granted to
+    <literal>PUBLIC</literal>; PostgreSQL only supports granting grant options
+    to roles.
+   </para>
+
+   <para>
     The SQL standard provides for a <literal>USAGE</literal> privilege
     on other kinds of objects: character sets, collations,
     translations.
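
A short illustration of the restriction being documented; the table and role
names are placeholders:

    GRANT SELECT ON mytable TO PUBLIC;                    -- plain privilege: supported
    GRANT SELECT ON mytable TO alice WITH GRANT OPTION;   -- grant option to a role: supported
    -- the SQL standard would also allow the following, but PostgreSQL rejects
    -- grant options for PUBLIC:
    -- GRANT SELECT ON mytable TO PUBLIC WITH GRANT OPTION;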

commit dc3f33f6be0be3f7ce25511d4485506ec43e20aa
Author: Robert Haas <rhaas@postgresql.org>
Date:   Mon Jan 9 13:31:58 2012 -0500

    Fix pathname in pgindent README.
    
    Kevin Grittner

diff --git a/src/tools/pgindent/README b/src/tools/pgindent/README
index d88c201..a47b809 100644
--- a/src/tools/pgindent/README
+++ b/src/tools/pgindent/README
@@ -22,7 +22,7 @@ This can format all PostgreSQL *.c and *.h files, but excludes *.y, and
 
 	find . -name '*.[ch]' -type f -print | \
 	egrep -v -f src/tools/pgindent/exclude_file_patterns | \
-	xargs -n100 pgindent src/tools/pgindent/typedefs.list
+	xargs -n100 src/tools/pgindent/pgindent src/tools/pgindent/typedefs.list
 
 6) Remove any files that generate errors and restore their original
    versions.

commit 822128947eb707830ad1b775c216517c3a54befc
Author: Magnus Hagander <magnus@hagander.net>
Date:   Mon Jan 9 18:18:25 2012 +0100

    Add .gitignore file for entab
    
    Kevin Grittner

diff --git a/src/tools/entab/.gitignore b/src/tools/entab/.gitignore
new file mode 100644
index 0000000..94db843
--- /dev/null
+++ b/src/tools/entab/.gitignore
@@ -0,0 +1 @@
+/entab

commit 4ecd359c420203082489e2b5dddd098e34a1b70c
Author: Magnus Hagander <magnus@hagander.net>
Date:   Mon Jan 9 18:16:51 2012 +0100

    Fix comment language
    
    Per comment from Heikki

diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c
index e3a0e92..c390cbf 100644
--- a/src/bin/pg_basebackup/receivelog.c
+++ b/src/bin/pg_basebackup/receivelog.c
@@ -379,8 +379,8 @@ ReceiveXlogStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline, char *sysi
 		{
 			/*
 			 * keepalive message, sent in 9.2 and newer. We just ignore
-			 * this message completely, but need to forward past it
-			 * in our reading.
+			 * this message completely, but need to skip past it in the
+			 * stream.
 			 */
 			if (r != STREAMING_KEEPALIVE_SIZE)
 			{

commit 6b020d228b976821181ac673964f302b2c32f12d
Author: Magnus Hagander <magnus@hagander.net>
Date:   Mon Jan 9 11:53:38 2012 +0100

    Fix pg_basebackup for keepalive messages
    
    Teach pg_basebackup in streaming mode to deal with keepalive messages.
    Also change the order of checks to complain at the message rather than
    block size when a new message is introduced.
    
    In passing, switch to using sizeof() instead of hardcoded sizes for
    WAL protocol structs.

diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c
index c18db4f..e3a0e92 100644
--- a/src/bin/pg_basebackup/receivelog.c
+++ b/src/bin/pg_basebackup/receivelog.c
@@ -33,8 +33,9 @@
 #include <unistd.h>
 
 
-/* Size of the streaming replication protocol header */
-#define STREAMING_HEADER_SIZE (1+8+8+8)
+/* Size of the streaming replication protocol headers */
+#define STREAMING_HEADER_SIZE (1+sizeof(WalDataMessageHeader))
+#define STREAMING_KEEPALIVE_SIZE (1+sizeof(PrimaryKeepaliveMessage))
 
 const XLogRecPtr InvalidXLogRecPtr = {0, 0};
 
@@ -374,18 +375,33 @@ ReceiveXlogStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline, char *sysi
 					progname, PQerrorMessage(conn));
 			return false;
 		}
-		if (r < STREAMING_HEADER_SIZE + 1)
+		if (copybuf[0] == 'k')
 		{
-			fprintf(stderr, _("%s: streaming header too small: %i\n"),
-					progname, r);
-			return false;
+			/*
+			 * keepalive message, sent in 9.2 and newer. We just ignore
+			 * this message completely, but need to forward past it
+			 * in our reading.
+			 */
+			if (r != STREAMING_KEEPALIVE_SIZE)
+			{
+				fprintf(stderr, _("%s: keepalive message is incorrect size: %i\n"),
+						progname, r);
+				return false;
+			}
+			continue;
 		}
-		if (copybuf[0] != 'w')
+		else if (copybuf[0] != 'w')
 		{
 			fprintf(stderr, _("%s: unrecognized streaming header: \"%c\"\n"),
 					progname, copybuf[0]);
 			return false;
 		}
+		if (r < STREAMING_HEADER_SIZE + 1)
+		{
+			fprintf(stderr, _("%s: streaming header too small: %i\n"),
+					progname, r);
+			return false;
+		}
 
 		/* Extract WAL location for this block */
 		memcpy(&blockpos, copybuf + 1, 8);

commit db49517c62750322fb2a37ff6324ecc00965e641
Author: Peter Eisentraut <peter_e@gmx.net>
Date:   Sat Jan 7 14:58:13 2012 +0200

    Rename the internal structures of the CREATE TABLE (LIKE ...) facility
    
    The original implementation of this interpreted it as a kind of
    "inheritance" facility and named all the internal structures
    accordingly.  This turned out to be very confusing, because it has
    nothing to do with the INHERITS feature.  So rename all the internal
    parser infrastructure, update the comments, adjust the error messages,
    and split up the regression tests.

diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml
index 30e4154..97968bb 100644
--- a/doc/src/sgml/ref/create_table.sgml
+++ b/doc/src/sgml/ref/create_table.sgml
@@ -24,7 +24,7 @@ PostgreSQL documentation
 CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXISTS ] <replaceable class="PARAMETER">table_name</replaceable> ( [
   { <replaceable class="PARAMETER">column_name</replaceable> <replaceable class="PARAMETER">data_type</replaceable> [ COLLATE <replaceable>collation</replaceable> ] [ <replaceable class="PARAMETER">column_constraint</replaceable> [ ... ] ]
     | <replaceable>table_constraint</replaceable>
-    | LIKE <replaceable>parent_table</replaceable> [ <replaceable>like_option</replaceable> ... ] }
+    | LIKE <replaceable>source_table</replaceable> [ <replaceable>like_option</replaceable> ... ] }
     [, ... ]
 ] )
 [ INHERITS ( <replaceable>parent_table</replaceable> [, ... ] ) ]
@@ -312,7 +312,7 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI
    </varlistentry>
 
    <varlistentry>
-    <term><literal>LIKE <replaceable>parent_table</replaceable> [ <replaceable>like_option</replaceable> ... ]</literal></term>
+    <term><literal>LIKE <replaceable>source_table</replaceable> [ <replaceable>like_option</replaceable> ... ]</literal></term>
     <listitem>
      <para>
       The <literal>LIKE</literal> clause specifies a table from which
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 756e3a6..71da0d8 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -2727,10 +2727,10 @@ _copyCreateStmt(const CreateStmt *from)
 	return newnode;
 }
 
-static InhRelation *
-_copyInhRelation(const InhRelation *from)
+static TableLikeClause *
+_copyTableLikeClause(const TableLikeClause *from)
 {
-	InhRelation *newnode = makeNode(InhRelation);
+	TableLikeClause *newnode = makeNode(TableLikeClause);
 
 	COPY_NODE_FIELD(relation);
 	COPY_SCALAR_FIELD(options);
@@ -4134,8 +4134,8 @@ copyObject(const void *from)
 		case T_CreateStmt:
 			retval = _copyCreateStmt(from);
 			break;
-		case T_InhRelation:
-			retval = _copyInhRelation(from);
+		case T_TableLikeClause:
+			retval = _copyTableLikeClause(from);
 			break;
 		case T_DefineStmt:
 			retval = _copyDefineStmt(from);
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 9eff42f..ba949db 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -1160,7 +1160,7 @@ _equalCreateStmt(const CreateStmt *a, const CreateStmt *b)
 }
 
 static bool
-_equalInhRelation(const InhRelation *a, const InhRelation *b)
+_equalTableLikeClause(const TableLikeClause *a, const TableLikeClause *b)
 {
 	COMPARE_NODE_FIELD(relation);
 	COMPARE_SCALAR_FIELD(options);
@@ -2677,8 +2677,8 @@ equal(const void *a, const void *b)
 		case T_CreateStmt:
 			retval = _equalCreateStmt(a, b);
 			break;
-		case T_InhRelation:
-			retval = _equalInhRelation(a, b);
+		case T_TableLikeClause:
+			retval = _equalTableLikeClause(a, b);
 			break;
 		case T_DefineStmt:
 			retval = _equalDefineStmt(a, b);
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index cb94614..8bc1947 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -2066,9 +2066,9 @@ _outDefElem(StringInfo str, const DefElem *node)
 }
 
 static void
-_outInhRelation(StringInfo str, const InhRelation *node)
+_outTableLikeClause(StringInfo str, const TableLikeClause *node)
 {
-	WRITE_NODE_TYPE("INHRELATION");
+	WRITE_NODE_TYPE("TABLELIKECLAUSE");
 
 	WRITE_NODE_FIELD(relation);
 	WRITE_UINT_FIELD(options);
@@ -3142,8 +3142,8 @@ _outNode(StringInfo str, const void *obj)
 			case T_DefElem:
 				_outDefElem(str, obj);
 				break;
-			case T_InhRelation:
-				_outInhRelation(str, obj);
+			case T_TableLikeClause:
+				_outTableLikeClause(str, obj);
 				break;
 			case T_LockingClause:
 				_outLockingClause(str, obj);
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 87d7305..0ec039b 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -2718,18 +2718,10 @@ ConstraintAttr:
 		;
 
 
-/*
- * SQL99 supports wholesale borrowing of a table definition via the LIKE clause.
- * This seems to be a poor man's inheritance capability, with the resulting
- * tables completely decoupled except for the original commonality in definitions.
- *
- * This is very similar to CREATE TABLE AS except for the INCLUDING DEFAULTS extension
- * which is a part of SQL:2003.
- */
 TableLikeClause:
 			LIKE qualified_name TableLikeOptionList
 				{
-					InhRelation *n = makeNode(InhRelation);
+					TableLikeClause *n = makeNode(TableLikeClause);
 					n->relation = $2;
 					n->options = $3;
 					$$ = (Node *)n;
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 19ccf99..e14ae09 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -102,8 +102,8 @@ static void transformColumnDefinition(CreateStmtContext *cxt,
 						  ColumnDef *column);
 static void transformTableConstraint(CreateStmtContext *cxt,
 						 Constraint *constraint);
-static void transformInhRelation(CreateStmtContext *cxt,
-					 InhRelation *inhrelation);
+static void transformTableLikeClause(CreateStmtContext *cxt,
+					 TableLikeClause *table_like_clause);
 static void transformOfType(CreateStmtContext *cxt,
 				TypeName *ofTypename);
 static char *chooseIndexName(const RangeVar *relation, IndexStmt *index_stmt);
@@ -238,8 +238,8 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
 				transformTableConstraint(&cxt, (Constraint *) element);
 				break;
 
-			case T_InhRelation:
-				transformInhRelation(&cxt, (InhRelation *) element);
+			case T_TableLikeClause:
+				transformTableLikeClause(&cxt, (TableLikeClause *) element);
 				break;
 
 			default:
@@ -625,14 +625,14 @@ transformTableConstraint(CreateStmtContext *cxt, Constraint *constraint)
 }
 
 /*
- * transformInhRelation
+ * transformTableLikeClause
  *
- * Change the LIKE <subtable> portion of a CREATE TABLE statement into
+ * Change the LIKE <srctable> portion of a CREATE TABLE statement into
  * column definitions which recreate the user defined column portions of
- * <subtable>.
+ * <srctable>.
  */
 static void
-transformInhRelation(CreateStmtContext *cxt, InhRelation *inhRelation)
+transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_clause)
 {
 	AttrNumber	parent_attno;
 	Relation	relation;
@@ -641,17 +641,17 @@ transformInhRelation(CreateStmtContext *cxt, InhRelation *inhRelation)
 	AclResult	aclresult;
 	char	   *comment;
 
-	relation = parserOpenTable(cxt->pstate, inhRelation->relation,
+	relation = parserOpenTable(cxt->pstate, table_like_clause->relation,
 							   AccessShareLock);
 
 	if (relation->rd_rel->relkind != RELKIND_RELATION)
 		ereport(ERROR,
 				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-				 errmsg("inherited relation \"%s\" is not a table",
-						inhRelation->relation->relname)));
+				 errmsg("LIKE source relation \"%s\" is not a table",
+						table_like_clause->relation->relname)));
 
 	/*
-	 * Check for SELECT privilages
+	 * Check for SELECT privileges
 	 */
 	aclresult = pg_class_aclcheck(RelationGetRelid(relation), GetUserId(),
 								  ACL_SELECT);
@@ -708,7 +708,7 @@ transformInhRelation(CreateStmtContext *cxt, InhRelation *inhRelation)
 		 * Copy default, if present and the default has been requested
 		 */
 		if (attribute->atthasdef &&
-			(inhRelation->options & CREATE_TABLE_LIKE_DEFAULTS))
+			(table_like_clause->options & CREATE_TABLE_LIKE_DEFAULTS))
 		{
 			Node	   *this_default = NULL;
 			AttrDefault *attrdef;
@@ -736,13 +736,13 @@ transformInhRelation(CreateStmtContext *cxt, InhRelation *inhRelation)
 		}
 
 		/* Likewise, copy storage if requested */
-		if (inhRelation->options & CREATE_TABLE_LIKE_STORAGE)
+		if (table_like_clause->options & CREATE_TABLE_LIKE_STORAGE)
 			def->storage = attribute->attstorage;
 		else
 			def->storage = 0;
 
 		/* Likewise, copy comment if requested */
-		if ((inhRelation->options & CREATE_TABLE_LIKE_COMMENTS) &&
+		if ((table_like_clause->options & CREATE_TABLE_LIKE_COMMENTS) &&
 			(comment = GetComment(attribute->attrelid,
 								  RelationRelationId,
 								  attribute->attnum)) != NULL)
@@ -764,7 +764,7 @@ transformInhRelation(CreateStmtContext *cxt, InhRelation *inhRelation)
 	 * Copy CHECK constraints if requested, being careful to adjust attribute
 	 * numbers
 	 */
-	if ((inhRelation->options & CREATE_TABLE_LIKE_CONSTRAINTS) &&
+	if ((table_like_clause->options & CREATE_TABLE_LIKE_CONSTRAINTS) &&
 		tupleDesc->constr)
 	{
 		AttrNumber *attmap = varattnos_map_schema(tupleDesc, cxt->columns);
@@ -787,7 +787,7 @@ transformInhRelation(CreateStmtContext *cxt, InhRelation *inhRelation)
 			cxt->ckconstraints = lappend(cxt->ckconstraints, n);
 
 			/* Copy comment on constraint */
-			if ((inhRelation->options & CREATE_TABLE_LIKE_COMMENTS) &&
+			if ((table_like_clause->options & CREATE_TABLE_LIKE_COMMENTS) &&
 				(comment = GetComment(get_constraint_oid(RelationGetRelid(relation),
 														 n->conname, false),
 									  ConstraintRelationId,
@@ -810,7 +810,7 @@ transformInhRelation(CreateStmtContext *cxt, InhRelation *inhRelation)
 	/*
 	 * Likewise, copy indexes if requested
 	 */
-	if ((inhRelation->options & CREATE_TABLE_LIKE_INDEXES) &&
+	if ((table_like_clause->options & CREATE_TABLE_LIKE_INDEXES) &&
 		relation->rd_rel->relhasindex)
 	{
 		AttrNumber *attmap = varattnos_map_schema(tupleDesc, cxt->columns);
@@ -831,7 +831,7 @@ transformInhRelation(CreateStmtContext *cxt, InhRelation *inhRelation)
 			index_stmt = generateClonedIndexStmt(cxt, parent_index, attmap);
 
 			/* Copy comment on index */
-			if (inhRelation->options & CREATE_TABLE_LIKE_COMMENTS)
+			if (table_like_clause->options & CREATE_TABLE_LIKE_COMMENTS)
 			{
 				comment = GetComment(parent_index_oid, RelationRelationId, 0);
 
diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h
index bf39dcc..b116808 100644
--- a/src/include/nodes/nodes.h
+++ b/src/include/nodes/nodes.h
@@ -389,7 +389,7 @@ typedef enum NodeTag
 	T_FuncWithArgs,
 	T_AccessPriv,
 	T_CreateOpClassItem,
-	T_InhRelation,
+	T_TableLikeClause,
 	T_FunctionParameter,
 	T_LockingClause,
 	T_RowMarkClause,
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 0be3fb1..dce0e72 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -504,16 +504,16 @@ typedef struct ColumnDef
 } ColumnDef;
 
 /*
- * inhRelation - Relation a CREATE TABLE is to inherit attributes of
+ * TableLikeClause - CREATE TABLE ( ... LIKE ... ) clause
  */
-typedef struct InhRelation
+typedef struct TableLikeClause
 {
 	NodeTag		type;
 	RangeVar   *relation;
-	bits32		options;		/* OR of CreateStmtLikeOption flags */
-} InhRelation;
+	bits32		options;		/* OR of TableLikeOption flags */
+} TableLikeClause;
 
-typedef enum CreateStmtLikeOption
+typedef enum TableLikeOption
 {
 	CREATE_TABLE_LIKE_DEFAULTS = 1 << 0,
 	CREATE_TABLE_LIKE_CONSTRAINTS = 1 << 1,
@@ -521,7 +521,7 @@ typedef enum CreateStmtLikeOption
 	CREATE_TABLE_LIKE_STORAGE = 1 << 3,
 	CREATE_TABLE_LIKE_COMMENTS = 1 << 4,
 	CREATE_TABLE_LIKE_ALL = 0x7FFFFFFF
-} CreateStmtLikeOption;
+} TableLikeOption;
 
 /*
  * IndexElem - index parameters (used in CREATE INDEX)
diff --git a/src/test/regress/expected/create_table_like.out b/src/test/regress/expected/create_table_like.out
new file mode 100644
index 0000000..9ff632e
--- /dev/null
+++ b/src/test/regress/expected/create_table_like.out
@@ -0,0 +1,222 @@
+/* Test inheritance of structure (LIKE) */
+CREATE TABLE inhx (xx text DEFAULT 'text');
+/*
+ * Test double inheritance
+ *
+ * Ensure that defaults are NOT included unless
+ * INCLUDING DEFAULTS is specified
+ */
+CREATE TABLE ctla (aa TEXT);
+CREATE TABLE ctlb (bb TEXT) INHERITS (ctla);
+CREATE TABLE inhe (ee text, LIKE inhx) inherits (ctlb);
+INSERT INTO inhe VALUES ('ee-col1', 'ee-col2', DEFAULT, 'ee-col4');
+SELECT * FROM inhe; /* Columns aa, bb, xx value NULL, ee */
+   aa    |   bb    | ee |   xx    
+---------+---------+----+---------
+ ee-col1 | ee-col2 |    | ee-col4
+(1 row)
+
+SELECT * FROM inhx; /* Empty set since LIKE inherits structure only */
+ xx 
+----
+(0 rows)
+
+SELECT * FROM ctlb; /* Has ee entry */
+   aa    |   bb    
+---------+---------
+ ee-col1 | ee-col2
+(1 row)
+
+SELECT * FROM ctla; /* Has ee entry */
+   aa    
+---------
+ ee-col1
+(1 row)
+
+CREATE TABLE inhf (LIKE inhx, LIKE inhx); /* Throw error */
+ERROR:  column "xx" specified more than once
+CREATE TABLE inhf (LIKE inhx INCLUDING DEFAULTS INCLUDING CONSTRAINTS);
+INSERT INTO inhf DEFAULT VALUES;
+SELECT * FROM inhf; /* Single entry with value 'text' */
+  xx  
+------
+ text
+(1 row)
+
+ALTER TABLE inhx add constraint foo CHECK (xx = 'text');
+ALTER TABLE inhx ADD PRIMARY KEY (xx);
+NOTICE:  ALTER TABLE / ADD PRIMARY KEY will create implicit index "inhx_pkey" for table "inhx"
+CREATE TABLE inhg (LIKE inhx); /* Doesn't copy constraint */
+INSERT INTO inhg VALUES ('foo');
+DROP TABLE inhg;
+CREATE TABLE inhg (x text, LIKE inhx INCLUDING CONSTRAINTS, y text); /* Copies constraints */
+INSERT INTO inhg VALUES ('x', 'text', 'y'); /* Succeeds */
+INSERT INTO inhg VALUES ('x', 'text', 'y'); /* Succeeds -- Unique constraints not copied */
+INSERT INTO inhg VALUES ('x', 'foo',  'y');  /* fails due to constraint */
+ERROR:  new row for relation "inhg" violates check constraint "foo"
+DETAIL:  Failing row contains (x, foo, y).
+SELECT * FROM inhg; /* Two records with three columns in order x=x, xx=text, y=y */
+ x |  xx  | y 
+---+------+---
+ x | text | y
+ x | text | y
+(2 rows)
+
+DROP TABLE inhg;
+CREATE TABLE inhg (x text, LIKE inhx INCLUDING INDEXES, y text); /* copies indexes */
+NOTICE:  CREATE TABLE / PRIMARY KEY will create implicit index "inhg_pkey" for table "inhg"
+INSERT INTO inhg VALUES (5, 10);
+INSERT INTO inhg VALUES (20, 10); -- should fail
+ERROR:  duplicate key value violates unique constraint "inhg_pkey"
+DETAIL:  Key (xx)=(10) already exists.
+DROP TABLE inhg;
+/* Multiple primary keys creation should fail */
+CREATE TABLE inhg (x text, LIKE inhx INCLUDING INDEXES, PRIMARY KEY(x)); /* fails */
+ERROR:  multiple primary keys for table "inhg" are not allowed
+CREATE TABLE inhz (xx text DEFAULT 'text', yy int UNIQUE);
+NOTICE:  CREATE TABLE / UNIQUE will create implicit index "inhz_yy_key" for table "inhz"
+CREATE UNIQUE INDEX inhz_xx_idx on inhz (xx) WHERE xx <> 'test';
+/* Ok to create multiple unique indexes */
+CREATE TABLE inhg (x text UNIQUE, LIKE inhz INCLUDING INDEXES);
+NOTICE:  CREATE TABLE / UNIQUE will create implicit index "inhg_x_key" for table "inhg"
+NOTICE:  CREATE TABLE / UNIQUE will create implicit index "inhg_yy_key" for table "inhg"
+INSERT INTO inhg (xx, yy, x) VALUES ('test', 5, 10);
+INSERT INTO inhg (xx, yy, x) VALUES ('test', 10, 15);
+INSERT INTO inhg (xx, yy, x) VALUES ('foo', 10, 15); -- should fail
+ERROR:  duplicate key value violates unique constraint "inhg_x_key"
+DETAIL:  Key (x)=(15) already exists.
+DROP TABLE inhg;
+DROP TABLE inhz;
+-- including storage and comments
+CREATE TABLE ctlt1 (a text CHECK (length(a) > 2) PRIMARY KEY, b text);
+NOTICE:  CREATE TABLE / PRIMARY KEY will create implicit index "ctlt1_pkey" for table "ctlt1"
+CREATE INDEX ctlt1_b_key ON ctlt1 (b);
+CREATE INDEX ctlt1_fnidx ON ctlt1 ((a || b));
+COMMENT ON COLUMN ctlt1.a IS 'A';
+COMMENT ON COLUMN ctlt1.b IS 'B';
+COMMENT ON CONSTRAINT ctlt1_a_check ON ctlt1 IS 't1_a_check';
+COMMENT ON INDEX ctlt1_pkey IS 'index pkey';
+COMMENT ON INDEX ctlt1_b_key IS 'index b_key';
+ALTER TABLE ctlt1 ALTER COLUMN a SET STORAGE MAIN;
+CREATE TABLE ctlt2 (c text);
+ALTER TABLE ctlt2 ALTER COLUMN c SET STORAGE EXTERNAL;
+COMMENT ON COLUMN ctlt2.c IS 'C';
+CREATE TABLE ctlt3 (a text CHECK (length(a) < 5), c text);
+ALTER TABLE ctlt3 ALTER COLUMN c SET STORAGE EXTERNAL;
+ALTER TABLE ctlt3 ALTER COLUMN a SET STORAGE MAIN;
+COMMENT ON COLUMN ctlt3.a IS 'A3';
+COMMENT ON COLUMN ctlt3.c IS 'C';
+COMMENT ON CONSTRAINT ctlt3_a_check ON ctlt3 IS 't3_a_check';
+CREATE TABLE ctlt4 (a text, c text);
+ALTER TABLE ctlt4 ALTER COLUMN c SET STORAGE EXTERNAL;
+CREATE TABLE ctlt12_storage (LIKE ctlt1 INCLUDING STORAGE, LIKE ctlt2 INCLUDING STORAGE);
+\d+ ctlt12_storage
+                   Table "public.ctlt12_storage"
+ Column | Type | Modifiers | Storage  | Stats target | Description 
+--------+------+-----------+----------+--------------+-------------
+ a      | text | not null  | main     |              | 
+ b      | text |           | extended |              | 
+ c      | text |           | external |              | 
+Has OIDs: no
+
+CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDING COMMENTS);
+\d+ ctlt12_comments
+                  Table "public.ctlt12_comments"
+ Column | Type | Modifiers | Storage  | Stats target | Description 
+--------+------+-----------+----------+--------------+-------------
+ a      | text | not null  | extended |              | A
+ b      | text |           | extended |              | B
+ c      | text |           | extended |              | C
+Has OIDs: no
+
+CREATE TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (ctlt1);
+NOTICE:  merging column "a" with inherited definition
+NOTICE:  merging column "b" with inherited definition
+NOTICE:  merging constraint "ctlt1_a_check" with inherited definition
+\d+ ctlt1_inh
+                     Table "public.ctlt1_inh"
+ Column | Type | Modifiers | Storage  | Stats target | Description 
+--------+------+-----------+----------+--------------+-------------
+ a      | text | not null  | main     |              | A
+ b      | text |           | extended |              | B
+Check constraints:
+    "ctlt1_a_check" CHECK (length(a) > 2)
+Inherits: ctlt1
+Has OIDs: no
+
+SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt1_inh'::regclass;
+ description 
+-------------
+ t1_a_check
+(1 row)
+
+CREATE TABLE ctlt13_inh () INHERITS (ctlt1, ctlt3);
+NOTICE:  merging multiple inherited definitions of column "a"
+\d+ ctlt13_inh
+                     Table "public.ctlt13_inh"
+ Column | Type | Modifiers | Storage  | Stats target | Description 
+--------+------+-----------+----------+--------------+-------------
+ a      | text | not null  | main     |              | 
+ b      | text |           | extended |              | 
+ c      | text |           | external |              | 
+Check constraints:
+    "ctlt1_a_check" CHECK (length(a) > 2)
+    "ctlt3_a_check" CHECK (length(a) < 5)
+Inherits: ctlt1,
+          ctlt3
+Has OIDs: no
+
+CREATE TABLE ctlt13_like (LIKE ctlt3 INCLUDING CONSTRAINTS INCLUDING COMMENTS INCLUDING STORAGE) INHERITS (ctlt1);
+NOTICE:  merging column "a" with inherited definition
+\d+ ctlt13_like
+                    Table "public.ctlt13_like"
+ Column | Type | Modifiers | Storage  | Stats target | Description 
+--------+------+-----------+----------+--------------+-------------
+ a      | text | not null  | main     |              | A3
+ b      | text |           | extended |              | 
+ c      | text |           | external |              | C
+Check constraints:
+    "ctlt1_a_check" CHECK (length(a) > 2)
+    "ctlt3_a_check" CHECK (length(a) < 5)
+Inherits: ctlt1
+Has OIDs: no
+
+SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt13_like'::regclass;
+ description 
+-------------
+ t3_a_check
+(1 row)
+
+CREATE TABLE ctlt_all (LIKE ctlt1 INCLUDING ALL);
+NOTICE:  CREATE TABLE / PRIMARY KEY will create implicit index "ctlt_all_pkey" for table "ctlt_all"
+\d+ ctlt_all
+                      Table "public.ctlt_all"
+ Column | Type | Modifiers | Storage  | Stats target | Description 
+--------+------+-----------+----------+--------------+-------------
+ a      | text | not null  | main     |              | A
+ b      | text |           | extended |              | B
+Indexes:
+    "ctlt_all_pkey" PRIMARY KEY, btree (a)
+    "ctlt_all_b_idx" btree (b)
+    "ctlt_all_expr_idx" btree ((a || b))
+Check constraints:
+    "ctlt1_a_check" CHECK (length(a) > 2)
+Has OIDs: no
+
+SELECT c.relname, objsubid, description FROM pg_description, pg_index i, pg_class c WHERE classoid = 'pg_class'::regclass AND objoid = i.indexrelid AND c.oid = i.indexrelid AND i.indrelid = 'ctlt_all'::regclass ORDER BY c.relname, objsubid;
+    relname     | objsubid | description 
+----------------+----------+-------------
+ ctlt_all_b_idx |        0 | index b_key
+ ctlt_all_pkey  |        0 | index pkey
+(2 rows)
+
+CREATE TABLE inh_error1 () INHERITS (ctlt1, ctlt4);
+NOTICE:  merging multiple inherited definitions of column "a"
+ERROR:  inherited column "a" has a storage parameter conflict
+DETAIL:  MAIN versus EXTENDED
+CREATE TABLE inh_error2 (LIKE ctlt4 INCLUDING STORAGE) INHERITS (ctlt1);
+NOTICE:  merging column "a" with inherited definition
+ERROR:  column "a" has a storage parameter conflict
+DETAIL:  MAIN versus EXTENDED
+DROP TABLE ctlt1, ctlt2, ctlt3, ctlt4, ctlt12_storage, ctlt12_comments, ctlt1_inh, ctlt13_inh, ctlt13_like, ctlt_all, ctla, ctlb CASCADE;
+NOTICE:  drop cascades to table inhe
diff --git a/src/test/regress/expected/inherit.out b/src/test/regress/expected/inherit.out
index 309c1db..7e8f572 100644
--- a/src/test/regress/expected/inherit.out
+++ b/src/test/regress/expected/inherit.out
@@ -587,93 +587,6 @@ CREATE TABLE otherchild (tomorrow date default now())
 NOTICE:  merging multiple inherited definitions of column "tomorrow"
 NOTICE:  merging column "tomorrow" with inherited definition
 DROP TABLE firstparent, secondparent, jointchild, thirdparent, otherchild;
-/* Test inheritance of structure (LIKE) */
-CREATE TABLE inhx (xx text DEFAULT 'text');
-/*
- * Test double inheritance
- *
- * Ensure that defaults are NOT included unless
- * INCLUDING DEFAULTS is specified
- */
-CREATE TABLE inhe (ee text, LIKE inhx) inherits (b);
-INSERT INTO inhe VALUES ('ee-col1', 'ee-col2', DEFAULT, 'ee-col4');
-SELECT * FROM inhe; /* Columns aa, bb, xx value NULL, ee */
-   aa    |   bb    | ee |   xx    
----------+---------+----+---------
- ee-col1 | ee-col2 |    | ee-col4
-(1 row)
-
-SELECT * FROM inhx; /* Empty set since LIKE inherits structure only */
- xx 
-----
-(0 rows)
-
-SELECT * FROM b; /* Has ee entry */
-   aa    |   bb    
----------+---------
- ee-col1 | ee-col2
-(1 row)
-
-SELECT * FROM a; /* Has ee entry */
-   aa    
----------
- ee-col1
-(1 row)
-
-CREATE TABLE inhf (LIKE inhx, LIKE inhx); /* Throw error */
-ERROR:  column "xx" specified more than once
-CREATE TABLE inhf (LIKE inhx INCLUDING DEFAULTS INCLUDING CONSTRAINTS);
-INSERT INTO inhf DEFAULT VALUES;
-SELECT * FROM inhf; /* Single entry with value 'text' */
-  xx  
-------
- text
-(1 row)
-
-ALTER TABLE inhx add constraint foo CHECK (xx = 'text');
-ALTER TABLE inhx ADD PRIMARY KEY (xx);
-NOTICE:  ALTER TABLE / ADD PRIMARY KEY will create implicit index "inhx_pkey" for table "inhx"
-CREATE TABLE inhg (LIKE inhx); /* Doesn't copy constraint */
-INSERT INTO inhg VALUES ('foo');
-DROP TABLE inhg;
-CREATE TABLE inhg (x text, LIKE inhx INCLUDING CONSTRAINTS, y text); /* Copies constraints */
-INSERT INTO inhg VALUES ('x', 'text', 'y'); /* Succeeds */
-INSERT INTO inhg VALUES ('x', 'text', 'y'); /* Succeeds -- Unique constraints not copied */
-INSERT INTO inhg VALUES ('x', 'foo',  'y');  /* fails due to constraint */
-ERROR:  new row for relation "inhg" violates check constraint "foo"
-DETAIL:  Failing row contains (x, foo, y).
-SELECT * FROM inhg; /* Two records with three columns in order x=x, xx=text, y=y */
- x |  xx  | y 
----+------+---
- x | text | y
- x | text | y
-(2 rows)
-
-DROP TABLE inhg;
-CREATE TABLE inhg (x text, LIKE inhx INCLUDING INDEXES, y text); /* copies indexes */
-NOTICE:  CREATE TABLE / PRIMARY KEY will create implicit index "inhg_pkey" for table "inhg"
-INSERT INTO inhg VALUES (5, 10);
-INSERT INTO inhg VALUES (20, 10); -- should fail
-ERROR:  duplicate key value violates unique constraint "inhg_pkey"
-DETAIL:  Key (xx)=(10) already exists.
-DROP TABLE inhg;
-/* Multiple primary keys creation should fail */
-CREATE TABLE inhg (x text, LIKE inhx INCLUDING INDEXES, PRIMARY KEY(x)); /* fails */
-ERROR:  multiple primary keys for table "inhg" are not allowed
-CREATE TABLE inhz (xx text DEFAULT 'text', yy int UNIQUE);
-NOTICE:  CREATE TABLE / UNIQUE will create implicit index "inhz_yy_key" for table "inhz"
-CREATE UNIQUE INDEX inhz_xx_idx on inhz (xx) WHERE xx <> 'test';
-/* Ok to create multiple unique indexes */
-CREATE TABLE inhg (x text UNIQUE, LIKE inhz INCLUDING INDEXES);
-NOTICE:  CREATE TABLE / UNIQUE will create implicit index "inhg_x_key" for table "inhg"
-NOTICE:  CREATE TABLE / UNIQUE will create implicit index "inhg_yy_key" for table "inhg"
-INSERT INTO inhg (xx, yy, x) VALUES ('test', 5, 10);
-INSERT INTO inhg (xx, yy, x) VALUES ('test', 10, 15);
-INSERT INTO inhg (xx, yy, x) VALUES ('foo', 10, 15); -- should fail
-ERROR:  duplicate key value violates unique constraint "inhg_x_key"
-DETAIL:  Key (x)=(15) already exists.
-DROP TABLE inhg;
-DROP TABLE inhz;
 -- Test changing the type of inherited columns
 insert into d values('test','one','two','three');
 alter table a alter column aa type integer using bit_length(aa);
@@ -963,171 +876,39 @@ drop table pp1 cascade;
 NOTICE:  drop cascades to 2 other objects
 DETAIL:  drop cascades to table cc1
 drop cascades to table cc2
--- including storage and comments
-CREATE TABLE t1 (a text CHECK (length(a) > 2) PRIMARY KEY, b text);
-NOTICE:  CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
-CREATE INDEX t1_b_key ON t1 (b);
-CREATE INDEX t1_fnidx ON t1 ((a || b));
-COMMENT ON COLUMN t1.a IS 'A';
-COMMENT ON COLUMN t1.b IS 'B';
-COMMENT ON CONSTRAINT t1_a_check ON t1 IS 't1_a_check';
-COMMENT ON INDEX t1_pkey IS 'index pkey';
-COMMENT ON INDEX t1_b_key IS 'index b_key';
-ALTER TABLE t1 ALTER COLUMN a SET STORAGE MAIN;
-CREATE TABLE t2 (c text);
-ALTER TABLE t2 ALTER COLUMN c SET STORAGE EXTERNAL;
-COMMENT ON COLUMN t2.c IS 'C';
-CREATE TABLE t3 (a text CHECK (length(a) < 5), c text);
-ALTER TABLE t3 ALTER COLUMN c SET STORAGE EXTERNAL;
-ALTER TABLE t3 ALTER COLUMN a SET STORAGE MAIN;
-COMMENT ON COLUMN t3.a IS 'A3';
-COMMENT ON COLUMN t3.c IS 'C';
-COMMENT ON CONSTRAINT t3_a_check ON t3 IS 't3_a_check';
-CREATE TABLE t4 (a text, c text);
-ALTER TABLE t4 ALTER COLUMN c SET STORAGE EXTERNAL;
-CREATE TABLE t12_storage (LIKE t1 INCLUDING STORAGE, LIKE t2 INCLUDING STORAGE);
-\d+ t12_storage
-                    Table "public.t12_storage"
- Column | Type | Modifiers | Storage  | Stats target | Description 
---------+------+-----------+----------+--------------+-------------
- a      | text | not null  | main     |              | 
- b      | text |           | extended |              | 
- c      | text |           | external |              | 
-Has OIDs: no
-
-CREATE TABLE t12_comments (LIKE t1 INCLUDING COMMENTS, LIKE t2 INCLUDING COMMENTS);
-\d+ t12_comments
-                    Table "public.t12_comments"
- Column | Type | Modifiers | Storage  | Stats target | Description 
---------+------+-----------+----------+--------------+-------------
- a      | text | not null  | extended |              | A
- b      | text |           | extended |              | B
- c      | text |           | extended |              | C
-Has OIDs: no
-
-CREATE TABLE t1_inh (LIKE t1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (t1);
-NOTICE:  merging column "a" with inherited definition
-NOTICE:  merging column "b" with inherited definition
-NOTICE:  merging constraint "t1_a_check" with inherited definition
-\d+ t1_inh
-                       Table "public.t1_inh"
- Column | Type | Modifiers | Storage  | Stats target | Description 
---------+------+-----------+----------+--------------+-------------
- a      | text | not null  | main     |              | A
- b      | text |           | extended |              | B
-Check constraints:
-    "t1_a_check" CHECK (length(a) > 2)
-Inherits: t1
-Has OIDs: no
-
-SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 't1_inh'::regclass;
- description 
--------------
- t1_a_check
-(1 row)
-
-CREATE TABLE t13_inh () INHERITS (t1, t3);
-NOTICE:  merging multiple inherited definitions of column "a"
-\d+ t13_inh
-                      Table "public.t13_inh"
- Column | Type | Modifiers | Storage  | Stats target | Description 
---------+------+-----------+----------+--------------+-------------
- a      | text | not null  | main     |              | 
- b      | text |           | extended |              | 
- c      | text |           | external |              | 
-Check constraints:
-    "t1_a_check" CHECK (length(a) > 2)
-    "t3_a_check" CHECK (length(a) < 5)
-Inherits: t1,
-          t3
-Has OIDs: no
-
-CREATE TABLE t13_like (LIKE t3 INCLUDING CONSTRAINTS INCLUDING COMMENTS INCLUDING STORAGE) INHERITS (t1);
-NOTICE:  merging column "a" with inherited definition
-\d+ t13_like
-                      Table "public.t13_like"
- Column | Type | Modifiers | Storage  | Stats target | Description 
---------+------+-----------+----------+--------------+-------------
- a      | text | not null  | main     |              | A3
- b      | text |           | extended |              | 
- c      | text |           | external |              | C
-Check constraints:
-    "t1_a_check" CHECK (length(a) > 2)
-    "t3_a_check" CHECK (length(a) < 5)
-Inherits: t1
-Has OIDs: no
-
-SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 't13_like'::regclass;
- description 
--------------
- t3_a_check
-(1 row)
-
-CREATE TABLE t_all (LIKE t1 INCLUDING ALL);
-NOTICE:  CREATE TABLE / PRIMARY KEY will create implicit index "t_all_pkey" for table "t_all"
-\d+ t_all
-                       Table "public.t_all"
- Column | Type | Modifiers | Storage  | Stats target | Description 
---------+------+-----------+----------+--------------+-------------
- a      | text | not null  | main     |              | A
- b      | text |           | extended |              | B
-Indexes:
-    "t_all_pkey" PRIMARY KEY, btree (a)
-    "t_all_b_idx" btree (b)
-    "t_all_expr_idx" btree ((a || b))
-Check constraints:
-    "t1_a_check" CHECK (length(a) > 2)
-Has OIDs: no
-
-SELECT c.relname, objsubid, description FROM pg_description, pg_index i, pg_class c WHERE classoid = 'pg_class'::regclass AND objoid = i.indexrelid AND c.oid = i.indexrelid AND i.indrelid = 't_all'::regclass ORDER BY c.relname, objsubid;
-   relname   | objsubid | description 
--------------+----------+-------------
- t_all_b_idx |        0 | index b_key
- t_all_pkey  |        0 | index pkey
-(2 rows)
-
-CREATE TABLE inh_error1 () INHERITS (t1, t4);
-NOTICE:  merging multiple inherited definitions of column "a"
-ERROR:  inherited column "a" has a storage parameter conflict
-DETAIL:  MAIN versus EXTENDED
-CREATE TABLE inh_error2 (LIKE t4 INCLUDING STORAGE) INHERITS (t1);
-NOTICE:  merging column "a" with inherited definition
-ERROR:  column "a" has a storage parameter conflict
-DETAIL:  MAIN versus EXTENDED
-DROP TABLE t1, t2, t3, t4, t12_storage, t12_comments, t1_inh, t13_inh, t13_like, t_all;
 -- Test for renaming in simple multiple inheritance
-CREATE TABLE t1 (a int, b int);
-CREATE TABLE s1 (b int, c int);
-CREATE TABLE ts (d int) INHERITS (t1, s1);
+CREATE TABLE inht1 (a int, b int);
+CREATE TABLE inhs1 (b int, c int);
+CREATE TABLE inhts (d int) INHERITS (inht1, inhs1);
 NOTICE:  merging multiple inherited definitions of column "b"
-ALTER TABLE t1 RENAME a TO aa;
-ALTER TABLE t1 RENAME b TO bb;                -- to be failed
+ALTER TABLE inht1 RENAME a TO aa;
+ALTER TABLE inht1 RENAME b TO bb;                -- to be failed
 ERROR:  cannot rename inherited column "b"
-ALTER TABLE ts RENAME aa TO aaa;      -- to be failed
+ALTER TABLE inhts RENAME aa TO aaa;      -- to be failed
 ERROR:  cannot rename inherited column "aa"
-ALTER TABLE ts RENAME d TO dd;
-\d+ ts
-                          Table "public.ts"
+ALTER TABLE inhts RENAME d TO dd;
+\d+ inhts
+                        Table "public.inhts"
  Column |  Type   | Modifiers | Storage | Stats target | Description 
 --------+---------+-----------+---------+--------------+-------------
  aa     | integer |           | plain   |              | 
  b      | integer |           | plain   |              | 
  c      | integer |           | plain   |              | 
  dd     | integer |           | plain   |              | 
-Inherits: t1,
-          s1
+Inherits: inht1,
+          inhs1
 Has OIDs: no
 
-DROP TABLE ts;
+DROP TABLE inhts;
 -- Test for renaming in diamond inheritance
-CREATE TABLE t2 (x int) INHERITS (t1);
-CREATE TABLE t3 (y int) INHERITS (t1);
-CREATE TABLE t4 (z int) INHERITS (t2, t3);
+CREATE TABLE inht2 (x int) INHERITS (inht1);
+CREATE TABLE inht3 (y int) INHERITS (inht1);
+CREATE TABLE inht4 (z int) INHERITS (inht2, inht3);
 NOTICE:  merging multiple inherited definitions of column "aa"
 NOTICE:  merging multiple inherited definitions of column "b"
-ALTER TABLE t1 RENAME aa TO aaa;
-\d+ t4
-                          Table "public.t4"
+ALTER TABLE inht1 RENAME aa TO aaa;
+\d+ inht4
+                        Table "public.inht4"
  Column |  Type   | Modifiers | Storage | Stats target | Description 
 --------+---------+-----------+---------+--------------+-------------
  aaa    | integer |           | plain   |              | 
@@ -1135,17 +916,17 @@ ALTER TABLE t1 RENAME aa TO aaa;
  x      | integer |           | plain   |              | 
  y      | integer |           | plain   |              | 
  z      | integer |           | plain   |              | 
-Inherits: t2,
-          t3
+Inherits: inht2,
+          inht3
 Has OIDs: no
 
-CREATE TABLE ts (d int) INHERITS (t2, s1);
+CREATE TABLE inhts (d int) INHERITS (inht2, inhs1);
 NOTICE:  merging multiple inherited definitions of column "b"
-ALTER TABLE t1 RENAME aaa TO aaaa;
-ALTER TABLE t1 RENAME b TO bb;                -- to be failed
+ALTER TABLE inht1 RENAME aaa TO aaaa;
+ALTER TABLE inht1 RENAME b TO bb;                -- to be failed
 ERROR:  cannot rename inherited column "b"
-\d+ ts
-                          Table "public.ts"
+\d+ inhts
+                        Table "public.inhts"
  Column |  Type   | Modifiers | Storage | Stats target | Description 
 --------+---------+-----------+---------+--------------+-------------
  aaaa   | integer |           | plain   |              | 
@@ -1153,12 +934,12 @@ ERROR:  cannot rename inherited column "b"
  x      | integer |           | plain   |              | 
  c      | integer |           | plain   |              | 
  d      | integer |           | plain   |              | 
-Inherits: t2,
-          s1
+Inherits: inht2,
+          inhs1
 Has OIDs: no
 
 WITH RECURSIVE r AS (
-  SELECT 't1'::regclass AS inhrelid
+  SELECT 'inht1'::regclass AS inhrelid
 UNION ALL
   SELECT c.inhrelid FROM pg_inherits c, r WHERE r.inhrelid = c.inhparent
 )
@@ -1169,26 +950,26 @@ SELECT a.attrelid::regclass, a.attname, a.attinhcount, e.expected
   ORDER BY a.attrelid::regclass::name, a.attnum;
  attrelid | attname | attinhcount | expected 
 ----------+---------+-------------+----------
- t2       | aaaa    |           1 |        1
- t2       | b       |           1 |        1
- t3       | aaaa    |           1 |        1
- t3       | b       |           1 |        1
- t4       | aaaa    |           2 |        2
- t4       | b       |           2 |        2
- t4       | x       |           1 |        2
- t4       | y       |           1 |        2
- ts       | aaaa    |           1 |        1
- ts       | b       |           2 |        1
- ts       | x       |           1 |        1
- ts       | c       |           1 |        1
+ inht2    | aaaa    |           1 |        1
+ inht2    | b       |           1 |        1
+ inht3    | aaaa    |           1 |        1
+ inht3    | b       |           1 |        1
+ inht4    | aaaa    |           2 |        2
+ inht4    | b       |           2 |        2
+ inht4    | x       |           1 |        2
+ inht4    | y       |           1 |        2
+ inhts    | aaaa    |           1 |        1
+ inhts    | b       |           2 |        1
+ inhts    | x       |           1 |        1
+ inhts    | c       |           1 |        1
 (12 rows)
 
-DROP TABLE t1, s1 CASCADE;
+DROP TABLE inht1, inhs1 CASCADE;
 NOTICE:  drop cascades to 4 other objects
-DETAIL:  drop cascades to table t2
-drop cascades to table ts
-drop cascades to table t3
-drop cascades to table t4
+DETAIL:  drop cascades to table inht2
+drop cascades to table inhts
+drop cascades to table inht3
+drop cascades to table inht4
 --
 -- Test merge-append plans for inheritance trees
 --
diff --git a/src/test/regress/expected/sanity_check.out b/src/test/regress/expected/sanity_check.out
index 05ac11c..7f560d2 100644
--- a/src/test/regress/expected/sanity_check.out
+++ b/src/test/regress/expected/sanity_check.out
@@ -55,7 +55,6 @@ SELECT relname, relhasindex
  hobbies_r               | f
  ihighway                | t
  inet_tbl                | f
- inhe                    | f
  inhf                    | f
  inhx                    | t
  insert_tbl              | f
@@ -165,7 +164,7 @@ SELECT relname, relhasindex
  timetz_tbl              | f
  tinterval_tbl           | f
  varchar_tbl             | f
-(154 rows)
+(153 rows)
 
 --
 -- another sanity check: every system catalog that has OIDs should have
diff --git a/src/test/regress/output/misc.source b/src/test/regress/output/misc.source
index 03aa10d..2f4d482 100644
--- a/src/test/regress/output/misc.source
+++ b/src/test/regress/output/misc.source
@@ -627,7 +627,6 @@ SELECT user_relns() AS user_relns
  iexit
  ihighway
  inet_tbl
- inhe
  inhf
  inhx
  insert_seq
@@ -686,7 +685,7 @@ SELECT user_relns() AS user_relns
  toyemp
  varchar_tbl
  xacttest
-(108 rows)
+(107 rows)
 
 SELECT name(equipment(hobby_construct(text 'skywalking', text 'mer')));
  name 
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index b47b08b..3bedad0 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -59,7 +59,7 @@ test: create_index create_view
 # ----------
 # Another group of parallel tests
 # ----------
-test: create_aggregate create_cast constraints triggers inherit typed_table vacuum drop_if_exists
+test: create_aggregate create_cast constraints triggers inherit create_table_like typed_table vacuum drop_if_exists
 
 # ----------
 # sanity_check does a vacuum, affecting the sort order of SELECT *
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index 57806b5..0b64569 100644
--- a/src/test/regress/serial_schedule
+++ b/src/test/regress/serial_schedule
@@ -61,6 +61,7 @@ test: create_cast
 test: constraints
 test: triggers
 test: inherit
+test: create_table_like
 test: typed_table
 test: vacuum
 test: drop_if_exists
diff --git a/src/test/regress/sql/create_table_like.sql b/src/test/regress/sql/create_table_like.sql
new file mode 100644
index 0000000..58cea44
--- /dev/null
+++ b/src/test/regress/sql/create_table_like.sql
@@ -0,0 +1,99 @@
+/* Test inheritance of structure (LIKE) */
+CREATE TABLE inhx (xx text DEFAULT 'text');
+
+/*
+ * Test double inheritance
+ *
+ * Ensure that defaults are NOT included unless
+ * INCLUDING DEFAULTS is specified
+ */
+CREATE TABLE ctla (aa TEXT);
+CREATE TABLE ctlb (bb TEXT) INHERITS (ctla);
+
+CREATE TABLE inhe (ee text, LIKE inhx) inherits (ctlb);
+INSERT INTO inhe VALUES ('ee-col1', 'ee-col2', DEFAULT, 'ee-col4');
+SELECT * FROM inhe; /* Columns aa, bb, xx value NULL, ee */
+SELECT * FROM inhx; /* Empty set since LIKE inherits structure only */
+SELECT * FROM ctlb; /* Has ee entry */
+SELECT * FROM ctla; /* Has ee entry */
+
+CREATE TABLE inhf (LIKE inhx, LIKE inhx); /* Throw error */
+
+CREATE TABLE inhf (LIKE inhx INCLUDING DEFAULTS INCLUDING CONSTRAINTS);
+INSERT INTO inhf DEFAULT VALUES;
+SELECT * FROM inhf; /* Single entry with value 'text' */
+
+ALTER TABLE inhx add constraint foo CHECK (xx = 'text');
+ALTER TABLE inhx ADD PRIMARY KEY (xx);
+CREATE TABLE inhg (LIKE inhx); /* Doesn't copy constraint */
+INSERT INTO inhg VALUES ('foo');
+DROP TABLE inhg;
+CREATE TABLE inhg (x text, LIKE inhx INCLUDING CONSTRAINTS, y text); /* Copies constraints */
+INSERT INTO inhg VALUES ('x', 'text', 'y'); /* Succeeds */
+INSERT INTO inhg VALUES ('x', 'text', 'y'); /* Succeeds -- Unique constraints not copied */
+INSERT INTO inhg VALUES ('x', 'foo',  'y');  /* fails due to constraint */
+SELECT * FROM inhg; /* Two records with three columns in order x=x, xx=text, y=y */
+DROP TABLE inhg;
+
+CREATE TABLE inhg (x text, LIKE inhx INCLUDING INDEXES, y text); /* copies indexes */
+INSERT INTO inhg VALUES (5, 10);
+INSERT INTO inhg VALUES (20, 10); -- should fail
+DROP TABLE inhg;
+/* Multiple primary keys creation should fail */
+CREATE TABLE inhg (x text, LIKE inhx INCLUDING INDEXES, PRIMARY KEY(x)); /* fails */
+CREATE TABLE inhz (xx text DEFAULT 'text', yy int UNIQUE);
+CREATE UNIQUE INDEX inhz_xx_idx on inhz (xx) WHERE xx <> 'test';
+/* Ok to create multiple unique indexes */
+CREATE TABLE inhg (x text UNIQUE, LIKE inhz INCLUDING INDEXES);
+INSERT INTO inhg (xx, yy, x) VALUES ('test', 5, 10);
+INSERT INTO inhg (xx, yy, x) VALUES ('test', 10, 15);
+INSERT INTO inhg (xx, yy, x) VALUES ('foo', 10, 15); -- should fail
+DROP TABLE inhg;
+DROP TABLE inhz;
+
+-- including storage and comments
+CREATE TABLE ctlt1 (a text CHECK (length(a) > 2) PRIMARY KEY, b text);
+CREATE INDEX ctlt1_b_key ON ctlt1 (b);
+CREATE INDEX ctlt1_fnidx ON ctlt1 ((a || b));
+COMMENT ON COLUMN ctlt1.a IS 'A';
+COMMENT ON COLUMN ctlt1.b IS 'B';
+COMMENT ON CONSTRAINT ctlt1_a_check ON ctlt1 IS 't1_a_check';
+COMMENT ON INDEX ctlt1_pkey IS 'index pkey';
+COMMENT ON INDEX ctlt1_b_key IS 'index b_key';
+ALTER TABLE ctlt1 ALTER COLUMN a SET STORAGE MAIN;
+
+CREATE TABLE ctlt2 (c text);
+ALTER TABLE ctlt2 ALTER COLUMN c SET STORAGE EXTERNAL;
+COMMENT ON COLUMN ctlt2.c IS 'C';
+
+CREATE TABLE ctlt3 (a text CHECK (length(a) < 5), c text);
+ALTER TABLE ctlt3 ALTER COLUMN c SET STORAGE EXTERNAL;
+ALTER TABLE ctlt3 ALTER COLUMN a SET STORAGE MAIN;
+COMMENT ON COLUMN ctlt3.a IS 'A3';
+COMMENT ON COLUMN ctlt3.c IS 'C';
+COMMENT ON CONSTRAINT ctlt3_a_check ON ctlt3 IS 't3_a_check';
+
+CREATE TABLE ctlt4 (a text, c text);
+ALTER TABLE ctlt4 ALTER COLUMN c SET STORAGE EXTERNAL;
+
+CREATE TABLE ctlt12_storage (LIKE ctlt1 INCLUDING STORAGE, LIKE ctlt2 INCLUDING STORAGE);
+\d+ ctlt12_storage
+CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDING COMMENTS);
+\d+ ctlt12_comments
+CREATE TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (ctlt1);
+\d+ ctlt1_inh
+SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt1_inh'::regclass;
+CREATE TABLE ctlt13_inh () INHERITS (ctlt1, ctlt3);
+\d+ ctlt13_inh
+CREATE TABLE ctlt13_like (LIKE ctlt3 INCLUDING CONSTRAINTS INCLUDING COMMENTS INCLUDING STORAGE) INHERITS (ctlt1);
+\d+ ctlt13_like
+SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt13_like'::regclass;
+
+CREATE TABLE ctlt_all (LIKE ctlt1 INCLUDING ALL);
+\d+ ctlt_all
+SELECT c.relname, objsubid, description FROM pg_description, pg_index i, pg_class c WHERE classoid = 'pg_class'::regclass AND objoid = i.indexrelid AND c.oid = i.indexrelid AND i.indrelid = 'ctlt_all'::regclass ORDER BY c.relname, objsubid;
+
+CREATE TABLE inh_error1 () INHERITS (ctlt1, ctlt4);
+CREATE TABLE inh_error2 (LIKE ctlt4 INCLUDING STORAGE) INHERITS (ctlt1);
+
+DROP TABLE ctlt1, ctlt2, ctlt3, ctlt4, ctlt12_storage, ctlt12_comments, ctlt1_inh, ctlt13_inh, ctlt13_like, ctlt_all, ctla, ctlb CASCADE;
diff --git a/src/test/regress/sql/inherit.sql b/src/test/regress/sql/inherit.sql
index 6914404..4dfbc07 100644
--- a/src/test/regress/sql/inherit.sql
+++ b/src/test/regress/sql/inherit.sql
@@ -133,56 +133,6 @@ CREATE TABLE otherchild (tomorrow date default now())
 
 DROP TABLE firstparent, secondparent, jointchild, thirdparent, otherchild;
 
-/* Test inheritance of structure (LIKE) */
-CREATE TABLE inhx (xx text DEFAULT 'text');
-
-/*
- * Test double inheritance
- *
- * Ensure that defaults are NOT included unless
- * INCLUDING DEFAULTS is specified
- */
-CREATE TABLE inhe (ee text, LIKE inhx) inherits (b);
-INSERT INTO inhe VALUES ('ee-col1', 'ee-col2', DEFAULT, 'ee-col4');
-SELECT * FROM inhe; /* Columns aa, bb, xx value NULL, ee */
-SELECT * FROM inhx; /* Empty set since LIKE inherits structure only */
-SELECT * FROM b; /* Has ee entry */
-SELECT * FROM a; /* Has ee entry */
-
-CREATE TABLE inhf (LIKE inhx, LIKE inhx); /* Throw error */
-
-CREATE TABLE inhf (LIKE inhx INCLUDING DEFAULTS INCLUDING CONSTRAINTS);
-INSERT INTO inhf DEFAULT VALUES;
-SELECT * FROM inhf; /* Single entry with value 'text' */
-
-ALTER TABLE inhx add constraint foo CHECK (xx = 'text');
-ALTER TABLE inhx ADD PRIMARY KEY (xx);
-CREATE TABLE inhg (LIKE inhx); /* Doesn't copy constraint */
-INSERT INTO inhg VALUES ('foo');
-DROP TABLE inhg;
-CREATE TABLE inhg (x text, LIKE inhx INCLUDING CONSTRAINTS, y text); /* Copies constraints */
-INSERT INTO inhg VALUES ('x', 'text', 'y'); /* Succeeds */
-INSERT INTO inhg VALUES ('x', 'text', 'y'); /* Succeeds -- Unique constraints not copied */
-INSERT INTO inhg VALUES ('x', 'foo',  'y');  /* fails due to constraint */
-SELECT * FROM inhg; /* Two records with three columns in order x=x, xx=text, y=y */
-DROP TABLE inhg;
-
-CREATE TABLE inhg (x text, LIKE inhx INCLUDING INDEXES, y text); /* copies indexes */
-INSERT INTO inhg VALUES (5, 10);
-INSERT INTO inhg VALUES (20, 10); -- should fail
-DROP TABLE inhg;
-/* Multiple primary keys creation should fail */
-CREATE TABLE inhg (x text, LIKE inhx INCLUDING INDEXES, PRIMARY KEY(x)); /* fails */
-CREATE TABLE inhz (xx text DEFAULT 'text', yy int UNIQUE);
-CREATE UNIQUE INDEX inhz_xx_idx on inhz (xx) WHERE xx <> 'test';
-/* Ok to create multiple unique indexes */
-CREATE TABLE inhg (x text UNIQUE, LIKE inhz INCLUDING INDEXES);
-INSERT INTO inhg (xx, yy, x) VALUES ('test', 5, 10);
-INSERT INTO inhg (xx, yy, x) VALUES ('test', 10, 15);
-INSERT INTO inhg (xx, yy, x) VALUES ('foo', 10, 15); -- should fail
-DROP TABLE inhg;
-DROP TABLE inhz;
-
 -- Test changing the type of inherited columns
 insert into d values('test','one','two','three');
 alter table a alter column aa type integer using bit_length(aa);
@@ -302,81 +252,34 @@ alter table pp1 add column a2 int check (a2 > 0);
 \d cc2
 drop table pp1 cascade;
 
--- including storage and comments
-CREATE TABLE t1 (a text CHECK (length(a) > 2) PRIMARY KEY, b text);
-CREATE INDEX t1_b_key ON t1 (b);
-CREATE INDEX t1_fnidx ON t1 ((a || b));
-COMMENT ON COLUMN t1.a IS 'A';
-COMMENT ON COLUMN t1.b IS 'B';
-COMMENT ON CONSTRAINT t1_a_check ON t1 IS 't1_a_check';
-COMMENT ON INDEX t1_pkey IS 'index pkey';
-COMMENT ON INDEX t1_b_key IS 'index b_key';
-ALTER TABLE t1 ALTER COLUMN a SET STORAGE MAIN;
-
-CREATE TABLE t2 (c text);
-ALTER TABLE t2 ALTER COLUMN c SET STORAGE EXTERNAL;
-COMMENT ON COLUMN t2.c IS 'C';
-
-CREATE TABLE t3 (a text CHECK (length(a) < 5), c text);
-ALTER TABLE t3 ALTER COLUMN c SET STORAGE EXTERNAL;
-ALTER TABLE t3 ALTER COLUMN a SET STORAGE MAIN;
-COMMENT ON COLUMN t3.a IS 'A3';
-COMMENT ON COLUMN t3.c IS 'C';
-COMMENT ON CONSTRAINT t3_a_check ON t3 IS 't3_a_check';
-
-CREATE TABLE t4 (a text, c text);
-ALTER TABLE t4 ALTER COLUMN c SET STORAGE EXTERNAL;
-
-CREATE TABLE t12_storage (LIKE t1 INCLUDING STORAGE, LIKE t2 INCLUDING STORAGE);
-\d+ t12_storage
-CREATE TABLE t12_comments (LIKE t1 INCLUDING COMMENTS, LIKE t2 INCLUDING COMMENTS);
-\d+ t12_comments
-CREATE TABLE t1_inh (LIKE t1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (t1);
-\d+ t1_inh
-SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 't1_inh'::regclass;
-CREATE TABLE t13_inh () INHERITS (t1, t3);
-\d+ t13_inh
-CREATE TABLE t13_like (LIKE t3 INCLUDING CONSTRAINTS INCLUDING COMMENTS INCLUDING STORAGE) INHERITS (t1);
-\d+ t13_like
-SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 't13_like'::regclass;
-
-CREATE TABLE t_all (LIKE t1 INCLUDING ALL);
-\d+ t_all
-SELECT c.relname, objsubid, description FROM pg_description, pg_index i, pg_class c WHERE classoid = 'pg_class'::regclass AND objoid = i.indexrelid AND c.oid = i.indexrelid AND i.indrelid = 't_all'::regclass ORDER BY c.relname, objsubid;
-
-CREATE TABLE inh_error1 () INHERITS (t1, t4);
-CREATE TABLE inh_error2 (LIKE t4 INCLUDING STORAGE) INHERITS (t1);
-
-DROP TABLE t1, t2, t3, t4, t12_storage, t12_comments, t1_inh, t13_inh, t13_like, t_all;
-
 -- Test for renaming in simple multiple inheritance
-CREATE TABLE t1 (a int, b int);
-CREATE TABLE s1 (b int, c int);
-CREATE TABLE ts (d int) INHERITS (t1, s1);
+CREATE TABLE inht1 (a int, b int);
+CREATE TABLE inhs1 (b int, c int);
+CREATE TABLE inhts (d int) INHERITS (inht1, inhs1);
 
-ALTER TABLE t1 RENAME a TO aa;
-ALTER TABLE t1 RENAME b TO bb;                -- to be failed
-ALTER TABLE ts RENAME aa TO aaa;      -- to be failed
-ALTER TABLE ts RENAME d TO dd;
-\d+ ts
+ALTER TABLE inht1 RENAME a TO aa;
+ALTER TABLE inht1 RENAME b TO bb;                -- to be failed
+ALTER TABLE inhts RENAME aa TO aaa;      -- to be failed
+ALTER TABLE inhts RENAME d TO dd;
+\d+ inhts
 
-DROP TABLE ts;
+DROP TABLE inhts;
 
 -- Test for renaming in diamond inheritance
-CREATE TABLE t2 (x int) INHERITS (t1);
-CREATE TABLE t3 (y int) INHERITS (t1);
-CREATE TABLE t4 (z int) INHERITS (t2, t3);
+CREATE TABLE inht2 (x int) INHERITS (inht1);
+CREATE TABLE inht3 (y int) INHERITS (inht1);
+CREATE TABLE inht4 (z int) INHERITS (inht2, inht3);
 
-ALTER TABLE t1 RENAME aa TO aaa;
-\d+ t4
+ALTER TABLE inht1 RENAME aa TO aaa;
+\d+ inht4
 
-CREATE TABLE ts (d int) INHERITS (t2, s1);
-ALTER TABLE t1 RENAME aaa TO aaaa;
-ALTER TABLE t1 RENAME b TO bb;                -- to be failed
-\d+ ts
+CREATE TABLE inhts (d int) INHERITS (inht2, inhs1);
+ALTER TABLE inht1 RENAME aaa TO aaaa;
+ALTER TABLE inht1 RENAME b TO bb;                -- to be failed
+\d+ inhts
 
 WITH RECURSIVE r AS (
-  SELECT 't1'::regclass AS inhrelid
+  SELECT 'inht1'::regclass AS inhrelid
 UNION ALL
   SELECT c.inhrelid FROM pg_inherits c, r WHERE r.inhrelid = c.inhparent
 )
@@ -386,7 +289,7 @@ SELECT a.attrelid::regclass, a.attname, a.attinhcount, e.expected
   JOIN pg_attribute a ON e.inhrelid = a.attrelid WHERE NOT attislocal
   ORDER BY a.attrelid::regclass::name, a.attnum;
 
-DROP TABLE t1, s1 CASCADE;
+DROP TABLE inht1, inhs1 CASCADE;
 
 --
 -- Test merge-append plans for inheritance trees

commit 0a41e865845bfa5d7aafcc5fe000dafa26573fef
Author: Tom Lane <tgl@sss.pgh.pa.us>
Date:   Sat Jan 7 15:38:52 2012 -0500

    Use __sync_lock_test_and_set() for spinlocks on ARM, if available.
    
    Historically we've used the SWPB instruction for TAS() on ARM, but this
    is deprecated and not available on ARMv6 and later.  Instead, make use
    of a GCC builtin if available.  We'll still fall back to SWPB if not,
    so as not to break existing ports using older GCC versions.
    
    Eventually we might want to try using __sync_lock_test_and_set() on some
    other architectures too, but for now that seems to present only risk and
    not reward.
    
    Back-patch to all supported versions, since people might want to use any
    of them on more recent ARM chips.
    
    Martin Pitt
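
For readers unfamiliar with the GCC builtins this commit starts relying on, here is a hypothetical standalone sketch (not part of the patch) of how __sync_lock_test_and_set() and __sync_lock_release() combine into a minimal spin loop, mirroring the TAS()/S_UNLOCK() pattern in s_lock.h. It assumes a GCC-compatible compiler that provides the legacy __sync builtins.

    /* Hypothetical illustration only -- not PostgreSQL code. */
    #include <stdio.h>

    typedef int slock_t;

    static int
    my_tas(volatile slock_t *lock)
    {
        /* Returns the previous value; 0 means the lock was free and is now ours. */
        return __sync_lock_test_and_set(lock, 1);
    }

    static void
    my_unlock(volatile slock_t *lock)
    {
        /* Matching release builtin; stores 0 with release semantics. */
        __sync_lock_release(lock);
    }

    int
    main(void)
    {
        slock_t lock = 0;

        while (my_tas(&lock))
            ;                   /* spin until we observe the lock as free */
        printf("acquired\n");
        my_unlock(&lock);
        return 0;
    }

Per the GCC documentation, __sync_lock_test_and_set is only an acquire barrier, so the unlock path should go through __sync_lock_release (a release barrier) rather than a plain store -- which is exactly what the patch's new S_UNLOCK definition does.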

diff --git a/configure b/configure
index 5cb3d9b..af4f9a3 100755
--- a/configure
+++ b/configure
@@ -22596,6 +22596,71 @@ fi
 done
 
 
+{ $as_echo "$as_me:$LINENO: checking for builtin locking functions" >&5
+$as_echo_n "checking for builtin locking functions... " >&6; }
+if test "${pgac_cv_gcc_int_atomics+set}" = set; then
+  $as_echo_n "(cached) " >&6
+else
+  cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h.  */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h.  */
+
+int
+main ()
+{
+int lock = 0;
+   __sync_lock_test_and_set(&lock, 1);
+   __sync_lock_release(&lock);
+  ;
+  return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+  (eval "$ac_link") 2>conftest.er1
+  ac_status=$?
+  grep -v '^ *+' conftest.er1 >conftest.err
+  rm -f conftest.er1
+  cat conftest.err >&5
+  $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); } && {
+	 test -z "$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest$ac_exeext && {
+	 test "$cross_compiling" = yes ||
+	 $as_test_x conftest$ac_exeext
+       }; then
+  pgac_cv_gcc_int_atomics="yes"
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	pgac_cv_gcc_int_atomics="no"
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+      conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:$LINENO: result: $pgac_cv_gcc_int_atomics" >&5
+$as_echo "$pgac_cv_gcc_int_atomics" >&6; }
+if test x"$pgac_cv_gcc_int_atomics" = x"yes"; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_GCC_INT_ATOMICS 1
+_ACEOF
+
+fi
+
 
 #
 # Pthreads
diff --git a/configure.in b/configure.in
index 3f195a6..9cad436 100644
--- a/configure.in
+++ b/configure.in
@@ -1454,6 +1454,17 @@ fi
 AC_CHECK_FUNCS([strtoll strtoq], [break])
 AC_CHECK_FUNCS([strtoull strtouq], [break])
 
+AC_CACHE_CHECK([for builtin locking functions], pgac_cv_gcc_int_atomics,
+[AC_TRY_LINK([],
+  [int lock = 0;
+   __sync_lock_test_and_set(&lock, 1);
+   __sync_lock_release(&lock);],
+  [pgac_cv_gcc_int_atomics="yes"],
+  [pgac_cv_gcc_int_atomics="no"])])
+if test x"$pgac_cv_gcc_int_atomics" = x"yes"; then
+  AC_DEFINE(HAVE_GCC_INT_ATOMICS, 1, [Define to 1 if you have __sync_lock_test_and_set(int *) and friends.])
+fi
+
 
 #
 # Pthreads
diff --git a/src/include/pg_config.h.in b/src/include/pg_config.h.in
index e1b7fea..db84f49 100644
--- a/src/include/pg_config.h.in
+++ b/src/include/pg_config.h.in
@@ -179,6 +179,9 @@
 /* Define to 1 if your compiler understands __FUNCTION__. */
 #undef HAVE_FUNCNAME__FUNCTION
 
+/* Define to 1 if you have __sync_lock_test_and_set(int *) and friends. */
+#undef HAVE_GCC_INT_ATOMICS
+
 /* Define to 1 if you have the `getaddrinfo' function. */
 #undef HAVE_GETADDRINFO
 
diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h
index 9b02d1f..074838e 100644
--- a/src/include/storage/s_lock.h
+++ b/src/include/storage/s_lock.h
@@ -275,13 +275,33 @@ tas(volatile slock_t *lock)
 #endif	 /* __ia64__ || __ia64 */
 
 
+/*
+ * On ARM, we use __sync_lock_test_and_set(int *, int) if available, and if
+ * not fall back on the SWPB instruction.  SWPB does not work on ARMv6 or
+ * later, so the compiler builtin is preferred if available.  Note also that
+ * the int-width variant of the builtin works on more chips than other widths.
+ */
 #if defined(__arm__) || defined(__arm)
 #define HAS_TEST_AND_SET
 
-typedef unsigned char slock_t;
-
 #define TAS(lock) tas(lock)
 
+#ifdef HAVE_GCC_INT_ATOMICS
+
+typedef int slock_t;
+
+static __inline__ int
+tas(volatile slock_t *lock)
+{
+	return __sync_lock_test_and_set(lock, 1);
+}
+
+#define S_UNLOCK(lock) __sync_lock_release(lock)
+
+#else /* !HAVE_GCC_INT_ATOMICS */
+
+typedef unsigned char slock_t;
+
 static __inline__ int
 tas(volatile slock_t *lock)
 {
@@ -295,6 +315,7 @@ tas(volatile slock_t *lock)
 	return (int) _res;
 }
 
+#endif	 /* HAVE_GCC_INT_ATOMICS */
 #endif	 /* __arm__ */
 
 

commit 1fc3d18faa8f4476944bc6854be0f7f6adf4aec8
Author: Robert Haas <rhaas@postgresql.org>
Date:   Fri Jan 6 22:56:00 2012 -0500

    Slightly reorganize struct SnapshotData.
    
    This squeezes out a bunch of alignment padding, reducing the size
    from 72 to 56 bytes on my machine.  At least in my testing, this
    didn't produce any measurable performance improvement, but the space
    savings seem like enough justification.
    
    Andres Freund
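
The space saving comes from ordinary struct alignment rules: an 8-byte pointer that follows a lone 4-byte counter forces 4 bytes of padding, so interleaving counters and pointers wastes space that grouping same-width fields recovers. A hedged, self-contained illustration follows; the field names are invented and the concrete sizes assume a common LP64 ABI (4-byte int, 8-byte pointers).

    /* Illustrative only -- not the actual SnapshotData layout. */
    #include <stdio.h>
    #include <stdint.h>

    struct interleaved
    {
        uint32_t    count1;     /* 4 bytes, then 4 bytes of padding */
        void       *array1;     /* 8-byte-aligned pointer */
        uint32_t    count2;     /* another 4 bytes + 4 bytes of padding */
        void       *array2;
    };                          /* typically 32 bytes on LP64 */

    struct grouped
    {
        void       *array1;     /* pointers grouped first */
        void       *array2;
        uint32_t    count1;     /* 4-byte fields then pack back to back */
        uint32_t    count2;
    };                          /* typically 24 bytes on LP64 */

    int
    main(void)
    {
        /* Exact numbers are ABI-dependent; the point is the difference. */
        printf("%zu %zu\n", sizeof(struct interleaved), sizeof(struct grouped));
        return 0;
    }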

diff --git a/src/include/utils/snapshot.h b/src/include/utils/snapshot.h
index 93c02fa..900272e 100644
--- a/src/include/utils/snapshot.h
+++ b/src/include/utils/snapshot.h
@@ -46,13 +46,14 @@ typedef struct SnapshotData
 	 */
 	TransactionId xmin;			/* all XID < xmin are visible to me */
 	TransactionId xmax;			/* all XID >= xmax are invisible to me */
-	uint32		xcnt;			/* # of xact ids in xip[] */
 	TransactionId *xip;			/* array of xact IDs in progress */
+	uint32		xcnt;			/* # of xact ids in xip[] */
 	/* note: all ids in xip[] satisfy xmin <= xip[i] < xmax */
 	int32		subxcnt;		/* # of xact ids in subxip[] */
 	TransactionId *subxip;		/* array of subxact IDs in progress */
 	bool		suboverflowed;	/* has the subxip array overflowed? */
 	bool		takenDuringRecovery;	/* recovery-shaped snapshot? */
+	bool		copied;			/* false if it's a static snapshot */
 
 	/*
 	 * note: all ids in subxip[] are >= xmin, but we don't bother filtering
@@ -61,7 +62,6 @@ typedef struct SnapshotData
 	CommandId	curcid;			/* in my xact, CID < curcid are visible */
 	uint32		active_count;	/* refcount on ActiveSnapshot stack */
 	uint32		regd_count;		/* refcount on RegisteredSnapshotList */
-	bool		copied;			/* false if it's a static snapshot */
 } SnapshotData;
 
 /*

commit df970a0ac8fb416b179825a135c18ad3293076af
Author: Robert Haas <rhaas@postgresql.org>
Date:   Fri Jan 6 22:54:43 2012 -0500

    Fix backwards logic in previous commit.
    
    I wrote this code before committing it, but managed not to include it in
    the actual commit.

diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 0cd273e..c373016 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -9947,11 +9947,6 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
 				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
 				 errmsg("\"%s\" is not a composite type", rv->relname)));
 
-	if (reltype == OBJECT_FOREIGN_TABLE && relkind != RELKIND_FOREIGN_TABLE)
-		ereport(ERROR,
-				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-				 errmsg("\"%s\" is not a foreign table", rv->relname)));
-
 	if (reltype == OBJECT_INDEX && relkind != RELKIND_INDEX
 		&& !IsA(stmt, RenameStmt))
 		ereport(ERROR,
@@ -9968,6 +9963,12 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
 				 errmsg("\"%s\" is a composite type", rv->relname),
 				 errhint("Use ALTER TYPE instead.")));
 
+	if (reltype != OBJECT_FOREIGN_TABLE && relkind == RELKIND_FOREIGN_TABLE)
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is a foreign table", rv->relname),
+				 errhint("Use ALTER FOREIGN TABLE instead.")));
+
 	/*
 	 * Don't allow ALTER TABLE .. SET SCHEMA on relations that can't be
 	 * moved to a different schema, such as indexes and TOAST tables.

commit 1489e2f26a4c0318938b3085f50976512f321d84
Author: Robert Haas <rhaas@postgresql.org>
Date:   Fri Jan 6 22:42:26 2012 -0500

    Improve behavior of concurrent ALTER TABLE, and do some refactoring.
    
    ALTER TABLE (and ALTER VIEW, ALTER SEQUENCE, etc.) now use a
    RangeVarGetRelid callback to check permissions before acquiring a table
    lock.  We also now use the same callback for all forms of ALTER TABLE,
    rather than having separate, almost-identical callbacks for ALTER TABLE
    .. SET SCHEMA and ALTER TABLE .. RENAME, and no callback at all for
    everything else.
    
    I went ahead and changed the code so that no form of ALTER TABLE works
    on foreign tables; you must use ALTER FOREIGN TABLE instead.  In 9.1,
    it was possible to use ALTER TABLE .. SET SCHEMA or ALTER TABLE ..
    RENAME on a foreign table, but not any other form of ALTER TABLE, which
    did not seem terribly useful or consistent.
    
    Patch by me; review by Noah Misch.
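
The key mechanism is visible in the hunks below: RenameRelation (and the new AlterTableLookupRelation) pass RangeVarCallbackForAlterRelation to RangeVarGetRelidExtended, so ownership and object-type checks run as part of name resolution instead of after a lock is already held. As a rough standalone sketch of that shape -- every name here is invented, not PostgreSQL's API -- a lookup routine can accept a caller-supplied callback and invoke it before taking the lock:

    /* Invented names throughout; only the resolve-check-lock shape is the point. */
    #include <stdio.h>
    #include <string.h>

    typedef unsigned int Oid;

    typedef void (*lookup_callback) (const char *relname, Oid relid,
                                     Oid oldrelid, void *arg);

    static void
    check_owner_callback(const char *relname, Oid relid, Oid oldrelid, void *arg)
    {
        const char *user = arg;

        (void) oldrelid;
        /* Cheap permission check, run before any heavyweight lock is taken.
         * The real callback would ereport(ERROR) here, so the lock is never
         * acquired on failure. */
        if (strcmp(user, "table_owner") != 0)
            printf("permission denied for \"%s\" (oid %u)\n", relname, relid);
    }

    static Oid
    get_relid_extended(const char *relname, lookup_callback callback, void *arg)
    {
        Oid relid = 16384;                      /* stand-in for the catalog lookup */

        callback(relname, relid, 0, arg);       /* verify first ... */
        printf("locking relation %u\n", relid); /* ... then lock */
        return relid;
    }

    int
    main(void)
    {
        get_relid_extended("inht1", check_owner_callback, (void *) "table_owner");
        return 0;
    }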

diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index c41c70c..7c658c0 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -192,8 +192,7 @@ ExecAlterObjectSchemaStmt(AlterObjectSchemaStmt *stmt)
 		case OBJECT_TABLE:
 		case OBJECT_VIEW:
 		case OBJECT_FOREIGN_TABLE:
-			AlterTableNamespace(stmt->relation, stmt->newschema,
-								stmt->objectType, AccessExclusiveLock);
+			AlterTableNamespace(stmt);
 			break;
 
 		case OBJECT_TSPARSER:
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 3b52415..0cd273e 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -387,6 +387,8 @@ static const char *storage_name(char c);
 
 static void RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid,
 								Oid oldRelOid, void *arg);
+static void RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid,
+								 Oid oldrelid, void *arg);
 
 
 /* ----------------------------------------------------------------
@@ -2319,81 +2321,6 @@ renameatt(RenameStmt *stmt)
 }
 
 /*
- * Perform permissions and integrity checks before acquiring a relation lock.
- */
-static void
-RangeVarCallbackForRenameRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
-								  void *arg)
-{
-	RenameStmt	   *stmt = (RenameStmt *) arg;
-	ObjectType		reltype;
-	HeapTuple		tuple;
-	Form_pg_class	classform;
-	AclResult   	aclresult;
-	char			relkind;
-
-	tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
-	if (!HeapTupleIsValid(tuple))
-		return;							/* concurrently dropped */
-	classform = (Form_pg_class) GETSTRUCT(tuple);
-	relkind = classform->relkind;
-
-	/* Must own table. */
-	if (!pg_class_ownercheck(relid, GetUserId()))
-		aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
-					   NameStr(classform->relname));
-
-	/* No system table modifications unless explicitly allowed. */
-	if (!allowSystemTableMods && IsSystemClass(classform))
-		ereport(ERROR,
-				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-				 errmsg("permission denied: \"%s\" is a system catalog",
-						NameStr(classform->relname))));
-
-	/* Must (still) have CREATE rights on containing namespace. */
- 	aclresult = pg_namespace_aclcheck(classform->relnamespace, GetUserId(),
-									  ACL_CREATE);
-	if (aclresult != ACLCHECK_OK)
-		aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
-					   get_namespace_name(classform->relnamespace));
-
-	/*
-	 * For compatibility with prior releases, we don't complain if ALTER TABLE
-	 * or ALTER INDEX is used to rename some other type of relation.  But
-	 * ALTER SEQUENCE/VIEW/FOREIGN TABLE are only to be used with relations of
-	 * that type.
-	 */
-	reltype = stmt->renameType;
-	if (reltype == OBJECT_SEQUENCE && relkind != RELKIND_SEQUENCE)
-		ereport(ERROR,
-				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-				 errmsg("\"%s\" is not a sequence", rv->relname)));
-
-	if (reltype == OBJECT_VIEW && relkind != RELKIND_VIEW)
-		ereport(ERROR,
-				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-				 errmsg("\"%s\" is not a view", rv->relname)));
-
-	if (reltype == OBJECT_FOREIGN_TABLE && relkind != RELKIND_FOREIGN_TABLE)
-		ereport(ERROR,
-				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-				 errmsg("\"%s\" is not a foreign table", rv->relname)));
-
-	/*
-	 * Don't allow ALTER TABLE on composite types. We want people to use ALTER
-	 * TYPE for that.
-	 */
-	if (relkind == RELKIND_COMPOSITE_TYPE)
-		ereport(ERROR,
-				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-				 errmsg("\"%s\" is a composite type", rv->relname),
-				 errhint("Use ALTER TYPE instead.")));
-
-	ReleaseSysCache(tuple);
-}
-
-
-/*
  * Execute ALTER TABLE/INDEX/SEQUENCE/VIEW/FOREIGN TABLE RENAME
  */
 void
@@ -2410,7 +2337,7 @@ RenameRelation(RenameStmt *stmt)
 	 */
 	relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
 									 false, false,
-									 RangeVarCallbackForRenameRelation,
+									 RangeVarCallbackForAlterRelation,
 									 (void *) stmt);
 
 	/* Do the work */
@@ -2546,6 +2473,19 @@ CheckTableNotInUse(Relation rel, const char *stmt)
 }
 
 /*
+ * AlterTableLookupRelation
+ *		Look up, and lock, the OID for the relation named by an alter table
+ *		statement.
+ */
+Oid
+AlterTableLookupRelation(AlterTableStmt *stmt, LOCKMODE lockmode)
+{
+	return RangeVarGetRelidExtended(stmt->relation, lockmode, false, false,
+									RangeVarCallbackForAlterRelation,
+									(void *) stmt);
+}
+
+/*
  * AlterTable
  *		Execute ALTER TABLE, which can be a list of subcommands
  *
@@ -2579,90 +2519,26 @@ CheckTableNotInUse(Relation rel, const char *stmt)
  * Thanks to the magic of MVCC, an error anywhere along the way rolls back
  * the whole operation; we don't have to do anything special to clean up.
  *
- * We lock the table as the first action, with an appropriate lock level
+ * The caller must lock the relation, with an appropriate lock level 
  * for the subcommands requested. Any subcommand that needs to rewrite
  * tuples in the table forces the whole command to be executed with
- * AccessExclusiveLock. If all subcommands do not require rewrite table
- * then we may be able to use lower lock levels. We pass the lock level down
+ * AccessExclusiveLock (actually, that is currently required always, but
+ * we hope to relax it at some point).  We pass the lock level down
  * so that we can apply it recursively to inherited tables. Note that the
- * lock level we want as we recurse may well be higher than required for
+ * lock level we want as we recurse might well be higher than required for
  * that specific subcommand. So we pass down the overall lock requirement,
  * rather than reassess it at lower levels.
  */
 void
-AlterTable(AlterTableStmt *stmt)
+AlterTable(Oid relid, LOCKMODE lockmode, AlterTableStmt *stmt)
 {
 	Relation	rel;
-	LOCKMODE	lockmode = AlterTableGetLockLevel(stmt->cmds);
 
-	/*
-	 * Acquire same level of lock as already acquired during parsing.
-	 */
-	rel = relation_openrv(stmt->relation, lockmode);
+	/* Caller is required to provide an adequate lock. */
+	rel = relation_open(relid, NoLock);
 
 	CheckTableNotInUse(rel, "ALTER TABLE");
 
-	/* Check relation type against type specified in the ALTER command */
-	switch (stmt->relkind)
-	{
-		case OBJECT_TABLE:
-
-			/*
-			 * For mostly-historical reasons, we allow ALTER TABLE to apply to
-			 * almost all relation types.
-			 */
-			if (rel->rd_rel->relkind == RELKIND_COMPOSITE_TYPE
-				|| rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
-				ereport(ERROR,
-						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-						 errmsg("\"%s\" is not a table",
-								RelationGetRelationName(rel))));
-			break;
-
-		case OBJECT_INDEX:
-			if (rel->rd_rel->relkind != RELKIND_INDEX)
-				ereport(ERROR,
-						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-						 errmsg("\"%s\" is not an index",
-								RelationGetRelationName(rel))));
-			break;
-
-		case OBJECT_SEQUENCE:
-			if (rel->rd_rel->relkind != RELKIND_SEQUENCE)
-				ereport(ERROR,
-						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-						 errmsg("\"%s\" is not a sequence",
-								RelationGetRelationName(rel))));
-			break;
-
-		case OBJECT_TYPE:
-			if (rel->rd_rel->relkind != RELKIND_COMPOSITE_TYPE)
-				ereport(ERROR,
-						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-						 errmsg("\"%s\" is not a composite type",
-								RelationGetRelationName(rel))));
-			break;
-
-		case OBJECT_VIEW:
-			if (rel->rd_rel->relkind != RELKIND_VIEW)
-				ereport(ERROR,
-						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-						 errmsg("\"%s\" is not a view",
-								RelationGetRelationName(rel))));
-			break;
-
-		case OBJECT_FOREIGN_TABLE:
-			if (rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE)
-				ereport(ERROR,
-						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-						 errmsg("\"%s\" is not a foreign table",
-								RelationGetRelationName(rel))));
-			break;
-
-		default:
-			elog(ERROR, "unrecognized object type: %d", (int) stmt->relkind);
-	}
-
 	ATController(rel, stmt->cmds, interpretInhOption(stmt->relation->inhOpt),
 				 lockmode);
 }
@@ -9531,103 +9407,10 @@ ATExecGenericOptions(Relation rel, List *options)
 }
 
 /*
- * Perform permissions and integrity checks before acquiring a relation lock.
- */
-static void
-RangeVarCallbackForAlterTableNamespace(const RangeVar *rv, Oid relid,
-									   Oid oldrelid, void *arg)
-{
-	HeapTuple		tuple;
-	Form_pg_class	form;
-	ObjectType		stmttype;
-
-	tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
-	if (!HeapTupleIsValid(tuple))
-		return;							/* concurrently dropped */
-	form = (Form_pg_class) GETSTRUCT(tuple);
-
-	/* Must own table. */
-	if (!pg_class_ownercheck(relid, GetUserId()))
-		aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, rv->relname);
-
-	/* No system table modifications unless explicitly allowed. */
-	if (!allowSystemTableMods && IsSystemClass(form))
-		ereport(ERROR,
-				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-				 errmsg("permission denied: \"%s\" is a system catalog",
-						rv->relname)));
-
-	/* Check relation type against type specified in the ALTER command */
-	stmttype = * (ObjectType *) arg;
-	switch (stmttype)
-	{
-		case OBJECT_TABLE:
-
-			/*
-			 * For mostly-historical reasons, we allow ALTER TABLE to apply to
-			 * all relation types.
-			 */
-			break;
-
-		case OBJECT_SEQUENCE:
-			if (form->relkind != RELKIND_SEQUENCE)
-				ereport(ERROR,
-						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-						 errmsg("\"%s\" is not a sequence", rv->relname)));
-			break;
-
-		case OBJECT_VIEW:
-			if (form->relkind != RELKIND_VIEW)
-				ereport(ERROR,
-						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-						 errmsg("\"%s\" is not a view", rv->relname)));
-			break;
-
-		case OBJECT_FOREIGN_TABLE:
-			if (form->relkind != RELKIND_FOREIGN_TABLE)
-				ereport(ERROR,
-						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-						 errmsg("\"%s\" is not a foreign table", rv->relname)));
-			break;
-
-		default:
-			elog(ERROR, "unrecognized object type: %d", (int) stmttype);
-	}
-
-	/* Can we change the schema of this tuple? */
-	switch (form->relkind)
-	{
-		case RELKIND_RELATION:
-		case RELKIND_VIEW:
-		case RELKIND_SEQUENCE:
-		case RELKIND_FOREIGN_TABLE:
-			/* ok to change schema */
-			break;
-		case RELKIND_COMPOSITE_TYPE:
-			ereport(ERROR,
-					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-					 errmsg("\"%s\" is a composite type", rv->relname),
-					 errhint("Use ALTER TYPE instead.")));
-			break;
-		case RELKIND_INDEX:
-		case RELKIND_TOASTVALUE:
-			/* FALL THRU */
-		default:
-			ereport(ERROR,
-					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-			errmsg("\"%s\" is not a table, view, sequence, or foreign table",
-				   rv->relname)));
-	}
-
-	ReleaseSysCache(tuple);
-}
-
-/*
  * Execute ALTER TABLE SET SCHEMA
  */
 void
-AlterTableNamespace(RangeVar *relation, const char *newschema,
-					ObjectType stmttype, LOCKMODE lockmode)
+AlterTableNamespace(AlterObjectSchemaStmt *stmt)
 {
 	Relation	rel;
 	Oid			relid;
@@ -9635,9 +9418,10 @@ AlterTableNamespace(RangeVar *relation, const char *newschema,
 	Oid			nspOid;
 	Relation	classRel;
 
-	relid = RangeVarGetRelidExtended(relation, lockmode, false, false,
-									 RangeVarCallbackForAlterTableNamespace,
-									 (void *) &stmttype);
+	relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
+									 false, false,
+									 RangeVarCallbackForAlterRelation,
+									 (void *) stmt);
 	rel = relation_open(relid, NoLock);
 
 	oldNspOid = RelationGetNamespace(rel);
@@ -9658,7 +9442,7 @@ AlterTableNamespace(RangeVar *relation, const char *newschema,
 	}
 
 	/* get schema OID and check its permissions */
-	nspOid = LookupCreationNamespace(newschema);
+	nspOid = LookupCreationNamespace(stmt->newschema);
 
 	/* common checks on switching namespaces */
 	CheckSetNamespace(oldNspOid, nspOid, RelationRelationId, relid);
@@ -9675,7 +9459,8 @@ AlterTableNamespace(RangeVar *relation, const char *newschema,
 	if (rel->rd_rel->relkind == RELKIND_RELATION)
 	{
 		AlterIndexNamespaces(classRel, rel, oldNspOid, nspOid);
-		AlterSeqNamespaces(classRel, rel, oldNspOid, nspOid, newschema, lockmode);
+		AlterSeqNamespaces(classRel, rel, oldNspOid, nspOid, stmt->newschema,
+						   AccessExclusiveLock);
 		AlterConstraintNamespaces(relid, oldNspOid, nspOid, false);
 	}
 
@@ -10077,3 +9862,123 @@ RangeVarCallbackOwnsTable(const RangeVar *relation,
 	if (!pg_class_ownercheck(relId, GetUserId()))
 		aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, relation->relname);
 }
+
+/*
+ * Common RangeVarGetRelid callback for rename, set schema, and alter table
+ * processing.
+ */
+static void
+RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
+								 void *arg)
+{
+	Node		   *stmt = (Node *) arg;
+	ObjectType		reltype;
+	HeapTuple		tuple;
+	Form_pg_class	classform;
+	AclResult   	aclresult;
+	char			relkind;
+
+	tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
+	if (!HeapTupleIsValid(tuple))
+		return;							/* concurrently dropped */
+	classform = (Form_pg_class) GETSTRUCT(tuple);
+	relkind = classform->relkind;
+
+	/* Must own relation. */
+	if (!pg_class_ownercheck(relid, GetUserId()))
+		aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, rv->relname);
+
+	/* No system table modifications unless explicitly allowed. */
+	if (!allowSystemTableMods && IsSystemClass(classform))
+		ereport(ERROR,
+				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+				 errmsg("permission denied: \"%s\" is a system catalog",
+						rv->relname)));
+
+	/*
+	 * Extract the specified relation type from the statement parse tree.
+	 *
+	 * Also, for ALTER .. RENAME, check permissions: the user must (still)
+	 * have CREATE rights on the containing namespace.
+	 */
+	if (IsA(stmt, RenameStmt))
+	{
+	 	aclresult = pg_namespace_aclcheck(classform->relnamespace,
+										  GetUserId(), ACL_CREATE);
+		if (aclresult != ACLCHECK_OK)
+			aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
+						   get_namespace_name(classform->relnamespace));
+		reltype = ((RenameStmt *) stmt)->renameType;
+	}
+	else if (IsA(stmt, AlterObjectSchemaStmt))
+		reltype = ((AlterObjectSchemaStmt *) stmt)->objectType;
+	else if (IsA(stmt, AlterTableStmt))
+		reltype = ((AlterTableStmt *) stmt)->relkind;
+	else
+	{
+		reltype = OBJECT_TABLE;			/* placate compiler */
+		elog(ERROR, "unrecognized node type: %d", (int) nodeTag(stmt));
+	}
+
+	/*
+	 * For compatibility with prior releases, we allow ALTER TABLE to be
+	 * used with most other types of relations (but not composite types).
+	 * We allow similar flexibility for ALTER INDEX in the case of RENAME,
+	 * but not otherwise.  Otherwise, the user must select the correct form
+	 * of the command for the relation at issue.
+	 */
+	if (reltype == OBJECT_SEQUENCE && relkind != RELKIND_SEQUENCE)
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is not a sequence", rv->relname)));
+
+	if (reltype == OBJECT_VIEW && relkind != RELKIND_VIEW)
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is not a view", rv->relname)));
+
+	if (reltype == OBJECT_FOREIGN_TABLE && relkind != RELKIND_FOREIGN_TABLE)
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is not a foreign table", rv->relname)));
+
+	if (reltype == OBJECT_TYPE && relkind != RELKIND_COMPOSITE_TYPE)
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is not a composite type", rv->relname)));
+
+	if (reltype == OBJECT_INDEX && relkind != RELKIND_INDEX
+		&& !IsA(stmt, RenameStmt))
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is not an index", rv->relname)));
+
+	/*
+	 * Don't allow ALTER TABLE on composite types. We want people to use ALTER
+	 * TYPE for that.
+	 */
+	if (reltype != OBJECT_TYPE && relkind == RELKIND_COMPOSITE_TYPE)
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is a composite type", rv->relname),
+				 errhint("Use ALTER TYPE instead.")));
+
+	/*
+	 * Don't allow ALTER TABLE .. SET SCHEMA on relations that can't be
+	 * moved to a different schema, such as indexes and TOAST tables.
+	 */
+	if (IsA(stmt, AlterObjectSchemaStmt) && relkind != RELKIND_RELATION
+		&& relkind != RELKIND_VIEW && relkind != RELKIND_SEQUENCE
+		&& relkind != RELKIND_FOREIGN_TABLE)
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+			errmsg("\"%s\" is not a table, view, sequence, or foreign table",
+				   rv->relname)));
+
+	ReleaseSysCache(tuple);
+}
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index 704bbe9..de16a61 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -699,12 +699,23 @@ standard_ProcessUtility(Node *parsetree,
 
 		case T_AlterTableStmt:
 			{
+				AlterTableStmt *atstmt = (AlterTableStmt *) parsetree;
+				Oid			relid;
 				List	   *stmts;
 				ListCell   *l;
+				LOCKMODE	lockmode;
+
+				/*
+				 * Figure out lock mode, and acquire lock.  This also does
+				 * basic permissions checks, so that we won't wait for a lock
+				 * on (for example) a relation on which we have no
+				 * permissions.
+				 */
+				lockmode = AlterTableGetLockLevel(atstmt->cmds);
+				relid = AlterTableLookupRelation(atstmt, lockmode);
 
 				/* Run parse analysis ... */
-				stmts = transformAlterTableStmt((AlterTableStmt *) parsetree,
-												queryString);
+				stmts = transformAlterTableStmt(atstmt, queryString);
 
 				/* ... and do it */
 				foreach(l, stmts)
@@ -714,7 +725,7 @@ standard_ProcessUtility(Node *parsetree,
 					if (IsA(stmt, AlterTableStmt))
 					{
 						/* Do the table alteration proper */
-						AlterTable((AlterTableStmt *) stmt);
+						AlterTable(relid, lockmode, (AlterTableStmt *) stmt);
 					}
 					else
 					{
diff --git a/src/include/commands/tablecmds.h b/src/include/commands/tablecmds.h
index e73315e..03f397d 100644
--- a/src/include/commands/tablecmds.h
+++ b/src/include/commands/tablecmds.h
@@ -24,7 +24,9 @@ extern Oid	DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId);
 
 extern void RemoveRelations(DropStmt *drop);
 
-extern void AlterTable(AlterTableStmt *stmt);
+extern Oid	AlterTableLookupRelation(AlterTableStmt *stmt, LOCKMODE lockmode);
+
+extern void AlterTable(Oid relid, LOCKMODE lockmode, AlterTableStmt *stmt);
 
 extern LOCKMODE AlterTableGetLockLevel(List *cmds);
 
@@ -32,8 +34,7 @@ extern void ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, L
 
 extern void AlterTableInternal(Oid relid, List *cmds, bool recurse);
 
-extern void AlterTableNamespace(RangeVar *relation, const char *newschema,
-					ObjectType stmttype, LOCKMODE lockmode);
+extern void AlterTableNamespace(AlterObjectSchemaStmt *stmt);
 
 extern void AlterRelationNamespaceInternal(Relation classRel, Oid relOid,
 							   Oid oldNspOid, Oid newNspOid,

commit 33aaa139e6302e81b4fbf2570be20188bb974c4f
Author: Robert Haas <rhaas@postgresql.org>
Date:   Fri Jan 6 14:30:23 2012 -0500

    Make the number of CLOG buffers adaptive, based on shared_buffers.
    
    Previously, this was hardcoded: we always had 8.  Performance testing
    shows that isn't enough, especially on big SMP systems, so we allow it
    to scale up as high as 32 when there's adequate memory.  On the flip
    side, when shared_buffers is very small, drop the number of CLOG buffers
    down to as little as 4, so that we can start the postmaster even
    when very little shared memory is available.
    
    Per extensive discussion with Simon Riggs, Tom Lane, and others on
    pgsql-hackers.

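The clamping formula this commit introduces is easy to check by hand.  The
standalone sketch below is only an illustration (Min/Max and the sample page
counts are local stand-ins; NBuffers is expressed in 8kB shared-buffer pages),
showing the values the formula yields across a range of shared_buffers
settings.

    #include <stdio.h>

    #define Min(x, y)  ((x) < (y) ? (x) : (y))
    #define Max(x, y)  ((x) > (y) ? (x) : (y))

    /* Same arithmetic as the CLOGShmemBuffers() added below. */
    static int
    clog_buffers(int nbuffers)
    {
        return Min(32, Max(4, nbuffers / 512));
    }

    int
    main(void)
    {
        /* shared_buffers of 2MB, 16MB, 128MB and 1GB, in 8kB pages */
        int sample[] = {256, 2048, 16384, 131072};

        for (int i = 0; i < 4; i++)
            printf("NBuffers = %6d -> %2d CLOG buffers\n",
                   sample[i], clog_buffers(sample[i]));
        return 0;   /* prints 4, 4, 32, 32 */
    }
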
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 4060e60..69b6ef3 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -35,6 +35,7 @@
 #include "access/clog.h"
 #include "access/slru.h"
 #include "access/transam.h"
+#include "miscadmin.h"
 #include "pg_trace.h"
 
 /*
@@ -409,6 +410,34 @@ TransactionIdGetStatus(TransactionId xid, XLogRecPtr *lsn)
 	return status;
 }
 
+/*
+ * Number of shared CLOG buffers.
+ *
+ * Testing during the PostgreSQL 9.2 development cycle revealed that on a
+ * large multi-processor system, it was possible to have more CLOG page
+ * requests in flight at one time than the number of CLOG buffers which existed
+ * at that time, which was hardcoded to 8.  Further testing revealed that
+ * performance dropped off with more than 32 CLOG buffers, possibly because
+ * the linear buffer search algorithm doesn't scale well.
+ *
+ * Unconditionally increasing the number of CLOG buffers to 32 did not seem
+ * like a good idea, because it would increase the minimum amount of shared
+ * memory required to start, which could be a problem for people running very
+ * small configurations.  The following formula seems to represent a reasonable
+ * compromise: people with very low values for shared_buffers will get fewer
+ * CLOG buffers as well, and everyone else will get 32.
+ *
+ * It is likely that some further work will be needed here in future releases;
+ * for example, on a 64-core server, the maximum number of CLOG requests that
+ * can be simultaneously in flight will be even larger.  But that will
+ * apparently require more than just changing the formula, so for now we take
+ * the easy way out.
+ */
+Size
+CLOGShmemBuffers(void)
+{
+	return Min(32, Max(4, NBuffers / 512));
+}
 
 /*
  * Initialization of shared memory for CLOG
@@ -416,14 +445,14 @@ TransactionIdGetStatus(TransactionId xid, XLogRecPtr *lsn)
 Size
 CLOGShmemSize(void)
 {
-	return SimpleLruShmemSize(NUM_CLOG_BUFFERS, CLOG_LSNS_PER_PAGE);
+	return SimpleLruShmemSize(CLOGShmemBuffers(), CLOG_LSNS_PER_PAGE);
 }
 
 void
 CLOGShmemInit(void)
 {
 	ClogCtl->PagePrecedes = CLOGPagePrecedes;
-	SimpleLruInit(ClogCtl, "CLOG Ctl", NUM_CLOG_BUFFERS, CLOG_LSNS_PER_PAGE,
+	SimpleLruInit(ClogCtl, "CLOG Ctl", CLOGShmemBuffers(), CLOG_LSNS_PER_PAGE,
 				  CLogControlLock, "pg_clog");
 }
 
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 96f0d38..cc41568 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -171,7 +171,7 @@ NumLWLocks(void)
 	numLocks += MaxBackends + NUM_AUXILIARY_PROCS;
 
 	/* clog.c needs one per CLOG buffer */
-	numLocks += NUM_CLOG_BUFFERS;
+	numLocks += CLOGShmemBuffers();
 
 	/* subtrans.c needs one per SubTrans buffer */
 	numLocks += NUM_SUBTRANS_BUFFERS;
diff --git a/src/include/access/clog.h b/src/include/access/clog.h
index 9cf54a4..bed3b8c 100644
--- a/src/include/access/clog.h
+++ b/src/include/access/clog.h
@@ -28,14 +28,11 @@ typedef int XidStatus;
 #define TRANSACTION_STATUS_SUB_COMMITTED	0x03
 
 
-/* Number of SLRU buffers to use for clog */
-#define NUM_CLOG_BUFFERS	8
-
-
 extern void TransactionIdSetTreeStatus(TransactionId xid, int nsubxids,
 				   TransactionId *subxids, XidStatus status, XLogRecPtr lsn);
 extern XidStatus TransactionIdGetStatus(TransactionId xid, XLogRecPtr *lsn);
 
+extern Size CLOGShmemBuffers(void);
 extern Size CLOGShmemSize(void);
 extern void CLOGShmemInit(void);
 extern void BootStrapCLOG(void);

commit 7a72efda72a85eef1513f2a02449e24dc4bdfc74
Author: Tom Lane <tgl@sss.pgh.pa.us>
Date:   Fri Jan 6 13:31:37 2012 -0500

    Fix typo, pg_types_date.h => pgtypes_date.h.
    
    Spotted by Koizumi Satoru.

diff --git a/doc/src/sgml/ecpg.sgml b/doc/src/sgml/ecpg.sgml
index 68833ca..dedd886 100644
--- a/doc/src/sgml/ecpg.sgml
+++ b/doc/src/sgml/ecpg.sgml
@@ -1043,7 +1043,7 @@ ts = 2010-06-27 18:03:56.949343
 
      <para>
       In addition, the DATE type can be handled in the same way. The
-      program has to include <filename>pg_types_date.h</filename>, declare a host variable
+      program has to include <filename>pgtypes_date.h</filename>, declare a host variable
       as the date type and convert a DATE value into a text form using
       <function>PGTYPESdate_to_asc()</function> function. For more details about the
       pgtypes library functions, see <xref linkend="ecpg-pgtypes">.

commit f3316a05b5ddee619ba0617716a4fef3ceb29ded
Author: Tom Lane <tgl@sss.pgh.pa.us>
Date:   Fri Jan 6 13:04:09 2012 -0500

    Fix pg_restore's direct-to-database mode for INSERT-style table data.
    
    In commit 6545a901aaf84cb05212bb6a7674059908f527c3, I removed the mini SQL
    lexer that was in pg_backup_db.c, thinking that it had no real purpose
    beyond separating COPY data from SQL commands, which purpose had been
    obsoleted by long-ago fixes in pg_dump's archive file format.
    Unfortunately this was in error: that code was also used to identify
    command boundaries in INSERT-style table data, which is run together as a
    single string in the archive file for better compressibility.  As a result,
    direct-to-database restores from archive files made with --inserts or
    --column-inserts fail in our latest releases, as reported by Dick Visser.
    
    To fix, restore the mini SQL lexer, but simplify it by adjusting the
    calling logic so that it's only required to cope with INSERT-style table
    data, not arbitrary SQL commands.  This means it need not deal with
    SQL comments, E'' strings, or dollar-quoted strings, none of which have
    ever been emitted by dumpTableData_insert.
    
    Also, fix the lexer to cope with standard-conforming strings, which was the
    actual bug that the previous patch was meant to solve.
    
    Back-patch to all supported branches.  The previous patch went back to 8.2,
    which unfortunately means that the EOL release of 8.2 contains this bug,
    but I don't think we're doing another 8.2 release just because of that.

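The reason a simple split on ';' is not enough is that INSERT-style table data
can contain semicolons inside quoted literals.  The standalone sketch below is
illustrative only (it is not the pg_backup_db.c code, and it assumes
standard_conforming_strings is on, so backslashes need no special handling);
it walks the same three scanning states the restored lexer uses and reports
only the semicolons that actually end a statement.

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        const char *data =
            "INSERT INTO t VALUES ('a;b');\n"
            "INSERT INTO t VALUES ('c');\n";
        enum { SCAN, IN_SQUOTE, IN_DQUOTE } state = SCAN;

        for (size_t i = 0; i < strlen(data); i++)
        {
            char ch = data[i];

            switch (state)
            {
                case SCAN:
                    if (ch == ';')
                        printf("statement ends at offset %zu\n", i);
                    else if (ch == '\'')
                        state = IN_SQUOTE;
                    else if (ch == '"')
                        state = IN_DQUOTE;
                    break;
                case IN_SQUOTE:
                    /* a doubled '' just leaves and re-enters this state */
                    if (ch == '\'')
                        state = SCAN;
                    break;
                case IN_DQUOTE:
                    if (ch == '"')
                        state = SCAN;
                    break;
            }
        }
        return 0;   /* the ';' inside 'a;b' is never reported */
    }
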
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index d9edebb..234e50f 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -620,20 +620,20 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te,
 					if (te->copyStmt && strlen(te->copyStmt) > 0)
 					{
 						ahprintf(AH, "%s", te->copyStmt);
-						AH->writingCopyData = true;
+						AH->outputKind = OUTPUT_COPYDATA;
 					}
+					else
+						AH->outputKind = OUTPUT_OTHERDATA;
 
 					(*AH->PrintTocDataPtr) (AH, te, ropt);
 
 					/*
 					 * Terminate COPY if needed.
 					 */
-					if (AH->writingCopyData)
-					{
-						if (RestoringToDB(AH))
-							EndDBCopyMode(AH, te);
-						AH->writingCopyData = false;
-					}
+					if (AH->outputKind == OUTPUT_COPYDATA &&
+						RestoringToDB(AH))
+						EndDBCopyMode(AH, te);
+					AH->outputKind = OUTPUT_SQLCMDS;
 
 					/* close out the transaction started above */
 					if (is_parallel && te->created)
@@ -1975,6 +1975,8 @@ _allocAH(const char *FileSpec, const ArchiveFormat fmt,
 	AH->mode = mode;
 	AH->compression = compression;
 
+	memset(&(AH->sqlparse), 0, sizeof(AH->sqlparse));
+
 	/* Open stdout with no compression for AH output handle */
 	AH->gzOut = 0;
 	AH->OF = stdout;
@@ -4194,7 +4196,8 @@ CloneArchive(ArchiveHandle *AH)
 	clone = (ArchiveHandle *) pg_malloc(sizeof(ArchiveHandle));
 	memcpy(clone, AH, sizeof(ArchiveHandle));
 
-	/* Handle format-independent fields ... none at the moment */
+	/* Handle format-independent fields */
+	memset(&(clone->sqlparse), 0, sizeof(clone->sqlparse));
 
 	/* The clone will have its own connection, so disregard connection state */
 	clone->connection = NULL;
@@ -4227,7 +4230,9 @@ DeCloneArchive(ArchiveHandle *AH)
 	/* Clear format-specific state */
 	(AH->DeClonePtr) (AH);
 
-	/* Clear state allocated by CloneArchive ... none at the moment */
+	/* Clear state allocated by CloneArchive */
+	if (AH->sqlparse.curCmd)
+		destroyPQExpBuffer(AH->sqlparse.curCmd);
 
 	/* Clear any connection-local state */
 	if (AH->currUser)
diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h
index 7a4fd36..6dd5158 100644
--- a/src/bin/pg_dump/pg_backup_archiver.h
+++ b/src/bin/pg_dump/pg_backup_archiver.h
@@ -134,6 +134,20 @@ typedef size_t (*CustomOutPtr) (struct _archiveHandle * AH, const void *buf, siz
 
 typedef enum
 {
+	SQL_SCAN = 0,				/* normal */
+	SQL_IN_SINGLE_QUOTE,		/* '...' literal */
+	SQL_IN_DOUBLE_QUOTE			/* "..." identifier */
+} sqlparseState;
+
+typedef struct
+{
+	sqlparseState state;		/* see above */
+	bool		backSlash;		/* next char is backslash quoted? */
+	PQExpBuffer curCmd;			/* incomplete line (NULL if not created) */
+} sqlparseInfo;
+
+typedef enum
+{
 	STAGE_NONE = 0,
 	STAGE_INITIALIZING,
 	STAGE_PROCESSING,
@@ -142,6 +156,13 @@ typedef enum
 
 typedef enum
 {
+	OUTPUT_SQLCMDS = 0,			/* emitting general SQL commands */
+	OUTPUT_COPYDATA,			/* writing COPY data */
+	OUTPUT_OTHERDATA			/* writing data as INSERT commands */
+} ArchiverOutput;
+
+typedef enum
+{
 	REQ_SCHEMA = 1,
 	REQ_DATA = 2,
 	REQ_ALL = REQ_SCHEMA + REQ_DATA
@@ -167,6 +188,8 @@ typedef struct _archiveHandle
 								 * Added V1.7 */
 	ArchiveFormat format;		/* Archive format */
 
+	sqlparseInfo sqlparse;		/* state for parsing INSERT data */
+
 	time_t		createDate;		/* Date archive created */
 
 	/*
@@ -217,7 +240,7 @@ typedef struct _archiveHandle
 	PGconn	   *connection;
 	int			connectToDB;	/* Flag to indicate if direct DB connection is
 								 * required */
-	bool		writingCopyData;	/* True when we are sending COPY data */
+	ArchiverOutput outputKind;	/* Flag for what we're currently writing */
 	bool		pgCopyIn;		/* Currently in libpq 'COPY IN' mode. */
 
 	int			loFd;			/* BLOB fd */
diff --git a/src/bin/pg_dump/pg_backup_db.c b/src/bin/pg_dump/pg_backup_db.c
index bd1b8ef..62c8b33 100644
--- a/src/bin/pg_dump/pg_backup_db.c
+++ b/src/bin/pg_dump/pg_backup_db.c
@@ -365,14 +365,92 @@ ExecuteSqlCommand(ArchiveHandle *AH, const char *qry, const char *desc)
 
 
 /*
+ * Process non-COPY table data (that is, INSERT commands).
+ *
+ * The commands have been run together as one long string for compressibility,
+ * and we are receiving them in bufferloads with arbitrary boundaries, so we
+ * have to locate command boundaries and save partial commands across calls.
+ * All state must be kept in AH->sqlparse, not in local variables of this
+ * routine.  We assume that AH->sqlparse was filled with zeroes when created.
+ *
+ * We have to lex the data to the extent of identifying literals and quoted
+ * identifiers, so that we can recognize statement-terminating semicolons.
+ * We assume that INSERT data will not contain SQL comments, E'' literals,
+ * or dollar-quoted strings, so this is much simpler than a full SQL lexer.
+ */
+static void
+ExecuteInsertCommands(ArchiveHandle *AH, const char *buf, size_t bufLen)
+{
+	const char *qry = buf;
+	const char *eos = buf + bufLen;
+
+	/* initialize command buffer if first time through */
+	if (AH->sqlparse.curCmd == NULL)
+		AH->sqlparse.curCmd = createPQExpBuffer();
+
+	for (; qry < eos; qry++)
+	{
+		char	ch = *qry;
+
+		/* For neatness, we skip any newlines between commands */
+		if (!(ch == '\n' && AH->sqlparse.curCmd->len == 0))
+			appendPQExpBufferChar(AH->sqlparse.curCmd, ch);
+
+		switch (AH->sqlparse.state)
+		{
+			case SQL_SCAN:		/* Default state == 0, set in _allocAH */
+				if (ch == ';')
+				{
+					/*
+					 * We've found the end of a statement. Send it and reset
+					 * the buffer.
+					 */
+					ExecuteSqlCommand(AH, AH->sqlparse.curCmd->data,
+									  "could not execute query");
+					resetPQExpBuffer(AH->sqlparse.curCmd);
+				}
+				else if (ch == '\'')
+				{
+					AH->sqlparse.state = SQL_IN_SINGLE_QUOTE;
+					AH->sqlparse.backSlash = false;
+				}
+				else if (ch == '"')
+				{
+					AH->sqlparse.state = SQL_IN_DOUBLE_QUOTE;
+				}
+				break;
+
+			case SQL_IN_SINGLE_QUOTE:
+				/* We needn't handle '' specially */
+				if (ch == '\'' && !AH->sqlparse.backSlash)
+					AH->sqlparse.state = SQL_SCAN;
+				else if (ch == '\\' && !AH->public.std_strings)
+					AH->sqlparse.backSlash = !AH->sqlparse.backSlash;
+				else
+					AH->sqlparse.backSlash = false;
+				break;
+
+			case SQL_IN_DOUBLE_QUOTE:
+				/* We needn't handle "" specially */
+				if (ch == '"')
+					AH->sqlparse.state = SQL_SCAN;
+				break;
+		}
+	}
+}
+
+
+/*
  * Implement ahwrite() for direct-to-DB restore
  */
 int
 ExecuteSqlCommandBuf(ArchiveHandle *AH, const char *buf, size_t bufLen)
 {
-	if (AH->writingCopyData)
+	if (AH->outputKind == OUTPUT_COPYDATA)
 	{
 		/*
+		 * COPY data.
+		 *
 		 * We drop the data on the floor if libpq has failed to enter COPY
 		 * mode; this allows us to behave reasonably when trying to continue
 		 * after an error in a COPY command.
@@ -382,9 +460,19 @@ ExecuteSqlCommandBuf(ArchiveHandle *AH, const char *buf, size_t bufLen)
 			die_horribly(AH, modulename, "error returned by PQputCopyData: %s",
 						 PQerrorMessage(AH->connection));
 	}
+	else if (AH->outputKind == OUTPUT_OTHERDATA)
+	{
+		/*
+		 * Table data expressed as INSERT commands.
+		 */
+		ExecuteInsertCommands(AH, buf, bufLen);
+	}
 	else
 	{
 		/*
+		 * General SQL commands; we assume that commands will not be split
+		 * across calls.
+		 *
 		 * In most cases the data passed to us will be a null-terminated
 		 * string, but if it's not, we have to add a trailing null.
 		 */
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 8db4071..d1598ea 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -1399,6 +1399,14 @@ dumpTableData_copy(Archive *fout, void *dcontext)
 	return 1;
 }
 
+/*
+ * Dump table data using INSERT commands.
+ *
+ * Caution: when we restore from an archive file direct to database, the
+ * INSERT commands emitted by this function have to be parsed by
+ * pg_backup_db.c's ExecuteInsertCommands(), which will not handle comments,
+ * E'' strings, or dollar-quoted strings.  So don't emit anything like that.
+ */
 static int
 dumpTableData_insert(Archive *fout, void *dcontext)
 {

commit 7e4911b2ae33acff7b85234b91372133ec6df9d4
Author: Robert Haas <rhaas@postgresql.org>
Date:   Fri Jan 6 08:32:32 2012 -0500

    Fix variable confusion in BufferSync().
    
    As noted by Heikki Linnakangas, the previous coding confused the "flags"
    variable with the "mask" variable.  The effect of this appears to be that
    unlogged buffers would get written out at every checkpoint rather than
    only at shutdown time.  Although that's arguably an acceptable failure
    mode, I'm back-patching this change, since it seems like a poor idea to
    rely on this happening to work.

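To make the failure mode concrete, here is a standalone sketch of the
confusion (the flag values and the selection test are simplified stand-ins for
the bufmgr.c logic, not the real definitions).  OR-ing BM_PERMANENT into the
checkpoint's own flags argument changes nothing that the selection test looks
at, so a dirty unlogged buffer is still written; OR-ing it into mask is what
actually narrows the set.

    #include <stdio.h>
    #include <stdbool.h>

    #define BM_DIRTY                0x01    /* illustrative values only */
    #define BM_PERMANENT            0x02
    #define CHECKPOINT_IS_SHUTDOWN  0x04

    /* Would a buffer with flag word buf_flags be written by this checkpoint? */
    static bool
    selected(int checkpoint_flags, int buf_flags, bool buggy)
    {
        int mask = BM_DIRTY;

        if (!(checkpoint_flags & CHECKPOINT_IS_SHUTDOWN))
        {
            if (buggy)
                checkpoint_flags |= BM_PERMANENT;   /* no effect on the test below */
            else
                mask |= BM_PERMANENT;               /* narrows the test as intended */
        }
        return (buf_flags & mask) == mask;
    }

    int
    main(void)
    {
        int unlogged_dirty = BM_DIRTY;      /* dirty, but BM_PERMANENT not set */

        printf("buggy coding writes it: %d\n", selected(0, unlogged_dirty, true));
        printf("fixed coding writes it: %d\n", selected(0, unlogged_dirty, false));
        return 0;   /* prints 1, then 0 */
    }
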
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 91cc001..8f68bcc 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -1191,7 +1191,7 @@ BufferSync(int flags)
 	 * buffers.  But at shutdown time, we write all dirty buffers.
 	 */
 	if (!(flags & CHECKPOINT_IS_SHUTDOWN))
-		flags |= BM_PERMANENT;
+		mask |= BM_PERMANENT;
 
 	/*
 	 * Loop over all buffers, and mark the ones that need to be written with

commit bd0e74a9ce98c65c94565fb603dcc7b710cd4227
Author: Andrew Dunstan <andrew@dunslane.net>
Date:   Thu Jan 5 17:59:19 2012 -0500

    Fix breakage from earlier plperl fix.
    
    Apparently the perl garbage collector was a bit too eager, so here
    we control when the new SV is garbage collected.

diff --git a/src/pl/plperl/plperl_helpers.h b/src/pl/plperl/plperl_helpers.h
index c671820..800a408 100644
--- a/src/pl/plperl/plperl_helpers.h
+++ b/src/pl/plperl/plperl_helpers.h
@@ -45,25 +45,32 @@ utf_e2u(const char *str)
 static inline char *
 sv2cstr(SV *sv)
 {
-	char	   *val;
+	char	   *val, *res;
 	STRLEN		len;
+	SV         *nsv;
 
 	/*
 	 * get a utf8 encoded char * out of perl. *note* it may not be valid utf8!
 	 *
 	 * SvPVutf8() croaks nastily on certain things, like typeglobs and
-	 * readonly object such as $^V. That's a perl bug - it's not supposed to
-	 * happen. To avoid crashing the backend, we make a mortal copy of the
-	 * sv before passing it to SvPVutf8(). The copy will be garbage collected
-	 * very soon (see perldoc perlguts).
+	 * readonly objects such as $^V. That's a perl bug - it's not supposed to
+	 * happen. To avoid crashing the backend, we make a copy of the
+	 * sv before passing it to SvPVutf8(). The copy is garbage collected 
+	 * when we're done with it.
 	 */
-	val = SvPVutf8(sv_mortalcopy(sv), len);
+	nsv = newSVsv(sv);
+	val = SvPVutf8(nsv, len);
 
 	/*
-	 * we use perls length in the event we had an embedded null byte to ensure
+	 * we use perl's length in the event we had an embedded null byte to ensure
 	 * we error out properly
 	 */
-	return utf_u2e(val, len);
+	res =  utf_u2e(val, len);
+
+	/* safe now to garbage collect the new SV */
+	SvREFCNT_dec(nsv);
+
+	return res;
 }
 
 /*

commit 7e53515480853604aac825bd3e53e7f9716632b4
Author: Peter Eisentraut <peter_e@gmx.net>
Date:   Thu Jan 5 21:13:14 2012 +0200

    pg_dump: Dump foreign options in prettier format
    
    Dump them using line breaks and indentation instead of everything on
    one line.

diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index af45886..8db4071 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -5726,7 +5726,7 @@ getTableAttrs(TableInfo *tblinfo, int numTables)
 							  "SELECT pg_catalog.quote_ident(option_name) || "
 							  "' ' || pg_catalog.quote_literal(option_value) "
 							  "FROM pg_catalog.pg_options_to_table(attfdwoptions)"
-							  "), ', ') AS attfdwoptions "
+							  "), E',\n    ') AS attfdwoptions "
 			 "FROM pg_catalog.pg_attribute a LEFT JOIN pg_catalog.pg_type t "
 							  "ON a.atttypid = t.oid "
 							  "WHERE a.attrelid = '%u'::pg_catalog.oid "
@@ -6557,7 +6557,7 @@ getForeignDataWrappers(int *numForeignDataWrappers)
 						  "SELECT quote_ident(option_name) || ' ' || "
 						  "quote_literal(option_value) "
 						  "FROM pg_options_to_table(fdwoptions)"
-						  "), ', ') AS fdwoptions "
+						  "), E',\n    ') AS fdwoptions "
 						  "FROM pg_foreign_data_wrapper",
 						  username_subquery);
 	}
@@ -6571,7 +6571,7 @@ getForeignDataWrappers(int *numForeignDataWrappers)
 						  "SELECT quote_ident(option_name) || ' ' || "
 						  "quote_literal(option_value) "
 						  "FROM pg_options_to_table(fdwoptions)"
-						  "), ', ') AS fdwoptions "
+						  "), E',\n    ') AS fdwoptions "
 						  "FROM pg_foreign_data_wrapper",
 						  username_subquery);
 	}
@@ -6660,7 +6660,7 @@ getForeignServers(int *numForeignServers)
 					  "SELECT quote_ident(option_name) || ' ' || "
 					  "quote_literal(option_value) "
 					  "FROM pg_options_to_table(srvoptions)"
-					  "), ', ') AS srvoptions "
+					  "), E',\n    ') AS srvoptions "
 					  "FROM pg_foreign_server",
 					  username_subquery);
 
@@ -11575,7 +11575,7 @@ dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo)
 		appendPQExpBuffer(q, " VALIDATOR %s", fdwinfo->fdwvalidator);
 
 	if (strlen(fdwinfo->fdwoptions) > 0)
-		appendPQExpBuffer(q, " OPTIONS (%s)", fdwinfo->fdwoptions);
+		appendPQExpBuffer(q, " OPTIONS (\n    %s\n)", fdwinfo->fdwoptions);
 
 	appendPQExpBuffer(q, ";\n");
 
@@ -11679,7 +11679,7 @@ dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo)
 	appendPQExpBuffer(q, "%s", fmtId(fdwname));
 
 	if (srvinfo->srvoptions && strlen(srvinfo->srvoptions) > 0)
-		appendPQExpBuffer(q, " OPTIONS (%s)", srvinfo->srvoptions);
+		appendPQExpBuffer(q, " OPTIONS (\n    %s\n)", srvinfo->srvoptions);
 
 	appendPQExpBuffer(q, ";\n");
 
@@ -11770,7 +11770,7 @@ dumpUserMappings(Archive *fout,
 					  "SELECT quote_ident(option_name) || ' ' || "
 					  "quote_literal(option_value) "
 					  "FROM pg_options_to_table(umoptions)"
-					  "), ', ') AS umoptions "
+					  "), E',\n    ') AS umoptions "
 					  "FROM pg_user_mappings "
 					  "WHERE srvid = '%u' "
 					  "ORDER BY usename",
@@ -11796,7 +11796,7 @@ dumpUserMappings(Archive *fout,
 		appendPQExpBuffer(q, " SERVER %s", fmtId(servername));
 
 		if (umoptions && strlen(umoptions) > 0)
-			appendPQExpBuffer(q, " OPTIONS (%s)", umoptions);
+			appendPQExpBuffer(q, " OPTIONS (\n    %s\n)", umoptions);
 
 		appendPQExpBuffer(q, ";\n");
 
@@ -12431,7 +12431,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
 							  "SELECT pg_catalog.quote_ident(option_name) || "
 							  "' ' || pg_catalog.quote_literal(option_value) "
 							  "FROM pg_catalog.pg_options_to_table(ftoptions)"
-							  "), ', ') AS ftoptions "
+							  "), E',\n    ') AS ftoptions "
 							  "FROM pg_catalog.pg_foreign_table ft "
 							  "JOIN pg_catalog.pg_foreign_server fs "
 							  "ON (fs.oid = ft.ftserver) "
@@ -12660,7 +12660,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
 
 		/* Dump generic options if any */
 		if (ftoptions && ftoptions[0])
-			appendPQExpBuffer(q, "\nOPTIONS (%s)", ftoptions);
+			appendPQExpBuffer(q, "\nOPTIONS (\n    %s\n)", ftoptions);
 
 		appendPQExpBuffer(q, ";\n");
 
@@ -12860,7 +12860,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
 								  fmtId(tbinfo->dobj.name));
 				appendPQExpBuffer(q, "ALTER COLUMN %s ",
 								  fmtId(tbinfo->attnames[j]));
-				appendPQExpBuffer(q, "OPTIONS (%s);\n",
+				appendPQExpBuffer(q, "OPTIONS (\n    %s\n);\n",
 								  tbinfo->attfdwoptions[j]);
 			}
 		}

commit 15df037845d9d3d99e9e2b4370256b136b00c66a
Author: Peter Eisentraut <peter_e@gmx.net>
Date:   Thu Jan 5 20:34:07 2012 +0200

    pg_dump: Dump operators with the same name ordered by arity
    
    pg_dump sorts operators by name, but operators with the same name come
    out in random order.  Now operators with the same name are dumped in
    the order prefix, postfix, infix.  (This is consistent with functions,
    which are dumped in order of increasing argument count.)

diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 1985c7d..af45886 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -3138,6 +3138,7 @@ getOperators(int *numOprs)
 	int			i_oprname;
 	int			i_oprnamespace;
 	int			i_rolname;
+	int			i_oprkind;
 	int			i_oprcode;
 
 	/*
@@ -3153,6 +3154,7 @@ getOperators(int *numOprs)
 		appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
 						  "oprnamespace, "
 						  "(%s oprowner) AS rolname, "
+						  "oprkind, "
 						  "oprcode::oid AS oprcode "
 						  "FROM pg_operator",
 						  username_subquery);
@@ -3162,6 +3164,7 @@ getOperators(int *numOprs)
 		appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
 						  "0::oid AS oprnamespace, "
 						  "(%s oprowner) AS rolname, "
+						  "oprkind, "
 						  "oprcode::oid AS oprcode "
 						  "FROM pg_operator",
 						  username_subquery);
@@ -3173,6 +3176,7 @@ getOperators(int *numOprs)
 						  "oid, oprname, "
 						  "0::oid AS oprnamespace, "
 						  "(%s oprowner) AS rolname, "
+						  "oprkind, "
 						  "oprcode::oid AS oprcode "
 						  "FROM pg_operator",
 						  username_subquery);
@@ -3191,6 +3195,7 @@ getOperators(int *numOprs)
 	i_oprname = PQfnumber(res, "oprname");
 	i_oprnamespace = PQfnumber(res, "oprnamespace");
 	i_rolname = PQfnumber(res, "rolname");
+	i_oprkind = PQfnumber(res, "oprkind");
 	i_oprcode = PQfnumber(res, "oprcode");
 
 	for (i = 0; i < ntups; i++)
@@ -3203,6 +3208,7 @@ getOperators(int *numOprs)
 		oprinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_oprnamespace)),
 												  oprinfo[i].dobj.catId.oid);
 		oprinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
+		oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
 		oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
 
 		/* Decide whether we want to dump it */
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index 913d102..11c4d37 100644
--- a/src/bin/pg_dump/pg_dump.h
+++ b/src/bin/pg_dump/pg_dump.h
@@ -204,6 +204,7 @@ typedef struct _oprInfo
 {
 	DumpableObject dobj;
 	char	   *rolname;
+	char		oprkind;
 	Oid			oprcode;
 } OprInfo;
 
diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c
index c605a3b..4d1ae94 100644
--- a/src/bin/pg_dump/pg_dump_sort.c
+++ b/src/bin/pg_dump/pg_dump_sort.c
@@ -178,6 +178,16 @@ DOTypeNameCompare(const void *p1, const void *p2)
 		if (cmpval != 0)
 			return cmpval;
 	}
+	else if (obj1->objType == DO_OPERATOR)
+	{
+		OprInfo	*oobj1 = *(OprInfo * const *) p1;
+		OprInfo *oobj2 = *(OprInfo * const *) p2;
+
+		/* oprkind is 'l', 'r', or 'b'; this sorts prefix, postfix, infix */
+		cmpval = (oobj2->oprkind - oobj1->oprkind);
+		if (cmpval != 0)
+			return cmpval;
+	}
 
 	/* Usually shouldn't get here, but if we do, sort by OID */
 	return oidcmp(obj1->catId.oid, obj2->catId.oid);

commit 104e7dac28c56dcaf9b778dff60a5daefc3a0661
Author: Peter Eisentraut <peter_e@gmx.net>
Date:   Thu Jan 5 19:48:55 2012 +0200

    Improve ALTER DOMAIN / DROP CONSTRAINT with nonexistent constraint
    
    ALTER DOMAIN / DROP CONSTRAINT on a nonexistent constraint name did
    not report any error.  Now it reports an error.  The IF EXISTS option
    was added to provide the usual behavior of ignoring an attempt to drop a
    nonexistent object.

diff --git a/doc/src/sgml/ref/alter_domain.sgml b/doc/src/sgml/ref/alter_domain.sgml
index 29504cc..2511a12 100644
--- a/doc/src/sgml/ref/alter_domain.sgml
+++ b/doc/src/sgml/ref/alter_domain.sgml
@@ -30,7 +30,7 @@ ALTER DOMAIN <replaceable class="PARAMETER">name</replaceable>
 ALTER DOMAIN <replaceable class="PARAMETER">name</replaceable>
     ADD <replaceable class="PARAMETER">domain_constraint</replaceable> [ NOT VALID ]
 ALTER DOMAIN <replaceable class="PARAMETER">name</replaceable>
-    DROP CONSTRAINT <replaceable class="PARAMETER">constraint_name</replaceable> [ RESTRICT | CASCADE ]
+    DROP CONSTRAINT [ IF EXISTS ] <replaceable class="PARAMETER">constraint_name</replaceable> [ RESTRICT | CASCADE ]
 ALTER DOMAIN <replaceable class="PARAMETER">name</replaceable>
     VALIDATE CONSTRAINT <replaceable class="PARAMETER">constraint_name</replaceable>
 ALTER DOMAIN <replaceable class="PARAMETER">name</replaceable>
@@ -92,10 +92,12 @@ ALTER DOMAIN <replaceable class="PARAMETER">name</replaceable>
    </varlistentry>
 
    <varlistentry>
-    <term>DROP CONSTRAINT</term>
+    <term>DROP CONSTRAINT [ IF EXISTS ]</term>
     <listitem>
      <para>
       This form drops constraints on a domain.
+      If <literal>IF EXISTS</literal> is specified and the constraint
+      does not exist, no error is thrown. In this case a notice is issued instead.
      </para>
     </listitem>
    </varlistentry>
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 4bbbaf3..0f8af31 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -2264,7 +2264,7 @@ AlterDomainNotNull(List *names, bool notNull)
  */
 void
 AlterDomainDropConstraint(List *names, const char *constrName,
-						  DropBehavior behavior)
+						  DropBehavior behavior, bool missing_ok)
 {
 	TypeName   *typename;
 	Oid			domainoid;
@@ -2274,6 +2274,7 @@ AlterDomainDropConstraint(List *names, const char *constrName,
 	SysScanDesc conscan;
 	ScanKeyData key[1];
 	HeapTuple	contup;
+	bool		found = false;
 
 	/* Make a TypeName so we can use standard type lookup machinery */
 	typename = makeTypeNameFromNameList(names);
@@ -2317,6 +2318,7 @@ AlterDomainDropConstraint(List *names, const char *constrName,
 			conobj.objectSubId = 0;
 
 			performDeletion(&conobj, behavior);
+			found = true;
 		}
 	}
 	/* Clean up after the scan */
@@ -2324,6 +2326,19 @@ AlterDomainDropConstraint(List *names, const char *constrName,
 	heap_close(conrel, RowExclusiveLock);
 
 	heap_close(rel, NoLock);
+
+	if (!found)
+	{
+		if (!missing_ok)
+			ereport(ERROR,
+					(errcode(ERRCODE_UNDEFINED_OBJECT),
+					 errmsg("constraint \"%s\" of domain \"%s\" does not exist",
+					   constrName, TypeNameToString(typename))));
+		else
+			ereport(NOTICE,
+					(errmsg("constraint \"%s\" of domain \"%s\" does not exist, skipping",
+							constrName, TypeNameToString(typename))));
+	}
 }
 
 /*
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index c9d3e2e..756e3a6 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -2568,6 +2568,7 @@ _copyAlterDomainStmt(const AlterDomainStmt *from)
 	COPY_STRING_FIELD(name);
 	COPY_NODE_FIELD(def);
 	COPY_SCALAR_FIELD(behavior);
+	COPY_SCALAR_FIELD(missing_ok);
 
 	return newnode;
 }
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index efe2c96..9eff42f 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -1034,6 +1034,7 @@ _equalAlterDomainStmt(const AlterDomainStmt *a, const AlterDomainStmt *b)
 	COMPARE_STRING_FIELD(name);
 	COMPARE_NODE_FIELD(def);
 	COMPARE_SCALAR_FIELD(behavior);
+	COMPARE_SCALAR_FIELD(missing_ok);
 
 	return true;
 }
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 29df0c1..87d7305 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -7616,6 +7616,18 @@ AlterDomainStmt:
 					n->typeName = $3;
 					n->name = $6;
 					n->behavior = $7;
+					n->missing_ok = false;
+					$$ = (Node *)n;
+				}
+			/* ALTER DOMAIN <domain> DROP CONSTRAINT IF EXISTS <name> [RESTRICT|CASCADE] */
+			| ALTER DOMAIN_P any_name DROP CONSTRAINT IF_P EXISTS name opt_drop_behavior
+				{
+					AlterDomainStmt *n = makeNode(AlterDomainStmt);
+					n->subtype = 'X';
+					n->typeName = $3;
+					n->name = $8;
+					n->behavior = $9;
+					n->missing_ok = true;
 					$$ = (Node *)n;
 				}
 			/* ALTER DOMAIN <domain> VALIDATE CONSTRAINT <name> */
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index 923da6a..704bbe9 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -768,7 +768,8 @@ standard_ProcessUtility(Node *parsetree,
 					case 'X':	/* DROP CONSTRAINT */
 						AlterDomainDropConstraint(stmt->typeName,
 												  stmt->name,
-												  stmt->behavior);
+												  stmt->behavior,
+												  stmt->missing_ok);
 						break;
 					case 'V':	/* VALIDATE CONSTRAINT */
 						AlterDomainValidateConstraint(stmt->typeName,
diff --git a/src/include/commands/typecmds.h b/src/include/commands/typecmds.h
index 11d05b8..3748bd5 100644
--- a/src/include/commands/typecmds.h
+++ b/src/include/commands/typecmds.h
@@ -33,7 +33,7 @@ extern void AlterDomainNotNull(List *names, bool notNull);
 extern void AlterDomainAddConstraint(List *names, Node *constr);
 extern void AlterDomainValidateConstraint(List *names, char *constrName);
 extern void AlterDomainDropConstraint(List *names, const char *constrName,
-						  DropBehavior behavior);
+									  DropBehavior behavior, bool missing_ok);
 
 extern List *GetDomainConstraints(Oid typeOid);
 
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index bee0e18..0be3fb1 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -1264,6 +1264,7 @@ typedef struct AlterDomainStmt
 	char	   *name;			/* column or constraint name to act on */
 	Node	   *def;			/* definition of default or constraint */
 	DropBehavior behavior;		/* RESTRICT or CASCADE for DROP cases */
+	bool		missing_ok;		/* skip error if missing? */
 } AlterDomainStmt;
 
 
diff --git a/src/test/regress/expected/domain.out b/src/test/regress/expected/domain.out
index 3e44e3b..4f47374 100644
--- a/src/test/regress/expected/domain.out
+++ b/src/test/regress/expected/domain.out
@@ -358,6 +358,10 @@ alter domain con drop constraint t;
 insert into domcontest values (-5); --fails
 ERROR:  value for domain con violates check constraint "con_check"
 insert into domcontest values (42);
+alter domain con drop constraint nonexistent;
+ERROR:  constraint "nonexistent" of domain "con" does not exist
+alter domain con drop constraint if exists nonexistent;
+NOTICE:  constraint "nonexistent" of domain "con" does not exist, skipping
 -- Test ALTER DOMAIN .. CONSTRAINT .. NOT VALID
 create domain things AS INT;
 CREATE TABLE thethings (stuff things);
diff --git a/src/test/regress/sql/domain.sql b/src/test/regress/sql/domain.sql
index 1fd3900..ad049b7 100644
--- a/src/test/regress/sql/domain.sql
+++ b/src/test/regress/sql/domain.sql
@@ -259,6 +259,9 @@ alter domain con drop constraint t;
 insert into domcontest values (-5); --fails
 insert into domcontest values (42);
 
+alter domain con drop constraint nonexistent;
+alter domain con drop constraint if exists nonexistent;
+
 -- Test ALTER DOMAIN .. CONSTRAINT .. NOT VALID
 create domain things AS INT;
 CREATE TABLE thethings (stuff things);

commit 2abefd9a92f3c02ad4f6030ac1578bbf314db368
Author: Andrew Dunstan <andrew@dunslane.net>
Date:   Thu Jan 5 12:01:18 2012 -0500

    Work around perl bug in SvPVutf8().
    
    Certain things like typeglobs or readonly things like $^V cause
    perl's SvPVutf8() to die nastily and crash the backend. To avoid
    that bug we make a copy of the object, which will subsequently be
    garbage collected.
    
    Back patched to 9.1 where we first started using SvPVutf8().
    
    Per -hackers discussion. Original problem reported by David Wheeler.

diff --git a/src/pl/plperl/plperl_helpers.h b/src/pl/plperl/plperl_helpers.h
index ac0a97d..c671820 100644
--- a/src/pl/plperl/plperl_helpers.h
+++ b/src/pl/plperl/plperl_helpers.h
@@ -50,8 +50,14 @@ sv2cstr(SV *sv)
 
 	/*
 	 * get a utf8 encoded char * out of perl. *note* it may not be valid utf8!
+	 *
+	 * SvPVutf8() croaks nastily on certain things, like typeglobs and
+	 * readonly object such as $^V. That's a perl bug - it's not supposed to
+	 * happen. To avoid crashing the backend, we make a mortal copy of the
+	 * sv before passing it to SvPVutf8(). The copy will be garbage collected
+	 * very soon (see perldoc perlguts).
 	 */
-	val = SvPVutf8(sv, len);
+	val = SvPVutf8(sv_mortalcopy(sv), len);
 
 	/*
 	 * we use perls length in the event we had an embedded null byte to ensure

commit 8cf82ac53e9a3d5dd86f16106e3398063a526817
Author: Michael Meskes <meskes@postgresql.org>
Date:   Thu Jan 5 14:08:45 2012 +0100

    Ecpglib stores variables that are used in DECLARE statements in a global list.
    This list is now freed when the last connection has been closed.
    
    Closes: #6366

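The patch below frees that global list with a compact comma-operator for-loop.
An equivalent, more explicit version of the same idea looks like this (the
struct and the allocation are simplified stand-ins for the ecpglib
definitions, shown only to spell out the detach-then-free order):

    #include <stdlib.h>

    struct var_list
    {
        int              number;
        void            *pointer;
        struct var_list *next;
    };

    /* Detach each node before freeing it, so freed memory is never read. */
    static void
    free_var_list(struct var_list **listp)
    {
        while (*listp != NULL)
        {
            struct var_list *cur = *listp;

            *listp = cur->next;
            free(cur);
        }
    }

    int
    main(void)
    {
        struct var_list *ivlist = NULL;

        for (int i = 0; i < 2; i++)     /* push two dummy entries */
        {
            struct var_list *n = malloc(sizeof(*n));

            n->number = i;
            n->pointer = NULL;
            n->next = ivlist;
            ivlist = n;
        }
        free_var_list(&ivlist);
        return 0;
    }
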
diff --git a/src/interfaces/ecpg/ecpglib/connect.c b/src/interfaces/ecpg/ecpglib/connect.c
index 997046b..909ba70 100644
--- a/src/interfaces/ecpg/ecpglib/connect.c
+++ b/src/interfaces/ecpg/ecpglib/connect.c
@@ -149,6 +149,13 @@ ecpg_finish(struct connection * act)
 		for (cache = act->cache_head; cache; ptr = cache, cache = cache->next, ecpg_free(ptr));
 		ecpg_free(act->name);
 		ecpg_free(act);
+		/* delete cursor variables when last connection gets closed */
+		if (all_connections == NULL)
+		{
+			struct var_list *iv_ptr;
+
+			for (; ivlist; iv_ptr = ivlist, ivlist = ivlist->next, ecpg_free(iv_ptr));
+		}
 	}
 	else
 		ecpg_log("ecpg_finish: called an extra time\n");
diff --git a/src/interfaces/ecpg/ecpglib/extern.h b/src/interfaces/ecpg/ecpglib/extern.h
index 96d49a4..bd1ffb0 100644
--- a/src/interfaces/ecpg/ecpglib/extern.h
+++ b/src/interfaces/ecpg/ecpglib/extern.h
@@ -121,6 +121,15 @@ struct variable
 	struct variable *next;
 };
 
+struct var_list
+{
+	int	number;
+	void   *pointer;
+	struct var_list *next;
+};
+
+extern struct var_list *ivlist;
+
 /* Here are some methods used by the lib. */
 
 /* Returns a pointer to a string containing a simple type name. */
diff --git a/src/interfaces/ecpg/ecpglib/misc.c b/src/interfaces/ecpg/ecpglib/misc.c
index 98e0597..f244782 100644
--- a/src/interfaces/ecpg/ecpglib/misc.c
+++ b/src/interfaces/ecpg/ecpglib/misc.c
@@ -501,12 +501,7 @@ ecpg_gettext(const char *msgid)
 }
 #endif   /* ENABLE_NLS */
 
-static struct var_list
-{
-	int			number;
-	void	   *pointer;
-	struct var_list *next;
-}	*ivlist = NULL;
+struct var_list *ivlist = NULL;
 
 void
 ECPGset_var(int number, void *pointer, int lineno)

commit dfd26f9c5f371437f243249025863ea9911aacaa
Author: Tom Lane <tgl@sss.pgh.pa.us>
Date:   Wed Jan 4 18:30:55 2012 -0500

    Make executor's SELECT INTO code save and restore original tuple receiver.
    
    As previously coded, the QueryDesc's dest pointer was left dangling
    (pointing at an already-freed receiver object) after ExecutorEnd.  It's a
    bit astonishing that it took us this long to notice, and I'm not sure that
    the known problem case with SQL functions is the only one.  Fix it by
    saving and restoring the original receiver pointer, which seems the most
    bulletproof way of ensuring any related bugs are also covered.
    
    Per bug #6379 from Paul Ramsey.  Back-patch to 8.4 where the current
    handling of SELECT INTO was introduced.

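The shape of the fix is a plain save-and-restore: remember the caller's
receiver before substituting our own, and put the original back before the
substitute is destroyed.  A standalone toy version of that pattern is sketched
below (the types and names are simplified stand-ins, not the executor's
structures).

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct DestReceiver
    {
        const char *name;
    } DestReceiver;

    typedef struct QueryDesc
    {
        DestReceiver *dest;
    } QueryDesc;

    typedef struct
    {
        DestReceiver  pub;          /* "public" part, first member on purpose */
        DestReceiver *origdest;     /* caller's original receiver */
    } IntoRelReceiver;

    static void
    open_into_rel(QueryDesc *qd)
    {
        IntoRelReceiver *my = malloc(sizeof(*my));

        my->pub.name = "into-rel";
        my->origdest = qd->dest;    /* save the original */
        qd->dest = &my->pub;        /* substitute our own receiver */
    }

    static void
    close_into_rel(QueryDesc *qd)
    {
        IntoRelReceiver *my = (IntoRelReceiver *) qd->dest;

        qd->dest = my->origdest;    /* restore before freeing */
        free(my);
    }

    int
    main(void)
    {
        DestReceiver caller = { "caller" };
        QueryDesc    qd = { &caller };

        open_into_rel(&qd);
        close_into_rel(&qd);
        printf("dest after cleanup: %s\n", qd.dest->name);  /* "caller" */
        return 0;
    }
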
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 54df18d..569d0ba 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -2445,6 +2445,7 @@ typedef struct
 {
 	DestReceiver pub;			/* publicly-known function pointers */
 	EState	   *estate;			/* EState we are working with */
+	DestReceiver *origdest;		/* QueryDesc's original receiver */
 	Relation	rel;			/* Relation to write to */
 	int			hi_options;		/* heap_insert performance options */
 	BulkInsertState bistate;	/* bulk insert state */
@@ -2651,12 +2652,14 @@ OpenIntoRel(QueryDesc *queryDesc)
 	/*
 	 * Now replace the query's DestReceiver with one for SELECT INTO
 	 */
-	queryDesc->dest = CreateDestReceiver(DestIntoRel);
-	myState = (DR_intorel *) queryDesc->dest;
+	myState = (DR_intorel *) CreateDestReceiver(DestIntoRel);
 	Assert(myState->pub.mydest == DestIntoRel);
 	myState->estate = estate;
+	myState->origdest = queryDesc->dest;
 	myState->rel = intoRelationDesc;
 
+	queryDesc->dest = (DestReceiver *) myState;
+
 	/*
 	 * We can skip WAL-logging the insertions, unless PITR or streaming
 	 * replication is in use. We can skip the FSM in any case.
@@ -2677,8 +2680,11 @@ CloseIntoRel(QueryDesc *queryDesc)
 {
 	DR_intorel *myState = (DR_intorel *) queryDesc->dest;
 
-	/* OpenIntoRel might never have gotten called */
-	if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
+	/*
+	 * OpenIntoRel might never have gotten called, and we also want to guard
+	 * against double destruction.
+	 */
+	if (myState && myState->pub.mydest == DestIntoRel)
 	{
 		FreeBulkInsertState(myState->bistate);
 
@@ -2689,7 +2695,11 @@ CloseIntoRel(QueryDesc *queryDesc)
 		/* close rel, but keep lock until commit */
 		heap_close(myState->rel, NoLock);
 
-		myState->rel = NULL;
+		/* restore the receiver belonging to executor's caller */
+		queryDesc->dest = myState->origdest;
+
+		/* might as well invoke my destructor */
+		intorel_destroy((DestReceiver *) myState);
 	}
 }
 
diff --git a/src/test/regress/expected/select_into.out b/src/test/regress/expected/select_into.out
index 9ed4229..c8327f6 100644
--- a/src/test/regress/expected/select_into.out
+++ b/src/test/regress/expected/select_into.out
@@ -50,3 +50,28 @@ DETAIL:  drop cascades to table selinto_schema.tmp1
 drop cascades to table selinto_schema.tmp2
 drop cascades to table selinto_schema.tmp3
 DROP USER selinto_user;
+--
+-- CREATE TABLE AS/SELECT INTO as last command in a SQL function
+-- have been known to cause problems
+--
+CREATE FUNCTION make_table() RETURNS VOID
+AS $$
+  CREATE TABLE created_table AS SELECT * FROM int8_tbl;
+$$ LANGUAGE SQL;
+SELECT make_table();
+ make_table 
+------------
+ 
+(1 row)
+
+SELECT * FROM created_table;
+        q1        |        q2         
+------------------+-------------------
+              123 |               456
+              123 |  4567890123456789
+ 4567890123456789 |               123
+ 4567890123456789 |  4567890123456789
+ 4567890123456789 | -4567890123456789
+(5 rows)
+
+DROP TABLE created_table;
diff --git a/src/test/regress/sql/select_into.sql b/src/test/regress/sql/select_into.sql
index 039d35c..09d210b 100644
--- a/src/test/regress/sql/select_into.sql
+++ b/src/test/regress/sql/select_into.sql
@@ -52,3 +52,18 @@ RESET SESSION AUTHORIZATION;
 
 DROP SCHEMA selinto_schema CASCADE;
 DROP USER selinto_user;
+
+--
+-- CREATE TABLE AS/SELECT INTO as last command in a SQL function
+-- have been known to cause problems
+--
+CREATE FUNCTION make_table() RETURNS VOID
+AS $$
+  CREATE TABLE created_table AS SELECT * FROM int8_tbl;
+$$ LANGUAGE SQL;
+
+SELECT make_table();
+
+SELECT * FROM created_table;
+
+DROP TABLE created_table;

commit 10ecc0d5867b8dd39cf506b8bb02053ede05fb60
Author: Michael Meskes <meskes@postgresql.org>
Date:   Wed Jan 4 10:01:14 2012 +0100

    Made the ecpg code more readable.

diff --git a/src/interfaces/ecpg/preproc/ecpg.header b/src/interfaces/ecpg/preproc/ecpg.header
index 94c45c8..88d9cf5 100644
--- a/src/interfaces/ecpg/preproc/ecpg.header
+++ b/src/interfaces/ecpg/preproc/ecpg.header
@@ -224,16 +224,16 @@ adjust_outofscope_cursor_vars(struct cursor *cur)
 {
 	/* Informix accepts DECLARE with variables that are out of scope when OPEN is called.
 	 * For instance you can DECLARE a cursor in one function, and OPEN/FETCH/CLOSE
-	 * it in other functions. This is very useful for e.g. event-driver programming,
+	 * it in another functions. This is very useful for e.g. event-driver programming,
 	 * but may also lead to dangerous programming. The limitation when this is allowed
-	 * and doesn's cause problems have to be documented, like the allocated variables
+	 * and doesn't cause problems have to be documented, like the allocated variables
 	 * must not be realloc()'ed.
 	 *
 	 * We have to change the variables to our own struct and just store the pointer
 	 * instead of the variable. Do it only for local variables, not for globals.
 	 */
 
-	char *result = mm_strdup("");
+	char *result = EMPTY;
 	int insert;
 
 	for (insert = 1; insert >= 0; insert--)
@@ -247,13 +247,14 @@ adjust_outofscope_cursor_vars(struct cursor *cur)
 
 		for (ptr = list; ptr != NULL; ptr = ptr->next)
 		{
-			char temp[20];
+			char var_text[20];
 			char *original_var;
 			bool skip_set_var = false;
+			bool var_ptr = false;
 
 			/* change variable name to "ECPGget_var(<counter>)" */
 			original_var = ptr->variable->name;
-			sprintf(temp, "%d))", ecpg_internal_var);
+			sprintf(var_text, "%d))", ecpg_internal_var);
 
 			/* Don't emit ECPGset_var() calls for global variables */
 			if (ptr->variable->brace_level == 0)
@@ -276,13 +277,12 @@ adjust_outofscope_cursor_vars(struct cursor *cur)
 				newvar = new_variable(cat_str(4, mm_strdup("("),
 											  mm_strdup(ecpg_type_name(ptr->variable->type->u.element->type)),
 											  mm_strdup(" *)(ECPGget_var("),
-											  mm_strdup(temp)),
+											  mm_strdup(var_text)),
 									  ECPGmake_array_type(ECPGmake_simple_type(ptr->variable->type->u.element->type,
 																			   mm_strdup("1"),
 																			   ptr->variable->type->u.element->counter),
 														  ptr->variable->type->size),
 									  0);
-				sprintf(temp, "%d, (", ecpg_internal_var++);
 			}
 			else if ((ptr->variable->type->type == ECPGt_varchar
 					  || ptr->variable->type->type == ECPGt_char
@@ -293,59 +293,57 @@ adjust_outofscope_cursor_vars(struct cursor *cur)
 				newvar = new_variable(cat_str(4, mm_strdup("("),
 											  mm_strdup(ecpg_type_name(ptr->variable->type->type)),
 											  mm_strdup(" *)(ECPGget_var("),
-											  mm_strdup(temp)),
+											  mm_strdup(var_text)),
 									  ECPGmake_simple_type(ptr->variable->type->type,
 														   ptr->variable->type->size,
 														   ptr->variable->type->counter),
 									  0);
 				if (ptr->variable->type->type == ECPGt_varchar)
-					sprintf(temp, "%d, &(", ecpg_internal_var++);
-				else
-					sprintf(temp, "%d, (", ecpg_internal_var++);
+					var_ptr = true;
 			}
 			else if (ptr->variable->type->type == ECPGt_struct
 					 || ptr->variable->type->type == ECPGt_union)
 			{
-				sprintf(temp, "%d)))", ecpg_internal_var);
-				newvar = new_variable(cat_str(4, mm_strdup("(*("),
+				newvar = new_variable(cat_str(5, mm_strdup("(*("),
 											  mm_strdup(ptr->variable->type->type_name),
 											  mm_strdup(" *)(ECPGget_var("),
-											  mm_strdup(temp)),
+											  mm_strdup(var_text),
+											  mm_strdup(")")),
 									  ECPGmake_struct_type(ptr->variable->type->u.members,
 														   ptr->variable->type->type,
 														   ptr->variable->type->type_name,
 														   ptr->variable->type->struct_sizeof),
 									  0);
-				sprintf(temp, "%d, &(", ecpg_internal_var++);
+				var_ptr = true;
 			}
 			else if (ptr->variable->type->type == ECPGt_array)
 			{
 				if (ptr->variable->type->u.element->type == ECPGt_struct
 					|| ptr->variable->type->u.element->type == ECPGt_union)
 				{
-					sprintf(temp, "%d)))", ecpg_internal_var);
-					newvar = new_variable(cat_str(4, mm_strdup("(*("),
-												  mm_strdup(ptr->variable->type->u.element->type_name),
-												  mm_strdup(" *)(ECPGget_var("), mm_strdup(temp)),
+					newvar = new_variable(cat_str(5, mm_strdup("(*("),
+											  mm_strdup(ptr->variable->type->u.element->type_name),
+											  mm_strdup(" *)(ECPGget_var("),
+											  mm_strdup(var_text),
+											  mm_strdup(")")),
 										  ECPGmake_struct_type(ptr->variable->type->u.element->u.members,
 															   ptr->variable->type->u.element->type,
 															   ptr->variable->type->u.element->type_name,
 															   ptr->variable->type->u.element->struct_sizeof),
 										  0);
-					sprintf(temp, "%d, (", ecpg_internal_var++);
 				}
 				else
 				{
 					newvar = new_variable(cat_str(4, mm_strdup("("),
 												  mm_strdup(ecpg_type_name(ptr->variable->type->type)),
 												  mm_strdup(" *)(ECPGget_var("),
-												  mm_strdup(temp)),
+												  mm_strdup(var_text)),
 										  ECPGmake_array_type(ECPGmake_simple_type(ptr->variable->type->u.element->type,
 																				   ptr->variable->type->u.element->size,
 																				   ptr->variable->type->u.element->counter),
 															  ptr->variable->type->size),
 										  0);
-					sprintf(temp, "%d, &(", ecpg_internal_var++);
+					var_ptr = true;
 				}
 			}
 			else
@@ -353,19 +351,22 @@ adjust_outofscope_cursor_vars(struct cursor *cur)
 				newvar = new_variable(cat_str(4, mm_strdup("*("),
 											  mm_strdup(ecpg_type_name(ptr->variable->type->type)),
 											  mm_strdup(" *)(ECPGget_var("),
-											  mm_strdup(temp)),
+											  mm_strdup(var_text)),
 									  ECPGmake_simple_type(ptr->variable->type->type,
 														   ptr->variable->type->size,
 														   ptr->variable->type->counter),
 									  0);
-				sprintf(temp, "%d, &(", ecpg_internal_var++);
+				var_ptr = true;
 			}
 
-			/* create call to "ECPGset_var(<counter>, <pointer>. <line number>)" */
+			/* create call to "ECPGset_var(<counter>, <connection>, <pointer>. <line number>)" */
 			if (!skip_set_var)
+			{
+				sprintf(var_text, "%d, %s", ecpg_internal_var++, var_ptr ? "&(" : "(");
 				result = cat_str(5, result, mm_strdup("ECPGset_var("),
-								 mm_strdup(temp), mm_strdup(original_var),
+								 mm_strdup(var_text), mm_strdup(original_var),
 								 mm_strdup("), __LINE__);\n"));
+			}
 
 			/* now the indicator if there is one and it's not a global variable */
 			if ((ptr->indicator->type->type == ECPGt_NO_INDICATOR) || (ptr->indicator->brace_level == 0))
@@ -376,50 +377,51 @@ adjust_outofscope_cursor_vars(struct cursor *cur)
 			{
 				/* change variable name to "ECPGget_var(<counter>)" */
 				original_var = ptr->indicator->name;
-				sprintf(temp, "%d))", ecpg_internal_var);
+				sprintf(var_text, "%d))", ecpg_internal_var);
+				var_ptr = false;
 
 				if (ptr->indicator->type->type == ECPGt_struct
 					|| ptr->indicator->type->type == ECPGt_union)
 				{
-					sprintf(temp, "%d)))", ecpg_internal_var);
-					newind = new_variable(cat_str(4, mm_strdup("(*("),
-												  mm_strdup(ptr->indicator->type->type_name),
-												  mm_strdup(" *)(ECPGget_var("),
-												  mm_strdup(temp)),
+					newind = new_variable(cat_str(5, mm_strdup("(*("),
+											  mm_strdup(ptr->indicator->type->type_name),
+											  mm_strdup(" *)(ECPGget_var("),
+											  mm_strdup(var_text),
+											  mm_strdup(")")),
 										  ECPGmake_struct_type(ptr->indicator->type->u.members,
 															   ptr->indicator->type->type,
 															   ptr->indicator->type->type_name,
 															   ptr->indicator->type->struct_sizeof),
 										  0);
-					sprintf(temp, "%d, &(", ecpg_internal_var++);
+					var_ptr = true;
 				}
 				else if (ptr->indicator->type->type == ECPGt_array)
 				{
 					if (ptr->indicator->type->u.element->type == ECPGt_struct
 						|| ptr->indicator->type->u.element->type == ECPGt_union)
 					{
-						sprintf(temp, "%d)))", ecpg_internal_var);
-						newind = new_variable(cat_str(4, mm_strdup("(*("),
-													  mm_strdup(ptr->indicator->type->u.element->type_name),
-													  mm_strdup(" *)(ECPGget_var("), mm_strdup(temp)),
+						newind = new_variable(cat_str(5, mm_strdup("(*("),
+											  mm_strdup(ptr->indicator->type->u.element->type_name),
+											  mm_strdup(" *)(ECPGget_var("),
+											  mm_strdup(var_text),
+											  mm_strdup(")")),
 											  ECPGmake_struct_type(ptr->indicator->type->u.element->u.members,
 																   ptr->indicator->type->u.element->type,
 																   ptr->indicator->type->u.element->type_name,
 																   ptr->indicator->type->u.element->struct_sizeof),
 											  0);
-						sprintf(temp, "%d, (", ecpg_internal_var++);
 					}
 					else
 					{
 						newind = new_variable(cat_str(4, mm_strdup("("),
 													  mm_strdup(ecpg_type_name(ptr->indicator->type->u.element->type)),
-													  mm_strdup(" *)(ECPGget_var("), mm_strdup(temp)),
+													  mm_strdup(" *)(ECPGget_var("), mm_strdup(var_text)),
 											  ECPGmake_array_type(ECPGmake_simple_type(ptr->indicator->type->u.element->type,
 																					   ptr->indicator->type->u.element->size,
 																					   ptr->indicator->type->u.element->counter),
 																  ptr->indicator->type->size),
 											  0);
-						sprintf(temp, "%d, &(", ecpg_internal_var++);
+						var_ptr = true;
 					}
 				}
 				else if (atoi(ptr->indicator->type->size) > 1)
@@ -427,29 +429,29 @@ adjust_outofscope_cursor_vars(struct cursor *cur)
 					newind = new_variable(cat_str(4, mm_strdup("("),
 												  mm_strdup(ecpg_type_name(ptr->indicator->type->type)),
 												  mm_strdup(" *)(ECPGget_var("),
-												  mm_strdup(temp)),
+												  mm_strdup(var_text)),
 										  ECPGmake_simple_type(ptr->indicator->type->type,
 															   ptr->indicator->type->size,
 															   ptr->variable->type->counter),
 										  0);
-					sprintf(temp, "%d, (", ecpg_internal_var++);
 				}
 				else
 				{
 					newind = new_variable(cat_str(4, mm_strdup("*("),
 												  mm_strdup(ecpg_type_name(ptr->indicator->type->type)),
 												  mm_strdup(" *)(ECPGget_var("),
-												  mm_strdup(temp)),
+												  mm_strdup(var_text)),
 										  ECPGmake_simple_type(ptr->indicator->type->type,
 															   ptr->indicator->type->size,
 															   ptr->variable->type->counter),
 										  0);
-					sprintf(temp, "%d, &(", ecpg_internal_var++);
+					var_ptr = true;
 				}
 
 				/* create call to "ECPGset_var(<counter>, <pointer>. <line number>)" */
+				sprintf(var_text, "%d, %s", ecpg_internal_var++, var_ptr ? "&(" : "(");
 				result = cat_str(5, result, mm_strdup("ECPGset_var("),
-								 mm_strdup(temp), mm_strdup(original_var),
+								 mm_strdup(var_text), mm_strdup(original_var),
 								 mm_strdup("), __LINE__);\n"));
 			}
 
diff --git a/src/interfaces/ecpg/test/expected/preproc-outofscope.c b/src/interfaces/ecpg/test/expected/preproc-outofscope.c
index a30b721..125d7d8 100644
--- a/src/interfaces/ecpg/test/expected/preproc-outofscope.c
+++ b/src/interfaces/ecpg/test/expected/preproc-outofscope.c
@@ -202,16 +202,16 @@ static void
 open_cur1(void)
 {
 	{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "declare mycur cursor for select * from a1", ECPGt_EOIT, 
-	ECPGt_int,&((*( MYTYPE  *)(ECPGget_var( 0))).id),(long)1,(long)1,sizeof(int), 
-	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1))).id),(long)1,(long)1,sizeof(int), 
-	ECPGt_char,&((*( MYTYPE  *)(ECPGget_var( 0))).t),(long)64,(long)1,(64)*sizeof(char), 
-	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1))).t),(long)1,(long)1,sizeof(int), 
-	ECPGt_double,&((*( MYTYPE  *)(ECPGget_var( 0))).d1),(long)1,(long)1,sizeof(double), 
-	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1))).d1),(long)1,(long)1,sizeof(int), 
-	ECPGt_double,&((*( MYTYPE  *)(ECPGget_var( 0))).d2),(long)1,(long)1,sizeof(double), 
-	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1))).d2),(long)1,(long)1,sizeof(int), 
-	ECPGt_char,&((*( MYTYPE  *)(ECPGget_var( 0))).c),(long)30,(long)1,(30)*sizeof(char), 
-	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1))).c),(long)1,(long)1,sizeof(int), ECPGt_EORT);
+	ECPGt_int,&((*( MYTYPE  *)(ECPGget_var( 0)) ).id),(long)1,(long)1,sizeof(int), 
+	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1)) ).id),(long)1,(long)1,sizeof(int), 
+	ECPGt_char,&((*( MYTYPE  *)(ECPGget_var( 0)) ).t),(long)64,(long)1,(64)*sizeof(char), 
+	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1)) ).t),(long)1,(long)1,sizeof(int), 
+	ECPGt_double,&((*( MYTYPE  *)(ECPGget_var( 0)) ).d1),(long)1,(long)1,sizeof(double), 
+	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1)) ).d1),(long)1,(long)1,sizeof(int), 
+	ECPGt_double,&((*( MYTYPE  *)(ECPGget_var( 0)) ).d2),(long)1,(long)1,sizeof(double), 
+	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1)) ).d2),(long)1,(long)1,sizeof(int), 
+	ECPGt_char,&((*( MYTYPE  *)(ECPGget_var( 0)) ).c),(long)30,(long)1,(30)*sizeof(char), 
+	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1)) ).c),(long)1,(long)1,sizeof(int), ECPGt_EORT);
 #line 40 "outofscope.pgc"
 
 if (sqlca.sqlcode < 0) exit (1);}
@@ -226,16 +226,16 @@ static void
 get_record1(void)
 {
 	{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "fetch mycur", ECPGt_EOIT, 
-	ECPGt_int,&((*( MYTYPE  *)(ECPGget_var( 0))).id),(long)1,(long)1,sizeof(int), 
-	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1))).id),(long)1,(long)1,sizeof(int), 
-	ECPGt_char,&((*( MYTYPE  *)(ECPGget_var( 0))).t),(long)64,(long)1,(64)*sizeof(char), 
-	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1))).t),(long)1,(long)1,sizeof(int), 
-	ECPGt_double,&((*( MYTYPE  *)(ECPGget_var( 0))).d1),(long)1,(long)1,sizeof(double), 
-	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1))).d1),(long)1,(long)1,sizeof(int), 
-	ECPGt_double,&((*( MYTYPE  *)(ECPGget_var( 0))).d2),(long)1,(long)1,sizeof(double), 
-	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1))).d2),(long)1,(long)1,sizeof(int), 
-	ECPGt_char,&((*( MYTYPE  *)(ECPGget_var( 0))).c),(long)30,(long)1,(30)*sizeof(char), 
-	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1))).c),(long)1,(long)1,sizeof(int), ECPGt_EORT);
+	ECPGt_int,&((*( MYTYPE  *)(ECPGget_var( 0)) ).id),(long)1,(long)1,sizeof(int), 
+	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1)) ).id),(long)1,(long)1,sizeof(int), 
+	ECPGt_char,&((*( MYTYPE  *)(ECPGget_var( 0)) ).t),(long)64,(long)1,(64)*sizeof(char), 
+	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1)) ).t),(long)1,(long)1,sizeof(int), 
+	ECPGt_double,&((*( MYTYPE  *)(ECPGget_var( 0)) ).d1),(long)1,(long)1,sizeof(double), 
+	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1)) ).d1),(long)1,(long)1,sizeof(int), 
+	ECPGt_double,&((*( MYTYPE  *)(ECPGget_var( 0)) ).d2),(long)1,(long)1,sizeof(double), 
+	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1)) ).d2),(long)1,(long)1,sizeof(int), 
+	ECPGt_char,&((*( MYTYPE  *)(ECPGget_var( 0)) ).c),(long)30,(long)1,(30)*sizeof(char), 
+	ECPGt_int,&((*( MYNULLTYPE  *)(ECPGget_var( 1)) ).c),(long)1,(long)1,sizeof(int), ECPGt_EORT);
 #line 49 "outofscope.pgc"
 
 if (sqlca.sqlcode < 0) exit (1);}

commit 54a622cadf1fb6d2047bf99fbee73b6418d2a23f
Author: Andrew Dunstan <andrew@dunslane.net>
Date:   Tue Jan 3 16:02:49 2012 -0500

    Suggest use of psql when pg_restore gets a text dump.

diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 7d895c4..d9edebb 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -77,6 +77,9 @@ typedef struct _parallel_slot
 
 #define NO_SLOT (-1)
 
+#define TEXT_DUMP_HEADER "--\n-- PostgreSQL database dump\n--\n\n"
+#define TEXT_DUMPALL_HEADER "--\n-- PostgreSQL database cluster dump\n--\n\n"
+
 /* state needed to save/restore an archive's output target */
 typedef struct _outputContext
 {
@@ -1862,12 +1865,20 @@ _discoverArchiveFormat(ArchiveHandle *AH)
 	else
 	{
 		/*
-		 * *Maybe* we have a tar archive format file... So, read first 512
-		 * byte header...
+		 * *Maybe* we have a tar archive format file or a text dump ... 
+		 * So, read first 512 byte header...
 		 */
 		cnt = fread(&AH->lookahead[AH->lookaheadLen], 1, 512 - AH->lookaheadLen, fh);
 		AH->lookaheadLen += cnt;
 
+		if (AH->lookaheadLen >= strlen(TEXT_DUMPALL_HEADER) &&
+			(strncmp(AH->lookahead, TEXT_DUMP_HEADER, strlen(TEXT_DUMP_HEADER)) == 0 ||
+			 strncmp(AH->lookahead, TEXT_DUMPALL_HEADER, strlen(TEXT_DUMPALL_HEADER)) == 0))
+		{
+			/* looks like it's probably a text format dump. so suggest they try psql */
+			die_horribly(AH, modulename, "input file appears to be a text format dump. Please use psql.\n");
+		}
+
 		if (AH->lookaheadLen != 512)
 			die_horribly(AH, modulename, "input file does not appear to be a valid archive (too short?)\n");
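
To see the shape of the header test in isolation, here is a hedged,
self-contained sketch (not PostgreSQL code; the sample buffer is invented).
Requiring at least strlen(TEXT_DUMPALL_HEADER) bytes, the longer of the two
headers, ensures both strncmp() calls only look at data that was actually read:

    #include <stdio.h>
    #include <string.h>

    #define TEXT_DUMP_HEADER    "--\n-- PostgreSQL database dump\n--\n\n"
    #define TEXT_DUMPALL_HEADER "--\n-- PostgreSQL database cluster dump\n--\n\n"

    /* Return 1 if the first 'len' bytes of 'buf' look like a plain-text dump. */
    static int
    looks_like_text_dump(const char *buf, size_t len)
    {
        return len >= strlen(TEXT_DUMPALL_HEADER) &&
            (strncmp(buf, TEXT_DUMP_HEADER, strlen(TEXT_DUMP_HEADER)) == 0 ||
             strncmp(buf, TEXT_DUMPALL_HEADER, strlen(TEXT_DUMPALL_HEADER)) == 0);
    }

    int
    main(void)
    {
        const char sample[] = "--\n-- PostgreSQL database dump\n--\n\nSET client_encoding ...";

        if (looks_like_text_dump(sample, sizeof(sample) - 1))
            fprintf(stderr, "input looks like a text format dump; use psql instead\n");
        return 0;
    }
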
 

commit bc2a050d40976441cdb963ad829316c23e8df0aa
Author: Tom Lane <tgl@sss.pgh.pa.us>
Date:   Tue Jan 3 16:00:06 2012 -0500

    Use a non-locking initial test in TAS_SPIN on PPC.
    
    Further testing convinces me that this is helpful at sufficiently high
    contention levels, though it's still worrisome that it loses slightly
    at lower contention levels.
    
    Per Manabu Ori.

diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h
index ff7eb14..9b02d1f 100644
--- a/src/include/storage/s_lock.h
+++ b/src/include/storage/s_lock.h
@@ -358,6 +358,9 @@ typedef unsigned int slock_t;
 
 #define TAS(lock) tas(lock)
 
+/* On PPC, it's a win to use a non-locking test before the lwarx */
+#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))
+
 /*
  * NOTE: per the Enhanced PowerPC Architecture manual, v1.0 dated 7-May-2002,
  * an isync is a sufficient synchronization barrier after a lwarx/stwcx loop.
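
For context, the non-locking initial test turns the spin loop into the classic
test-and-test-and-set pattern. A minimal sketch of such a loop, assuming TAS()
and TAS_SPIN() as defined for this port (the real loop in s_lock.c additionally
inserts CPU delays and a stuck-spinlock check):

    static void
    spin_acquire(volatile slock_t *lock)
    {
        /*
         * While the lock looks taken, TAS_SPIN() spins on a cheap ordinary
         * read instead of issuing lwarx/stwcx., avoiding extra bus traffic
         * under contention; the atomic TAS() runs only once the lock looks
         * free.
         */
        while (TAS_SPIN(lock))
            ;                   /* real code would back off / delay here */
    }
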

commit 63876d3bac5a7471a7987da25a93c13a2534a644
Author: Andrew Dunstan <andrew@dunslane.net>
Date:   Tue Jan 3 08:44:26 2012 -0500

    Support for building with MS Visual Studio 2010.
    
    Brar Piening, reviewed by Craig Ringer.

diff --git a/doc/src/sgml/install-windows.sgml b/doc/src/sgml/install-windows.sgml
index f96b174..b6ea0ab 100644
--- a/doc/src/sgml/install-windows.sgml
+++ b/doc/src/sgml/install-windows.sgml
@@ -20,10 +20,10 @@
   There are several different ways of building PostgreSQL on
   <productname>Windows</productname>. The simplest way to build with
   Microsoft tools is to install a supported version of the
-  <productname>Microsoft Platform SDK</productname> and use the included
+  <productname>Microsoft Windows SDK</productname> and use the included
   compiler. It is also possible to build with the full
-  <productname>Microsoft Visual C++ 2005 or 2008</productname>. In some cases
-  that requires the installation of the <productname>Platform SDK</productname>
+  <productname>Microsoft Visual C++ 2005, 2008 or 2010</productname>. In some cases
+  that requires the installation of the <productname>Windows SDK</productname>
   in addition to the compiler.
  </para>
 
@@ -69,32 +69,26 @@
 
  <sect1 id="install-windows-full">
   <title>Building with <productname>Visual C++</productname> or the
-  <productname>Platform SDK</productname></title>
+  <productname>Microsoft Windows SDK</productname></title>
 
  <para>
   PostgreSQL can be built using the Visual C++ compiler suite from Microsoft.
   These compilers can be either from <productname>Visual Studio</productname>,
   <productname>Visual Studio Express</productname> or some versions of the
-  <productname>Platform SDK</productname>. If you do not already have a
+  <productname>Microsoft Windows SDK</productname>. If you do not already have a
   <productname>Visual Studio</productname> environment set up, the easiest
-  way us to use the compilers in the <productname>Platform SDK</productname>,
+  way is to use the compilers in the <productname>Windows SDK</productname>,
   which is a free download from Microsoft.
  </para>
 
  <para>
-  PostgreSQL supports the compilers from
-  <productname>Visual Studio 2005</productname> and
-  <productname>Visual Studio 2008</productname>. When using the Platform SDK
-  only, or when building for 64-bit Windows, only
-  <productname>Visual Studio 2008</productname> is supported.
-  <productname>Visual Studio 2010</productname> is not yet supported.
- </para>
-
- <para>
-  When building using the <productname>Platform SDK</productname>, versions
-  6.0 to 7.0 of the SDK are supported. Older or newer versions will not work.
-  In particular, versions from 7.0a and later will not work, since
-  they include compilers from <productname>Visual Studio 2010</productname>.
+  PostgreSQL is known to support compilation using the compilers shipped with
+  <productname>Visual Studio 2005</productname> to
+  <productname>Visual Studio 2010</productname> (including Express editions),
+  as well as standalone Windows SDK releases 6.0 to 7.1.
+  64-bit PostgreSQL builds are only supported with
+  <productname>Microsoft Windows SDK</productname> version 6.0a and above or
+  <productname>Visual Studio 2008</productname> and above.
  </para>
 
  <para>
@@ -104,11 +98,13 @@
   <productname>Cygwin</productname> present in your system PATH. Also, make
   sure you have all the required Visual C++ tools available in the PATH. In
   <productname>Visual Studio</productname>, start the
-  <application>Visual Studio Command Prompt</application>. In the
-  <productname>Platform SDK</productname>, start the
-  <application>CMD shell</application> listed under the SDK on the Start Menu.
+  <application>Visual Studio Command Prompt</application>.
   If you wish to build a 64-bit version, you must use the 64-bit version of
   the command, and vice versa.
+  In the <productname>Microsoft Windows SDK</productname>, start the
+  <application>CMD shell</application> listed under the SDK on the Start Menu.
+  In recent SDK versions you can change the targeted CPU architecture by using
+  the <command>setenv</command> command.
   All commands should be run from the <filename>src\tools\msvc</filename>
   directory.
  </para>
@@ -148,17 +144,17 @@ $ENV{PATH}=$ENV{PATH} . ';c:\some\where\bison\bin';
 
    <variablelist>
     <varlistentry>
-     <term><productname>Microsoft Platform SDK</productname></term>
+     <term><productname>Microsoft Windows SDK</productname></term>
      <listitem><para>
       It is recommended that you upgrade to the latest supported version
-      of the <productname>Microsoft Platform SDK</productname> (currently
-      version 7.0), available for download from
+      of the <productname>Microsoft Windows SDK</productname> (currently
+      version 7.1), available for download from
       <ulink url="http://www.microsoft.com/downloads/"></>.
      </para>
      <para>
       You must always include the
       <application>Windows Headers and Libraries</application> part of the SDK.
-      If you install the <productname>Platform SDK</productname>
+      If you install the <productname>Windows SDK</productname>
       including the <application>Visual C++ Compilers</application>,
       you don't need <productname>Visual Studio</productname> to build.
      </para></listitem>
@@ -202,6 +198,10 @@ $ENV{PATH}=$ENV{PATH} . ';c:\some\where\bison\bin';
       Bison can be downloaded from <ulink url="http://gnuwin32.sourceforge.net"></>.
       Flex can be downloaded from
       <ulink url="http://www.postgresql.org/ftp/misc/winflex/"></>.
+      If you are using <productname>msysGit</productname> for accessing the
+      PostgreSQL <productname>Git</productname> repository you probably already
+      have recent versions of bison and flex in your <productname>Git</productname>
+      binary directory.
      </para>
 
      <note>
@@ -479,7 +479,7 @@ $ENV{DOCROOT}='c:\docbook';
   static library to link into an application. For normal use the
   <productname>MinGW</productname> or
   <productname>Visual Studio</productname> or
-  <productname>Platform SDK</productname> method is recommended.
+  <productname>Windows SDK</productname> method is recommended.
  </para>
 
  <para>
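
The setenv command mentioned in the hunk above is the SDK's environment
script; for example, in a Windows SDK 7.1 command prompt a 64-bit release
build environment can be selected with something like the following (the
exact switches depend on the SDK release):

    setenv /x64 /Release
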
diff --git a/src/include/port/win32.h b/src/include/port/win32.h
index afc9628..e2dd23b 100644
--- a/src/include/port/win32.h
+++ b/src/include/port/win32.h
@@ -103,7 +103,9 @@
 #define IPC_STAT 4096
 
 #define EACCESS 2048
+#ifndef EIDRM
 #define EIDRM 4096
+#endif
 
 #define SETALL 8192
 #define GETNCNT 16384
@@ -300,6 +302,26 @@ typedef int pid_t;
 #endif
 
 /*
+ * For Microsoft Visual Studio 2010 and above we intentionally redefine
+ * the regular Berkeley error constants and set them to the WSA constants.
+ * Note that this will break if those constants are used for anything other
+ * than Windows Sockets errors.
+ */
+#if _MSC_VER >= 1600
+#pragma warning(disable:4005)
+#define EMSGSIZE WSAEMSGSIZE
+#define EAFNOSUPPORT WSAEAFNOSUPPORT
+#define EWOULDBLOCK WSAEWOULDBLOCK
+#define EPROTONOSUPPORT WSAEPROTONOSUPPORT
+#define ECONNRESET WSAECONNRESET
+#define EINPROGRESS WSAEINPROGRESS
+#define ENOBUFS WSAENOBUFS
+#define ECONNREFUSED WSAECONNREFUSED
+#define EOPNOTSUPP WSAEOPNOTSUPP
+#pragma warning(default:4005)
+#endif 
+
+/*
  * Extended locale functions with gratuitous underscore prefixes.
  * (These APIs are nevertheless fully documented by Microsoft.)
  */
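
The pragma dance above is needed because, starting with Visual Studio 2010,
the compiler's own <errno.h> already defines POSIX names such as ECONNRESET
and EWOULDBLOCK, so redefining them to the WSA values would otherwise trigger
macro-redefinition warning C4005. A minimal sketch of the conflict (the header
behavior described is an assumption about the VS2010 toolchain):

    #include <errno.h>      /* VS2010 and later: ECONNRESET is already defined here */
    #include <winsock2.h>   /* defines WSAECONNRESET (10054) */

    #pragma warning(disable:4005)   /* suppress C4005: macro redefinition */
    #define ECONNRESET WSAECONNRESET
    #pragma warning(default:4005)

    /* errno-style names now match the values returned by WSAGetLastError(). */
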
diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm
index a7720a0..6176743 100644
--- a/src/tools/msvc/Install.pm
+++ b/src/tools/msvc/Install.pm
@@ -56,11 +56,8 @@ sub Install
     my $majorver = DetermineMajorVersion();
     print "Installing version $majorver for $conf in $target\n";
 
-    EnsureDirectories(
-        $target, 'bin', 'lib', 'share',
-        'share/timezonesets','share/extension', 'share/contrib','doc',
-        'doc/extension', 'doc/contrib','symbols', 'share/tsearch_data'
-    );
+    EnsureDirectories($target, 'bin', 'lib', 'share','share/timezonesets','share/extension',
+        'share/contrib','doc','doc/extension', 'doc/contrib','symbols', 'share/tsearch_data');
 
     CopySolutionOutput($conf, $target);
     lcopy($target . '/lib/libpq.dll', $target . '/bin/libpq.dll');
@@ -186,6 +183,13 @@ sub CopySolutionOutput
     my $rem = qr{Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}"\) = "([^"]+)"};
 
     my $sln = read_file("pgsql.sln") || croak "Could not open pgsql.sln\n";
+
+    my $vcproj = 'vcproj';
+    if ($sln =~ /Microsoft Visual Studio Solution File, Format Version (\d+)\.\d+/ && $1 >= 11)
+    {
+        $vcproj = 'vcxproj';
+    }
+
     print "Copying build output files...";
     while ($sln =~ $rem)
     {
@@ -195,26 +199,48 @@ sub CopySolutionOutput
 
         $sln =~ s/$rem//;
 
-        my $proj = read_file("$pf.vcproj") || croak "Could not open $pf.vcproj\n";
-        if ($proj !~ qr{ConfigurationType="([^"]+)"})
+        my $proj = read_file("$pf.$vcproj") || croak "Could not open $pf.$vcproj\n";
+        if ($vcproj eq 'vcproj' && $proj =~ qr{ConfigurationType="([^"]+)"})
         {
-            croak "Could not parse $pf.vcproj\n";
-        }
-        if ($1 == 1)
-        {
-            $dir = "bin";
-            $ext = "exe";
+            if ($1 == 1)
+            {
+                $dir = "bin";
+                $ext = "exe";
+            }
+            elsif ($1 == 2)
+            {
+                $dir = "lib";
+                $ext = "dll";
+            }
+            else
+            {
+
+                # Static lib, such as libpgport, only used internally during build, don't install
+                next;
+            }
         }
-        elsif ($1 == 2)
+        elsif ($vcproj eq 'vcxproj' && $proj =~ qr{<ConfigurationType>(\w+)</ConfigurationType>})
         {
-            $dir = "lib";
-            $ext = "dll";
+            if ($1 eq 'Application')
+            {
+                $dir = "bin";
+                $ext = "exe";
+            }
+            elsif ($1 eq 'DynamicLibrary')
+            {
+                $dir = "lib";
+                $ext = "dll";
+            }
+            else # 'StaticLibrary'
+            {
+
+                # Static lib, such as libpgport, only used internally during build, don't install
+                next;
+            }
         }
         else
         {
-
-            # Static lib, such as libpgport, only used internally during build, don't install
-            next;
+            croak "Could not parse $pf.$vcproj\n";
         }
         lcopy("$conf\\$pf\\$pf.$ext","$target\\$dir\\$pf.$ext")
           || croak "Could not copy $pf.$ext\n";
@@ -470,8 +496,7 @@ sub CopyIncludeFiles
         $target . '/include/server/',
         'src/include/', 'pg_config.h', 'pg_config_os.h'
     );
-    CopyFiles('Grammar header', $target . '/include/server/parser/','src/backend/parser/',
-        'gram.h');
+    CopyFiles('Grammar header', $target . '/include/server/parser/','src/backend/parser/','gram.h');
     CopySetOfFiles('',[ glob("src\\include\\*.h") ],$target . '/include/server/');
     my $D;
     opendir($D, 'src/include') || croak "Could not opendir on src/include!\n";
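
The Format Version test added to CopySolutionOutput() above keys off the first
lines of pgsql.sln. For illustration, a Visual Studio 2010 solution starts with
a header like the one below, whose major version of 11 selects the .vcxproj
branch (VS2005 and VS2008 write Format Version 9.00 and 10.00, respectively,
and keep the old .vcproj handling):

    Microsoft Visual Studio Solution File, Format Version 11.00
    # Visual Studio 2010
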
diff --git a/src/tools/msvc/MSBuildProject.pm b/src/tools/msvc/MSBuildProject.pm
new file mode 100644
index 0000000..fcce9eb
--- /dev/null
+++ b/src/tools/msvc/MSBuildProject.pm
@@ -0,0 +1,388 @@
+package MSBuildProject;
+
+#
+# Package that encapsulates a MSBuild (Visual C++ 2010) project file
+#
+# src/tools/msvc/MSBuildProject.pm
+#
+
+use Carp;
+use strict;
+use warnings;
+use base qw(Project);
+
+sub _new
+{
+    my $classname = shift;
+    my $self = $classname->SUPER::_new(@_);
+    bless($self, $classname);
+
+    $self->{filenameExtension} = '.vcxproj';
+
+    return $self;
+}
+
+sub WriteHeader
+{
+    my ($self, $f) = @_;
+
+    print $f <<EOF;
+<?xml version="1.0" encoding="Windows-1252"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup Label="ProjectConfigurations">
+EOF
+    $self->WriteConfigurationHeader($f, 'Debug');
+    $self->WriteConfigurationHeader($f, 'Release');
+    print $f <<EOF;
+  </ItemGroup>
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>$self->{guid}</ProjectGuid>
+  </PropertyGroup>
+  <Import Project="\$(VCTargetsPath)\\Microsoft.Cpp.Default.props" />
+EOF
+    $self->WriteConfigurationPropertyGroup($f, 'Release',{ wholeopt=>'false' });
+    $self->WriteConfigurationPropertyGroup($f, 'Debug',{ wholeopt=>'false' });
+    print $f <<EOF;
+  <Import Project="\$(VCTargetsPath)\\Microsoft.Cpp.props" />
+  <ImportGroup Label="ExtensionSettings">
+  </ImportGroup>
+EOF
+    $self->WritePropertySheetsPropertyGroup($f, 'Release');
+    $self->WritePropertySheetsPropertyGroup($f, 'Debug');
+    print $f <<EOF;
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup>
+    <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
+EOF
+    $self->WriteAdditionalProperties($f, 'Debug');
+    $self->WriteAdditionalProperties($f, 'Release');
+    print $f <<EOF;
+  </PropertyGroup>
+EOF
+    $self->WriteItemDefinitionGroup(
+        $f, 'Debug',
+        {
+            defs=>'_DEBUG;DEBUG=1;',
+            opt=>'Disabled',
+            strpool=>'false',
+            runtime=>'MultiThreadedDebugDLL'
+        }
+    );
+    $self->WriteItemDefinitionGroup($f, 'Release',
+        { defs=>'', opt=>'Full', strpool=>'true', runtime=>'MultiThreadedDLL' });
+}
+
+sub AddDefine
+{
+    my ($self, $def) = @_;
+
+    $self->{defines} .= $def . ';';
+}
+
+sub WriteReferences
+{
+    my ($self, $f) = @_;
+
+    my @references = @{$self->{references}};
+
+    if (scalar(@references))
+    {
+        print $f <<EOF;
+  <ItemGroup>
+EOF
+        foreach my $ref (@references)
+        {
+            print $f <<EOF;
+    <ProjectReference Include="$ref->{name}$ref->{filenameExtension}">
+      <Project>$ref->{guid}</Project>
+    </ProjectReference>
+EOF
+        }
+        print $f <<EOF;
+  </ItemGroup>
+EOF
+    }
+}
+
+sub WriteFiles
+{
+    my ($self, $f) = @_;
+    print $f <<EOF;
+  <ItemGroup>
+EOF
+    my @grammarFiles = ();
+    my @resourceFiles = ();
+    my %uniquefiles;
+    foreach my $fileNameWithPath (sort keys %{ $self->{files} })
+    {
+        confess "Bad format filename '$fileNameWithPath'\n"
+          unless ($fileNameWithPath =~ /^(.*)\\([^\\]+)\.[r]?[cyl]$/);
+        my $dir = $1;
+        my $fileName = $2;
+        if ($fileNameWithPath =~ /\.y$/ or $fileNameWithPath =~ /\.l$/)
+        {
+            push @grammarFiles, $fileNameWithPath;
+        }
+        elsif ($fileNameWithPath =~ /\.rc$/)
+        {
+            push @resourceFiles, $fileNameWithPath;
+        }
+        elsif (defined($uniquefiles{$fileName}))
+        {
+
+            # File already exists, so fake a new name
+            my $obj = $dir;
+            $obj =~ s/\\/_/g;
+
+            print $f <<EOF;
+    <ClCompile Include="$fileNameWithPath">
+      <ObjectFileName Condition="'\$(Configuration)|\$(Platform)'=='Debug|$self->{platform}'">.\\debug\\$self->{name}\\${obj}_$fileName.obj</ObjectFileName>
+      <ObjectFileName Condition="'\$(Configuration)|\$(Platform)'=='Release|$self->{platform}'">.\\release\\$self->{name}\\${obj}_$fileName.obj</ObjectFileName>
+    </ClCompile>
+EOF
+        }
+        else
+        {
+            $uniquefiles{$fileName} = 1;
+            print $f <<EOF;
+    <ClCompile Include="$fileNameWithPath" />
+EOF
+        }
+
+    }
+    print $f <<EOF;
+  </ItemGroup>
+EOF
+    if (scalar(@grammarFiles))
+    {
+        print $f <<EOF;
+  <ItemGroup>
+EOF
+        foreach my $grammarFile (@grammarFiles)
+        {
+            (my $outputFile = $grammarFile) =~ s/\.(y|l)$/.c/;
+            if ($grammarFile =~ /\.y$/)
+            {
+                $outputFile =~ s{^src\\pl\\plpgsql\\src\\gram.c$}{src\\pl\\plpgsql\\src\\pl_gram.c};
+                print $f <<EOF;
+    <CustomBuild Include="$grammarFile">
+      <Message Condition="'\$(Configuration)|\$(Platform)'=='Debug|$self->{platform}'">Running bison on $grammarFile</Message>
+      <Command Condition="'\$(Configuration)|\$(Platform)'=='Debug|$self->{platform}'">perl "src\\tools\\msvc\\pgbison.pl" "$grammarFile"</Command>
+      <AdditionalInputs Condition="'\$(Configuration)|\$(Platform)'=='Debug|$self->{platform}'">%(AdditionalInputs)</AdditionalInputs>
+      <Outputs Condition="'\$(Configuration)|\$(Platform)'=='Debug|$self->{platform}'">$outputFile;%(Outputs)</Outputs>
+      <Message Condition="'\$(Configuration)|\$(Platform)'=='Release|$self->{platform}'">Running bison on $grammarFile</Message>
+      <Command Condition="'\$(Configuration)|\$(Platform)'=='Release|$self->{platform}'">perl "src\\tools\\msvc\\pgbison.pl" "$grammarFile"</Command>
+      <AdditionalInputs Condition="'\$(Configuration)|\$(Platform)'=='Release|$self->{platform}'">%(AdditionalInputs)</AdditionalInputs>
+      <Outputs Condition="'\$(Configuration)|\$(Platform)'=='Release|$self->{platform}'">$outputFile;%(Outputs)</Outputs>
+    </CustomBuild>
+EOF
+            }
+            else #if ($grammarFile =~ /\.l$/)
+            {
+                print $f <<EOF;
+    <CustomBuild Include="$grammarFile">
+      <Message Condition="'\$(Configuration)|\$(Platform)'=='Debug|$self->{platform}'">Running flex on $grammarFile</Message>
+      <Command Condition="'\$(Configuration)|\$(Platform)'=='Debug|$self->{platform}'">perl "src\\tools\\msvc\\pgflex.pl" "$grammarFile"</Command>
+      <AdditionalInputs Condition="'\$(Configuration)|\$(Platform)'=='Debug|$self->{platform}'">%(AdditionalInputs)</AdditionalInputs>
+      <Outputs Condition="'\$(Configuration)|\$(Platform)'=='Debug|$self->{platform}'">$outputFile;%(Outputs)</Outputs>
+      <Message Condition="'\$(Configuration)|\$(Platform)'=='Release|$self->{platform}'">Running flex on $grammarFile</Message>
+      <Command Condition="'\$(Configuration)|\$(Platform)'=='Release|$self->{platform}'">perl "src\\tools\\msvc\\pgflex.pl" "$grammarFile"</Command>
+      <AdditionalInputs Condition="'\$(Configuration)|\$(Platform)'=='Release|$self->{platform}'">%(AdditionalInputs)</AdditionalInputs>
+      <Outputs Condition="'\$(Configuration)|\$(Platform)'=='Release|$self->{platform}'">$outputFile;%(Outputs)</Outputs>
+    </CustomBuild>
+EOF
+            }
+        }
+        print $f <<EOF;
+  </ItemGroup>
+EOF
+    }
+    if (scalar(@resourceFiles))
+    {
+        print $f <<EOF;
+  <ItemGroup>
+EOF
+        foreach my $rcFile (@resourceFiles)
+        {
+            print $f <<EOF;
+    <ResourceCompile Include="$rcFile" />
+EOF
+        }
+        print $f <<EOF;
+  </ItemGroup>
+EOF
+    }
+}
+
+sub WriteConfigurationHeader
+{
+    my ($self, $f, $cfgname) = @_;
+    print $f <<EOF;
+    <ProjectConfiguration Include="$cfgname|$self->{platform}">
+      <Configuration>$cfgname</Configuration>
+      <Platform>$self->{platform}</Platform>
+    </ProjectConfiguration>
+EOF
+}
+
+sub WriteConfigurationPropertyGroup
+{
+    my ($self, $f, $cfgname, $p) = @_;
+    my $cfgtype =
+      ($self->{type} eq "exe")
+      ?'Application'
+      :($self->{type} eq "dll"?'DynamicLibrary':'StaticLibrary');
+
+    print $f <<EOF;
+  <PropertyGroup Condition="'\$(Configuration)|\$(Platform)'=='$cfgname|$self->{platform}'" Label="Configuration">
+    <ConfigurationType>$cfgtype</ConfigurationType>
+    <UseOfMfc>false</UseOfMfc>
+    <CharacterSet>MultiByte</CharacterSet>
+    <WholeProgramOptimization>$p->{wholeopt}</WholeProgramOptimization>
+  </PropertyGroup>
+EOF
+}
+
+sub WritePropertySheetsPropertyGroup
+{
+    my ($self, $f, $cfgname) = @_;
+    print $f <<EOF;
+  <ImportGroup Condition="'\$(Configuration)|\$(Platform)'=='$cfgname|$self->{platform}'" Label="PropertySheets">
+    <Import Project="\$(UserRootDir)\\Microsoft.Cpp.\$(Platform).user.props" Condition="exists('\$(UserRootDir)\\Microsoft.Cpp.\$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+EOF
+}
+
+sub WriteAdditionalProperties
+{
+    my ($self, $f, $cfgname) = @_;
+    print $f <<EOF;
+    <OutDir Condition="'\$(Configuration)|\$(Platform)'=='$cfgname|$self->{platform}'">.\\$cfgname\\$self->{name}\\</OutDir>
+    <IntDir Condition="'\$(Configuration)|\$(Platform)'=='$cfgname|$self->{platform}'">.\\$cfgname\\$self->{name}\\</IntDir>
+    <LinkIncremental Condition="'\$(Configuration)|\$(Platform)'=='$cfgname|$self->{platform}'">false</LinkIncremental>
+EOF
+}
+
+sub WriteItemDefinitionGroup
+{
+    my ($self, $f, $cfgname, $p) = @_;
+    my $cfgtype =
+      ($self->{type} eq "exe")
+      ?'Application'
+      :($self->{type} eq "dll"?'DynamicLibrary':'StaticLibrary');
+    my $libs = $self->GetAdditionalLinkerDependencies($cfgname, ';');
+
+    my $targetmachine = $self->{platform} eq 'Win32' ? 'MachineX86' : 'MachineX64';
+
+    my $includes = $self->{includes};
+    unless ($includes eq '' or $includes =~ /;$/)
+    {
+        $includes .= ';';
+    }
+    print $f <<EOF;
+  <ItemDefinitionGroup Condition="'\$(Configuration)|\$(Platform)'=='$cfgname|$self->{platform}'">
+    <ClCompile>
+      <Optimization>$p->{opt}</Optimization>
+      <AdditionalIncludeDirectories>$self->{prefixincludes}src/include;src/include/port/win32;src/include/port/win32_msvc;$includes\%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <PreprocessorDefinitions>WIN32;_WINDOWS;__WINDOWS__;__WIN32__;EXEC_BACKEND;WIN32_STACK_RLIMIT=4194304;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE$self->{defines}$p->{defs}\%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <StringPooling>$p->{strpool}</StringPooling>
+      <RuntimeLibrary>$p->{runtime}</RuntimeLibrary>
+      <DisableSpecificWarnings>$self->{disablewarnings};\%(DisableSpecificWarnings)</DisableSpecificWarnings>
+      <AdditionalOptions>/MP \%(AdditionalOptions)</AdditionalOptions>
+      <AssemblerOutput>
+      </AssemblerOutput>
+      <AssemblerListingLocation>.\\$cfgname\\$self->{name}\\</AssemblerListingLocation>
+      <ObjectFileName>.\\$cfgname\\$self->{name}\\</ObjectFileName>
+      <ProgramDataBaseFileName>.\\$cfgname\\$self->{name}\\</ProgramDataBaseFileName>
+      <BrowseInformation>false</BrowseInformation>
+      <WarningLevel>Level3</WarningLevel>
+      <SuppressStartupBanner>true</SuppressStartupBanner>
+      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+      <CompileAs>Default</CompileAs>
+    </ClCompile>
+    <Link>
+      <OutputFile>.\\$cfgname\\$self->{name}\\$self->{name}.$self->{type}</OutputFile>
+      <AdditionalDependencies>$libs;\%(AdditionalDependencies)</AdditionalDependencies>
+      <SuppressStartupBanner>true</SuppressStartupBanner>
+      <AdditionalLibraryDirectories>\%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+      <IgnoreSpecificDefaultLibraries>libc;\%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
+      <StackReserveSize>4194304</StackReserveSize>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <ProgramDatabaseFile>.\\$cfgname\\$self->{name}\\$self->{name}.pdb</ProgramDatabaseFile>
+      <GenerateMapFile>false</GenerateMapFile>
+      <MapFileName>.\\$cfgname\\$self->{name}\\$self->{name}.map</MapFileName>
+      <SubSystem>Console</SubSystem>
+      <TargetMachine>$targetmachine</TargetMachine>
+EOF
+    if ($self->{disablelinkerwarnings})
+    {
+        print $f
+"      <AdditionalOptions>/ignore:$self->{disablelinkerwarnings} \%(AdditionalOptions)</AdditionalOptions>\n";
+    }
+    if ($self->{implib})
+    {
+        my $l = $self->{implib};
+        $l =~ s/__CFGNAME__/$cfgname/g;
+        print $f "      <ImportLibrary>$l</ImportLibrary>\n";
+    }
+    if ($self->{def})
+    {
+        my $d = $self->{def};
+        $d =~ s/__CFGNAME__/$cfgname/g;
+        print $f "      <ModuleDefinitionFile>$d</ModuleDefinitionFile>\n";
+    }
+    print $f <<EOF;
+    </Link>
+    <ResourceCompile>
+      <AdditionalIncludeDirectories>src\\include;\%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    </ResourceCompile>
+EOF
+    if ($self->{builddef})
+    {
+        print $f <<EOF;
+    <PreLinkEvent>
+      <Message>Generate DEF file</Message>
+      <Command>perl src\\tools\\msvc\\gendef.pl $cfgname\\$self->{name} $self->{platform}</Command>
+    </PreLinkEvent>
+EOF
+    }
+    print $f <<EOF;
+  </ItemDefinitionGroup>
+EOF
+}
+
+sub Footer
+{
+    my ($self, $f) = @_;
+    $self->WriteReferences($f);
+
+    print $f <<EOF;
+  <Import Project="\$(VCTargetsPath)\\Microsoft.Cpp.targets" />
+  <ImportGroup Label="ExtensionTargets">
+  </ImportGroup>
+</Project>
+EOF
+}
+
+package VC2010Project;
+
+#
+# Package that encapsulates a Visual C++ 2010 project file
+#
+
+use strict;
+use warnings;
+use base qw(MSBuildProject);
+
+sub new
+{
+    my $classname = shift;
+    my $self = $classname->SUPER::_new(@_);
+    bless($self, $classname);
+
+    $self->{vcver} = '10.00';
+
+    return $self;
+}
+
+1;
diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm
index fb83224..7881c66 100644
--- a/src/tools/msvc/Mkvcbuild.pm
+++ b/src/tools/msvc/Mkvcbuild.pm
@@ -14,6 +14,7 @@ use Solution;
 use Cwd;
 use File::Copy;
 use Config;
+use VSObjectFactory;
 use List::Util qw(first);
 
 use Exporter;
@@ -47,7 +48,9 @@ sub mkvcbuild
     chdir('..\..\..') if (-d '..\msvc' && -d '..\..\..\src');
     die 'Must run from root or msvc directory' unless (-d 'src\tools\msvc' && -d 'src');
 
-    $solution = new Solution($config);
+    my $vsVersion = DetermineVisualStudioVersion();
+
+    $solution = CreateSolution($vsVersion, $config);
 
     our @pgportfiles = qw(
       chklocale.c crypt.c fseeko.c getrusage.c inet_aton.c random.c srandom.c
@@ -344,12 +347,13 @@ sub mkvcbuild
     $pgdump->AddFile('src\backend\parser\kwlookup.c');
 
     my $pgdumpall = AddSimpleFrontend('pg_dump', 1);
-	# pg_dumpall doesn't use the files in the Makefile's $(OBJS), unlike
-	# pg_dump and pg_restore.
-	# So remove their sources from the object, keeping the other setup that 
-	# AddSimpleFrontend() has done.
-    my @nodumpall = grep  { m/src\\bin\\pg_dump\\.*\.c$/ } 
-	keys %{$pgdumpall->{files}};
+
+    # pg_dumpall doesn't use the files in the Makefile's $(OBJS), unlike
+    # pg_dump and pg_restore.
+    # So remove their sources from the object, keeping the other setup that
+    # AddSimpleFrontend() has done.
+    my @nodumpall = grep  { m/src\\bin\\pg_dump\\.*\.c$/ }
+      keys %{$pgdumpall->{files}};
     delete @{$pgdumpall->{files}}{@nodumpall};
     $pgdumpall->{name} = 'pg_dumpall';
     $pgdumpall->AddIncludeDir('src\backend');
@@ -508,6 +512,7 @@ sub mkvcbuild
     $pgregress->AddReference($libpgport);
 
     $solution->Save();
+    return $solution->{vcver};
 }
 
 #####################
diff --git a/src/tools/msvc/Project.pm b/src/tools/msvc/Project.pm
index 66752f9..9db664a 100644
--- a/src/tools/msvc/Project.pm
+++ b/src/tools/msvc/Project.pm
@@ -10,9 +10,9 @@ use strict;
 use warnings;
 use File::Basename;
 
-sub new
+sub _new
 {
-    my ($junk, $name, $type, $solution) = @_;
+    my ($classname, $name, $type, $solution) = @_;
     my $good_types = {
         lib => 1,
         exe => 1,
@@ -20,24 +20,23 @@ sub new
     };
     confess("Bad project type: $type\n") unless exists $good_types->{$type};
     my $self = {
-        name            => $name,
-        type            => $type,
-        guid            => Win32::GuidGen(),
-        files           => {},
-        references      => [],
-        libraries       => [],
-        suffixlib       => [],
-        includes        => '',
-        prefixincludes  => '',
-        defines         => ';',
-        solution        => $solution,
-        disablewarnings => '4018;4244;4273;4102;4090;4267',
+        name                  => $name,
+        type                  => $type,
+        guid                  => Win32::GuidGen(),
+        files                 => {},
+        references            => [],
+        libraries             => [],
+        suffixlib             => [],
+        includes              => '',
+        prefixincludes        => '',
+        defines               => ';',
+        solution              => $solution,
+        disablewarnings       => '4018;4244;4273;4102;4090;4267',
         disablelinkerwarnings => '',
-        vcver           => $solution->{vcver},
-        platform        => $solution->{platform},
+        platform              => $solution->{platform},
     };
 
-    bless $self;
+    bless($self, $classname);
     return $self;
 }
 
@@ -355,135 +354,17 @@ sub Save
     $self->DisableLinkerWarnings('4197') if ($self->{platform} eq 'x64');
 
     # Dump the project
-    open(F, ">$self->{name}.vcproj") || croak("Could not write to $self->{name}.vcproj\n");
+    open(F, ">$self->{name}$self->{filenameExtension}")
+      || croak("Could not write to $self->{name}$self->{filenameExtension}\n");
     $self->WriteHeader(*F);
-    $self->WriteReferences(*F);
-    print F <<EOF;
- <Files>
-EOF
-    my @dirstack = ();
-    my %uniquefiles;
-    foreach my $f (sort keys %{ $self->{files} })
-    {
-        confess "Bad format filename '$f'\n" unless ($f =~ /^(.*)\\([^\\]+)\.[r]?[cyl]$/);
-        my $dir = $1;
-        my $file = $2;
-
-        # Walk backwards down the directory stack and close any dirs we're done with
-        while ($#dirstack >= 0)
-        {
-            if (join('\\',@dirstack) eq substr($dir, 0, length(join('\\',@dirstack))))
-            {
-                last if (length($dir) == length(join('\\',@dirstack)));
-                last if (substr($dir, length(join('\\',@dirstack)),1) eq '\\');
-            }
-            print F ' ' x $#dirstack . "  </Filter>\n";
-            pop @dirstack;
-        }
-
-        # Now walk forwards and create whatever directories are needed
-        while (join('\\',@dirstack) ne $dir)
-        {
-            my $left = substr($dir, length(join('\\',@dirstack)));
-            $left =~ s/^\\//;
-            my @pieces = split /\\/, $left;
-            push @dirstack, $pieces[0];
-            print F ' ' x $#dirstack . "  <Filter Name=\"$pieces[0]\" Filter=\"\">\n";
-        }
-
-        print F ' ' x $#dirstack . "   <File RelativePath=\"$f\"";
-        if ($f =~ /\.y$/)
-        {
-            my $of = $f;
-            $of =~ s/\.y$/.c/;
-            $of =~ s{^src\\pl\\plpgsql\\src\\gram.c$}{src\\pl\\plpgsql\\src\\pl_gram.c};
-            print F '>'
-              . $self->GenerateCustomTool('Running bison on ' . $f,
-                'cmd /V:ON /c src\tools\msvc\pgbison.bat ' . $f, $of)
-              . '</File>' . "\n";
-        }
-        elsif ($f =~ /\.l$/)
-        {
-            my $of = $f;
-            $of =~ s/\.l$/.c/;
-            print F '>'
-              . $self->GenerateCustomTool('Running flex on ' . $f,
-                'src\tools\msvc\pgflex.bat ' . $f,$of)
-              . '</File>' . "\n";
-        }
-        elsif (defined($uniquefiles{$file}))
-        {
-
-            # File already exists, so fake a new name
-            my $obj = $dir;
-            $obj =~ s/\\/_/g;
-            print F
-"><FileConfiguration Name=\"Debug|$self->{platform}\"><Tool Name=\"VCCLCompilerTool\" ObjectFile=\".\\debug\\$self->{name}\\$obj"
-              . "_$file.obj\" /></FileConfiguration><FileConfiguration Name=\"Release|$self->{platform}\"><Tool Name=\"VCCLCompilerTool\" ObjectFile=\".\\release\\$self->{name}\\$obj"
-              . "_$file.obj\" /></FileConfiguration></File>\n";
-        }
-        else
-        {
-            $uniquefiles{$file} = 1;
-            print F " />\n";
-        }
-    }
-    while ($#dirstack >= 0)
-    {
-        print F ' ' x $#dirstack . "  </Filter>\n";
-        pop @dirstack;
-    }
+    $self->WriteFiles(*F);
     $self->Footer(*F);
     close(F);
 }
 
-sub GenerateCustomTool
+sub GetAdditionalLinkerDependencies
 {
-    my ($self, $desc, $tool, $output, $cfg) = @_;
-    if (!defined($cfg))
-    {
-        return $self->GenerateCustomTool($desc, $tool, $output, 'Debug')
-          .$self->GenerateCustomTool($desc, $tool, $output, 'Release');
-    }
-    return
-"<FileConfiguration Name=\"$cfg|$self->{platform}\"><Tool Name=\"VCCustomBuildTool\" Description=\"$desc\" CommandLine=\"$tool\" AdditionalDependencies=\"\" Outputs=\"$output\" /></FileConfiguration>";
-}
-
-sub WriteReferences
-{
-    my ($self, $f) = @_;
-    print $f " <References>\n";
-    foreach my $ref (@{$self->{references}})
-    {
-        print $f
-"  <ProjectReference ReferencedProjectIdentifier=\"$ref->{guid}\" Name=\"$ref->{name}\" />\n";
-    }
-    print $f " </References>\n";
-}
-
-sub WriteHeader
-{
-    my ($self, $f) = @_;
-
-    print $f <<EOF;
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject ProjectType="Visual C++" Version="$self->{vcver}" Name="$self->{name}" ProjectGUID="$self->{guid}">
- <Platforms><Platform Name="$self->{platform}"/></Platforms>
- <Configurations>
-EOF
-    $self->WriteConfiguration($f, 'Debug',
-        { defs=>'_DEBUG;DEBUG=1;', wholeopt=>0, opt=>0, strpool=>'false', runtime=>3 });
-    $self->WriteConfiguration($f, 'Release',
-        { defs=>'', wholeopt=>0, opt=>3, strpool=>'true', runtime=>2 });
-    print $f <<EOF;
- </Configurations>
-EOF
-}
-
-sub WriteConfiguration
-{
-    my ($self, $f, $cfgname, $p) = @_;
-    my $cfgtype = ($self->{type} eq "exe")?1:($self->{type} eq "dll"?2:4);
+    my ($self, $cfgname, $seperator) = @_;
     my $libcfg = (uc $cfgname eq "RELEASE")?"MD":"MDd";
     my $libs = '';
     foreach my $lib (@{$self->{libraries}})
@@ -497,76 +378,11 @@ sub WriteConfiguration
                 last;
             }
         }
-        $libs .= $xlib . " ";
+        $libs .= $xlib . $seperator;
     }
-    $libs =~ s/ $//;
+    $libs =~ s/.$//;
     $libs =~ s/__CFGNAME__/$cfgname/g;
-
-    my $targetmachine = $self->{platform} eq 'Win32' ? 1 : 17;
-
-    print $f <<EOF;
-  <Configuration Name="$cfgname|$self->{platform}" OutputDirectory=".\\$cfgname\\$self->{name}" IntermediateDirectory=".\\$cfgname\\$self->{name}"
-	ConfigurationType="$cfgtype" UseOfMFC="0" ATLMinimizesCRunTimeLibraryUsage="FALSE" CharacterSet="2" WholeProgramOptimization="$p->{wholeopt}">
-	<Tool Name="VCCLCompilerTool" Optimization="$p->{opt}"
-		AdditionalIncludeDirectories="$self->{prefixincludes}src/include;src/include/port/win32;src/include/port/win32_msvc;$self->{includes}"
-		PreprocessorDefinitions="WIN32;_WINDOWS;__WINDOWS__;__WIN32__;EXEC_BACKEND;WIN32_STACK_RLIMIT=4194304;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE$self->{defines}$p->{defs}"
-		StringPooling="$p->{strpool}"
-		RuntimeLibrary="$p->{runtime}" DisableSpecificWarnings="$self->{disablewarnings}"
-		AdditionalOptions="/MP"
-EOF
-    print $f <<EOF;
-		AssemblerOutput="0" AssemblerListingLocation=".\\$cfgname\\$self->{name}\\" ObjectFile=".\\$cfgname\\$self->{name}\\"
-		ProgramDataBaseFileName=".\\$cfgname\\$self->{name}\\" BrowseInformation="0"
-		WarningLevel="3" SuppressStartupBanner="TRUE" DebugInformationFormat="3" CompileAs="0"/>
-	<Tool Name="VCLinkerTool" OutputFile=".\\$cfgname\\$self->{name}\\$self->{name}.$self->{type}"
-		AdditionalDependencies="$libs"
-		LinkIncremental="0" SuppressStartupBanner="TRUE" AdditionalLibraryDirectories="" IgnoreDefaultLibraryNames="libc"
-		StackReserveSize="4194304" DisableSpecificWarnings="$self->{disablewarnings}"
-		GenerateDebugInformation="TRUE" ProgramDatabaseFile=".\\$cfgname\\$self->{name}\\$self->{name}.pdb"
-		GenerateMapFile="FALSE" MapFileName=".\\$cfgname\\$self->{name}\\$self->{name}.map"
-		SubSystem="1" TargetMachine="$targetmachine"
-EOF
-    if ($self->{disablelinkerwarnings})
-    {
-        print $f "\t\tAdditionalOptions=\"/ignore:$self->{disablelinkerwarnings}\"\n";
-    }
-    if ($self->{implib})
-    {
-        my $l = $self->{implib};
-        $l =~ s/__CFGNAME__/$cfgname/g;
-        print $f "\t\tImportLibrary=\"$l\"\n";
-    }
-    if ($self->{def})
-    {
-        my $d = $self->{def};
-        $d =~ s/__CFGNAME__/$cfgname/g;
-        print $f "\t\tModuleDefinitionFile=\"$d\"\n";
-    }
-
-    print $f "\t/>\n";
-    print $f
-"\t<Tool Name=\"VCLibrarianTool\" OutputFile=\".\\$cfgname\\$self->{name}\\$self->{name}.lib\" IgnoreDefaultLibraryNames=\"libc\" />\n";
-    print $f
-      "\t<Tool Name=\"VCResourceCompilerTool\" AdditionalIncludeDirectories=\"src\\include\" />\n";
-    if ($self->{builddef})
-    {
-        print $f
-"\t<Tool Name=\"VCPreLinkEventTool\" Description=\"Generate DEF file\" CommandLine=\"perl src\\tools\\msvc\\gendef.pl $cfgname\\$self->{name} $self->{platform}\" />\n";
-    }
-    print $f <<EOF;
-  </Configuration>
-EOF
-}
-
-sub Footer
-{
-    my ($self, $f) = @_;
-
-    print $f <<EOF;
- </Files>
- <Globals/>
-</VisualStudioProject>
-EOF
+    return $libs;
 }
 
 # Utility function that loads a complete file
diff --git a/src/tools/msvc/README b/src/tools/msvc/README
index b8dd488..ad0eca3 100644
--- a/src/tools/msvc/README
+++ b/src/tools/msvc/README
@@ -4,19 +4,19 @@ MSVC build
 ==========
 
 This directory contains the tools required to build PostgreSQL using
-Microsoft Visual Studio 2005. This builds the whole backend, not just
+Microsoft Visual Studio 2005 - 2010. This builds the whole backend, not just
 the libpq frontend library. For more information, see the documentation
-chapter "Installation on Windows".
+chapter "Installation on Windows" and the description below.
 
 
 Notes about Visual Studio Express
 ---------------------------------
-To build PostgreSQL using Visual Studio Express, the Platform SDK
+To build PostgreSQL using Visual Studio Express, the Microsoft Windows SDK
 has to be installed. Since this is not included in the product
 originally, extra steps are needed to make it work.
 
-First, download and install the latest Platform SDK from
-www.microsoft.com.
+First, download and install a supported version of the Microsoft Windows SDK
+from www.microsoft.com (v6.0 or greater).
 
 Locate the files vcprojectengine.dll.express.config and
 vcprojectengine.dll.config in the vc\vcpackages directory of
@@ -26,3 +26,77 @@ to add them to the beginning of the list.
 
 This should work for both GUI and commandline builds, but a restart
 may be necessary.
+
+If you are using a recent version of the Microsoft Windows SDK that includes
+the compilers and build tools, you probably don't even need Visual Studio
+Express to build PostgreSQL.
+
+
+Structure of the build tools
+----------------------------
+The tools for building PostgreSQL using Microsoft Visual Studio currently
+consist of the following files:
+
+- Configuration files -
+config_default.pl      default configuration arguments
+
+A typical build environment has two more files, buildenv.pl and config.pl,
+that contain the user's build environment settings and configuration
+arguments.
+
+
+- User tools -
+build.pl               tool to build the binaries
+builddoc.pl            tool to build the docs
+clean.bat              batch file for cleaning up generated files
+install.pl             tool to install the generated files
+mkvcbuild.pl           tool to generate the Visual Studio build files
+vcregress.pl           tool to run the regression tests
+
+
+- Internal tools -
+gendef.pl              internal tool to generate .DEF files
+pgbison.pl             internal tool to process .y files using bison
+pgflex.pl              internal tool to process .l files using flex
+
+Many of those .pl files also have a corresponding .bat wrapper that doesn't
+contain any additional logic.
+
+
+- Internal modules -
+Install.pm             module containing the install logic
+Mkvcbuild.pm           module containing the code to generate the Visual
+                       Studio build (project/solution) files
+MSBuildProject.pm      module containing the code to generate MSBuild based
+                       project files (Visual Studio 2010 or greater)
+Project.pm             module containing the common code to generate the
+                       Visual Studio project files. Also provides the
+                       common interface of all project file generators
+Solution.pm            module containing the code to generate the Visual
+                       Studio solution files.
+VCBuildProject.pm      module containing the code to generate VCBuild based
+                       project files (Visual Studio 2005/2008)
+VSObjectFactory.pm     factory module providing the code to create the 
+                       appropriate project/solution files for the current
+                       environment
+
+
+Description of the internals of the Visual Studio build process
+---------------------------------------------------------------
+By typing 'build' the user starts the build.bat wrapper which simply passes
+its arguments to build.pl.
+In build.pl the user's buildenv.pl is used to set up the build environment
+(i.e. the paths to bison and flex). In addition, the user's config.pl is
+merged into config_default.pl to create the configuration arguments.
+These configuration arguments are passed over to Mkvcbuild::mkvcbuild
+(Mkvcbuild.pm) which creates the Visual Studio project and solution files.
+It does this by using VSObjectFactory::CreateSolution to create an object
+implementing the Solution interface (this could be either a VS2005Solution,
+a VS2008Solution or a VS2010Solution, all in Solution.pm, depending on the
+user's build environment) and adding objects implementing the corresponding
+Project interface (VC2005Project or VC2008Project from VCBuildProject.pm or
+VC2010Project from MSBuildProject.pm) to it.
+When Solution::Save is called, the implementations of Solution and Project
+save their content in the appropriate format.
+The final step of starting the appropriate build program (msbuild or vcbuild)
+is performed in build.pl again.
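
To make the description above concrete, a minimal buildenv.pl and config.pl
could look roughly like the sketch below. The paths and the zlib location are
placeholders, and only options that are validated or tested in Solution.pm
(integer_datetimes, wal_segsize, zlib) are shown; config_default.pl remains
the authoritative list of configuration arguments.

    # buildenv.pl -- build environment settings (paths are examples only)
    $ENV{PATH} = "c:\\prog\\bison\\bin;c:\\prog\\flex\\bin;$ENV{PATH}";

    # config.pl -- overrides merged with config_default.pl by build.pl
    $config->{integer_datetimes} = 1;    # already the default
    $config->{wal_segsize}       = 16;   # must be one of 1,2,4,8,16,32,64
    $config->{zlib}              = 'c:\prog\zlib';   # assumed install path
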
diff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm
index 36fd4b7..1725fbb 100644
--- a/src/tools/msvc/Solution.pm
+++ b/src/tools/msvc/Solution.pm
@@ -8,10 +8,11 @@ package Solution;
 use Carp;
 use strict;
 use warnings;
+use VSObjectFactory;
 
-sub new
+sub _new
 {
-    my $junk = shift;
+    my $classname = shift;
     my $options = shift;
     my $self = {
         projects => {},
@@ -21,7 +22,7 @@ sub new
         vcver    => undef,
         platform => undef,
     };
-    bless $self;
+    bless($self, $classname);
 
     # integer_datetimes is now the default
     $options->{integer_datetimes} = 1
@@ -53,28 +54,15 @@ sub new
     die "Bad wal_segsize $options->{wal_segsize}"
       unless grep {$_ == $options->{wal_segsize}} (1,2,4,8,16,32,64);
 
-    $self->DetermineToolVersions();
+    $self->DeterminePlatform();
 
     return $self;
 }
 
-sub DetermineToolVersions
+sub DeterminePlatform
 {
     my $self = shift;
 
-    # Determine version of vcbuild command, to set proper verison of visual studio
-    open(P,"vcbuild /? |") || die "vcbuild command not found";
-    my $line = <P>;
-    close(P);
-    if ($line !~ /^Microsoft\s*\(R\) Visual C\+\+ [^-]+ - \D+(\d+)\.00\.\d+/)
-    {
-        die "Unable to determine vcbuild version from first line of output!";
-    }
-    if ($1 == 8) { $self->{vcver} = '8.00' }
-    elsif ($1 == 9) { $self->{vcver} = '9.00' }
-    else { die "Unsupported version of Visual Studio: $1" }
-    print "Detected Visual Studio version $self->{vcver}\n";
-
     # Determine if we are in 32 or 64-bit mode. Do this by seeing if CL has
     # 64-bit only parameters.
     $self->{platform} = 'Win32';
@@ -428,7 +416,7 @@ sub AddProject
 {
     my ($self, $name, $type, $folder, $initialdir) = @_;
 
-    my $proj = new Project($name, $type, $self);
+    my $proj = VSObjectFactory::CreateProject($self->{vcver}, $name, $type, $self);
     push @{$self->{projects}->{$folder}}, $proj;
     $proj->AddDir($initialdir) if ($initialdir);
     if ($self->{options}->{zlib})
@@ -488,8 +476,8 @@ sub Save
 
     open(SLN,">pgsql.sln") || croak "Could not write to pgsql.sln\n";
     print SLN <<EOF;
-Microsoft Visual Studio Solution File, Format Version 9.00
-# Visual Studio 2005
+Microsoft Visual Studio Solution File, Format Version $self->{solutionFileVersion}
+# $self->{visualStudioName}
 EOF
 
     foreach my $fld (keys %{$self->{projects}})
@@ -497,7 +485,7 @@ EOF
         foreach my $proj (@{$self->{projects}->{$fld}})
         {
             print SLN <<EOF;
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "$proj->{name}", "$proj->{name}.vcproj", "$proj->{guid}"
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "$proj->{name}", "$proj->{name}$proj->{filenameExtension}", "$proj->{guid}"
 EndProject
 EOF
         }
@@ -579,4 +567,74 @@ sub GetFakeConfigure
     return $cfg;
 }
 
+package VS2005Solution;
+
+#
+# Package that encapsulates a Visual Studio 2005 solution file
+#
+
+use strict;
+use warnings;
+use base qw(Solution);
+
+sub new
+{
+    my $classname = shift;
+    my $self = $classname->SUPER::_new(@_);
+    bless($self, $classname);
+
+    $self->{solutionFileVersion} = '9.00';
+    $self->{vcver} = '8.00';
+    $self->{visualStudioName} = 'Visual Studio 2005';
+
+    return $self;
+}
+
+package VS2008Solution;
+
+#
+# Package that encapsulates a Visual Studio 2008 solution file
+#
+
+use strict;
+use warnings;
+use base qw(Solution);
+
+sub new
+{
+    my $classname = shift;
+    my $self = $classname->SUPER::_new(@_);
+    bless($self, $classname);
+
+    $self->{solutionFileVersion} = '10.00';
+    $self->{vcver} = '9.00';
+    $self->{visualStudioName} = 'Visual Studio 2008';
+
+    return $self;
+}
+
+package VS2010Solution;
+
+#
+# Package that encapsulates a Visual Studio 2010 solution file
+#
+
+use Carp;
+use strict;
+use warnings;
+use base qw(Solution);
+
+sub new
+{
+    my $classname = shift;
+    my $self = $classname->SUPER::_new(@_);
+    bless($self, $classname);
+
+    $self->{solutionFileVersion} = '11.00';
+    $self->{vcver} = '10.00';
+    $self->{visualStudioName} = 'Visual Studio 2010';
+
+    return $self;
+}
+
 1;
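
With these per-version subclasses, the header that Solution::Save writes to
pgsql.sln is taken from the solutionFileVersion and visualStudioName fields
instead of being hard-coded. A VS2010Solution, for example, produces

    Microsoft Visual Studio Solution File, Format Version 11.00
    # Visual Studio 2010

while a VS2005Solution keeps emitting the previous "Format Version 9.00" /
"Visual Studio 2005" header.
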
diff --git a/src/tools/msvc/VCBuildProject.pm b/src/tools/msvc/VCBuildProject.pm
new file mode 100644
index 0000000..97439d9
--- /dev/null
+++ b/src/tools/msvc/VCBuildProject.pm
@@ -0,0 +1,267 @@
+package VCBuildProject;
+
+#
+# Package that encapsulates a VCBuild (Visual C++ 2005/2008) project file
+#
+# src/tools/msvc/VCBuildProject.pm
+#
+
+use Carp;
+use strict;
+use warnings;
+use base qw(Project);
+
+sub _new
+{
+    my $classname = shift;
+    my $self = $classname->SUPER::_new(@_);
+    bless($self, $classname);
+
+    $self->{filenameExtension} = '.vcproj';
+
+    return $self;
+}
+
+sub WriteHeader
+{
+    my ($self, $f) = @_;
+
+    print $f <<EOF;
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject ProjectType="Visual C++" Version="$self->{vcver}" Name="$self->{name}" ProjectGUID="$self->{guid}">
+ <Platforms><Platform Name="$self->{platform}"/></Platforms>
+ <Configurations>
+EOF
+    $self->WriteConfiguration($f, 'Debug',
+        { defs=>'_DEBUG;DEBUG=1;', wholeopt=>0, opt=>0, strpool=>'false', runtime=>3 });
+    $self->WriteConfiguration($f, 'Release',
+        { defs=>'', wholeopt=>0, opt=>3, strpool=>'true', runtime=>2 });
+    print $f <<EOF;
+ </Configurations>
+EOF
+    $self->WriteReferences($f);
+}
+
+sub WriteFiles
+{
+    my ($self, $f) = @_;
+    print $f <<EOF;
+ <Files>
+EOF
+    my @dirstack = ();
+    my %uniquefiles;
+    foreach my $fileNameWithPath (sort keys %{ $self->{files} })
+    {
+        confess "Bad format filename '$fileNameWithPath'\n"
+          unless ($fileNameWithPath =~ /^(.*)\\([^\\]+)\.[r]?[cyl]$/);
+        my $dir = $1;
+        my $file = $2;
+
+        # Walk backwards down the directory stack and close any dirs we're done with
+        while ($#dirstack >= 0)
+        {
+            if (join('\\',@dirstack) eq substr($dir, 0, length(join('\\',@dirstack))))
+            {
+                last if (length($dir) == length(join('\\',@dirstack)));
+                last if (substr($dir, length(join('\\',@dirstack)),1) eq '\\');
+            }
+            print $f ' ' x $#dirstack . "  </Filter>\n";
+            pop @dirstack;
+        }
+
+        # Now walk forwards and create whatever directories are needed
+        while (join('\\',@dirstack) ne $dir)
+        {
+            my $left = substr($dir, length(join('\\',@dirstack)));
+            $left =~ s/^\\//;
+            my @pieces = split /\\/, $left;
+            push @dirstack, $pieces[0];
+            print $f ' ' x $#dirstack . "  <Filter Name=\"$pieces[0]\" Filter=\"\">\n";
+        }
+
+        print $f ' ' x $#dirstack . "   <File RelativePath=\"$fileNameWithPath\"";
+        if ($fileNameWithPath =~ /\.y$/)
+        {
+            my $of = $fileNameWithPath;
+            $of =~ s/\.y$/.c/;
+            $of =~ s{^src\\pl\\plpgsql\\src\\gram.c$}{src\\pl\\plpgsql\\src\\pl_gram.c};
+            print $f '>'
+              . $self->GenerateCustomTool('Running bison on ' . $fileNameWithPath,
+                "perl src\\tools\\msvc\\pgbison.pl $fileNameWithPath", $of)
+              . '</File>' . "\n";
+        }
+        elsif ($fileNameWithPath =~ /\.l$/)
+        {
+            my $of = $fileNameWithPath;
+            $of =~ s/\.l$/.c/;
+            print $f '>'
+              . $self->GenerateCustomTool('Running flex on ' . $fileNameWithPath,
+                "perl src\\tools\\msvc\\pgflex.pl $fileNameWithPath", $of)
+              . '</File>' . "\n";
+        }
+        elsif (defined($uniquefiles{$file}))
+        {
+
+            # File already exists, so fake a new name
+            my $obj = $dir;
+            $obj =~ s/\\/_/g;
+            print $f
+"><FileConfiguration Name=\"Debug|$self->{platform}\"><Tool Name=\"VCCLCompilerTool\" ObjectFile=\".\\debug\\$self->{name}\\$obj"
+              . "_$file.obj\" /></FileConfiguration><FileConfiguration Name=\"Release|$self->{platform}\"><Tool Name=\"VCCLCompilerTool\" ObjectFile=\".\\release\\$self->{name}\\$obj"
+              . "_$file.obj\" /></FileConfiguration></File>\n";
+        }
+        else
+        {
+            $uniquefiles{$file} = 1;
+            print $f " />\n";
+        }
+    }
+    while ($#dirstack >= 0)
+    {
+        print $f ' ' x $#dirstack . "  </Filter>\n";
+        pop @dirstack;
+    }
+    print $f <<EOF;
+ </Files>
+EOF
+}
+
+sub Footer
+{
+    my ($self, $f) = @_;
+
+    print $f <<EOF;
+ <Globals/>
+</VisualStudioProject>
+EOF
+}
+
+sub WriteConfiguration
+{
+    my ($self, $f, $cfgname, $p) = @_;
+    my $cfgtype = ($self->{type} eq "exe")?1:($self->{type} eq "dll"?2:4);
+    my $libs = $self->GetAdditionalLinkerDependencies($cfgname, ' ');
+
+    my $targetmachine = $self->{platform} eq 'Win32' ? 1 : 17;
+
+    print $f <<EOF;
+  <Configuration Name="$cfgname|$self->{platform}" OutputDirectory=".\\$cfgname\\$self->{name}" IntermediateDirectory=".\\$cfgname\\$self->{name}"
+	ConfigurationType="$cfgtype" UseOfMFC="0" ATLMinimizesCRunTimeLibraryUsage="FALSE" CharacterSet="2" WholeProgramOptimization="$p->{wholeopt}">
+	<Tool Name="VCCLCompilerTool" Optimization="$p->{opt}"
+		AdditionalIncludeDirectories="$self->{prefixincludes}src/include;src/include/port/win32;src/include/port/win32_msvc;$self->{includes}"
+		PreprocessorDefinitions="WIN32;_WINDOWS;__WINDOWS__;__WIN32__;EXEC_BACKEND;WIN32_STACK_RLIMIT=4194304;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE$self->{defines}$p->{defs}"
+		StringPooling="$p->{strpool}"
+		RuntimeLibrary="$p->{runtime}" DisableSpecificWarnings="$self->{disablewarnings}"
+		AdditionalOptions="/MP"
+EOF
+    print $f <<EOF;
+		AssemblerOutput="0" AssemblerListingLocation=".\\$cfgname\\$self->{name}\\" ObjectFile=".\\$cfgname\\$self->{name}\\"
+		ProgramDataBaseFileName=".\\$cfgname\\$self->{name}\\" BrowseInformation="0"
+		WarningLevel="3" SuppressStartupBanner="TRUE" DebugInformationFormat="3" CompileAs="0"/>
+	<Tool Name="VCLinkerTool" OutputFile=".\\$cfgname\\$self->{name}\\$self->{name}.$self->{type}"
+		AdditionalDependencies="$libs"
+		LinkIncremental="0" SuppressStartupBanner="TRUE" AdditionalLibraryDirectories="" IgnoreDefaultLibraryNames="libc"
+		StackReserveSize="4194304" DisableSpecificWarnings="$self->{disablewarnings}"
+		GenerateDebugInformation="TRUE" ProgramDatabaseFile=".\\$cfgname\\$self->{name}\\$self->{name}.pdb"
+		GenerateMapFile="FALSE" MapFileName=".\\$cfgname\\$self->{name}\\$self->{name}.map"
+		SubSystem="1" TargetMachine="$targetmachine"
+EOF
+    if ($self->{disablelinkerwarnings})
+    {
+        print $f "\t\tAdditionalOptions=\"/ignore:$self->{disablelinkerwarnings}\"\n";
+    }
+    if ($self->{implib})
+    {
+        my $l = $self->{implib};
+        $l =~ s/__CFGNAME__/$cfgname/g;
+        print $f "\t\tImportLibrary=\"$l\"\n";
+    }
+    if ($self->{def})
+    {
+        my $d = $self->{def};
+        $d =~ s/__CFGNAME__/$cfgname/g;
+        print $f "\t\tModuleDefinitionFile=\"$d\"\n";
+    }
+
+    print $f "\t/>\n";
+    print $f
+"\t<Tool Name=\"VCLibrarianTool\" OutputFile=\".\\$cfgname\\$self->{name}\\$self->{name}.lib\" IgnoreDefaultLibraryNames=\"libc\" />\n";
+    print $f
+      "\t<Tool Name=\"VCResourceCompilerTool\" AdditionalIncludeDirectories=\"src\\include\" />\n";
+    if ($self->{builddef})
+    {
+        print $f
+"\t<Tool Name=\"VCPreLinkEventTool\" Description=\"Generate DEF file\" CommandLine=\"perl src\\tools\\msvc\\gendef.pl $cfgname\\$self->{name} $self->{platform}\" />\n";
+    }
+    print $f <<EOF;
+  </Configuration>
+EOF
+}
+
+sub WriteReferences
+{
+    my ($self, $f) = @_;
+    print $f " <References>\n";
+    foreach my $ref (@{$self->{references}})
+    {
+        print $f
+"  <ProjectReference ReferencedProjectIdentifier=\"$ref->{guid}\" Name=\"$ref->{name}\" />\n";
+    }
+    print $f " </References>\n";
+}
+
+sub GenerateCustomTool
+{
+    my ($self, $desc, $tool, $output, $cfg) = @_;
+    if (!defined($cfg))
+    {
+        return $self->GenerateCustomTool($desc, $tool, $output, 'Debug')
+          .$self->GenerateCustomTool($desc, $tool, $output, 'Release');
+    }
+    return
+"<FileConfiguration Name=\"$cfg|$self->{platform}\"><Tool Name=\"VCCustomBuildTool\" Description=\"$desc\" CommandLine=\"$tool\" AdditionalDependencies=\"\" Outputs=\"$output\" /></FileConfiguration>";
+}
+
+package VC2005Project;
+
+#
+# Package that encapsulates a Visual C++ 2005 project file
+#
+
+use strict;
+use warnings;
+use base qw(VCBuildProject);
+
+sub new
+{
+    my $classname = shift;
+    my $self = $classname->SUPER::_new(@_);
+    bless($self, $classname);
+
+    $self->{vcver} = '8.00';
+
+    return $self;
+}
+
+package VC2008Project;
+
+#
+# Package that encapsulates a Visual C++ 2008 project file
+#
+
+use strict;
+use warnings;
+use base qw(VCBuildProject);
+
+sub new
+{
+    my $classname = shift;
+    my $self = $classname->SUPER::_new(@_);
+    bless($self, $classname);
+
+    $self->{vcver} = '9.00';
+
+    return $self;
+}
+
+1;
diff --git a/src/tools/msvc/VSObjectFactory.pm b/src/tools/msvc/VSObjectFactory.pm
new file mode 100644
index 0000000..44db1f6
--- /dev/null
+++ b/src/tools/msvc/VSObjectFactory.pm
@@ -0,0 +1,122 @@
+package VSObjectFactory;
+
+#
+# Package that creates Visual Studio wrapper objects for msvc build
+#
+# src/tools/msvc/VSObjectFactory.pm
+#
+
+use Carp;
+use strict;
+use warnings;
+
+use Exporter;
+use Project;
+use Solution;
+use VCBuildProject;
+use MSBuildProject;
+
+our (@ISA, @EXPORT);
+@ISA = qw(Exporter);
+@EXPORT = qw(CreateSolution CreateProject DetermineVisualStudioVersion);
+
+sub CreateSolution
+{
+    my $visualStudioVersion = shift;
+
+    if (!defined($visualStudioVersion))
+    {
+        $visualStudioVersion = DetermineVisualStudioVersion();
+    }
+
+    if ($visualStudioVersion eq '8.00')
+    {
+        return new VS2005Solution(@_);
+    }
+    elsif ($visualStudioVersion eq '9.00')
+    {
+        return new VS2008Solution(@_);
+    }
+    elsif ($visualStudioVersion eq '10.00')
+    {
+        return new VS2010Solution(@_);
+    }
+    else
+    {
+        croak "The requested Visual Studio version is not supported.";
+    }
+}
+
+sub CreateProject
+{
+    my $visualStudioVersion = shift;
+
+    if (!defined($visualStudioVersion))
+    {
+        $visualStudioVersion = DetermineVisualStudioVersion();
+    }
+
+    if ($visualStudioVersion eq '8.00')
+    {
+        return new VC2005Project(@_);
+    }
+    elsif ($visualStudioVersion eq '9.00')
+    {
+        return new VC2008Project(@_);
+    }
+    elsif ($visualStudioVersion eq '10.00')
+    {
+        return new VC2010Project(@_);
+    }
+    else
+    {
+        croak "The requested Visual Studio version is not supported.";
+    }
+}
+
+sub DetermineVisualStudioVersion
+{
+    my $nmakeVersion = shift;
+
+    if (!defined($nmakeVersion))
+    {
+
+        # Determine the version of the nmake command, to set the proper version of Visual Studio.
+        # We use nmake as it has existed for a long time and still exists in Visual Studio 2010.
+        open(P,"nmake /? 2>&1 |")
+          || croak "Unable to determine Visual Studio version: The nmake command wasn't found.";
+        while(<P>)
+        {
+            chomp;
+            if (/(\d+)\.(\d+)\.\d+(\.\d+)?$/)
+            {
+                return _GetVisualStudioVersion($1, $2);
+            }
+        }
+        close(P);
+    }
+    elsif($nmakeVersion =~ /(\d+)\.(\d+)\.\d+(\.\d+)?$/)
+    {
+        return _GetVisualStudioVersion($1, $2);
+    }
+    croak "Unable to determine Visual Studio version: The nmake version could not be determined.";
+}
+
+sub _GetVisualStudioVersion
+{
+    my($major, $minor) = @_;
+    if ($major > 10)
+    {
+        carp
+"The determined version of Visual Studio is newer than the latest supported version. Returning the latest supported version instead.";
+        return '10.00';
+    }
+    elsif ($major < 6)
+    {
+        croak
+"Unable to determine Visual Studio version: Visual Studio versions before 6.0 aren't supported.";
+    }
+    return "$major.$minor";
+}
+
+1;
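
As a rough sketch of how the factory is driven (normally from
Mkvcbuild::mkvcbuild and Solution::AddProject rather than by hand), assuming
$config is the merged configuration hash and the project name and type below
are purely illustrative:

    use VSObjectFactory;

    # Probe nmake and pick the matching generator (VS 2005, 2008 or 2010)
    my $solution = CreateSolution(undef, $config);

    # Or request a specific generator explicitly, here the VS2008 one
    my $vs2008 = CreateSolution('9.00', $config);

    # Solution::AddProject creates projects the same way, passing its own vcver
    my $proj = CreateProject($solution->{vcver}, 'postgres', 'exe', $solution);
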
diff --git a/src/tools/msvc/build.pl b/src/tools/msvc/build.pl
index 66b5c4c..151849b 100644
--- a/src/tools/msvc/build.pl
+++ b/src/tools/msvc/build.pl
@@ -33,7 +33,7 @@ our $config;
 require "config_default.pl";
 require "config.pl" if (-f "src/tools/msvc/config.pl");
 
-Mkvcbuild::mkvcbuild($config);
+my $vcver = Mkvcbuild::mkvcbuild($config);
 
 # check what sort of build we are doing
 
@@ -50,7 +50,11 @@ elsif ($ARGV[0] ne "RELEASE")
 
 # ... and do it
 
-if ($buildwhat)
+if ($buildwhat and $vcver eq '10.00')
+{
+    system("msbuild $buildwhat.vcxproj /verbosity:detailed /p:Configuration=$bconf");
+}
+elsif ($buildwhat)
 {
     system("vcbuild $buildwhat.vcproj $bconf");
 }
diff --git a/src/tools/msvc/builddoc.pl b/src/tools/msvc/builddoc.pl
index 6531e98..8d8d8a3 100644
--- a/src/tools/msvc/builddoc.pl
+++ b/src/tools/msvc/builddoc.pl
@@ -69,8 +69,8 @@ $cmd =
   ."| findstr /V \"DTDDECL catalog entries are not supported\" ";
 system($cmd); # die "openjade" if $?;
 print "Running collateindex...\n";
-$cmd ="perl \"$docroot/$dsssl/bin/collateindex.pl\" -f -g -i bookindex "
-  ."-o bookindex.sgml HTML.index";
+$cmd =
+  "perl \"$docroot/$dsssl/bin/collateindex.pl\" -f -g -i bookindex "."-o bookindex.sgml HTML.index";
 system($cmd);
 die "collateindex" if $?;
 mkdir "html";
diff --git a/src/tools/msvc/clean.bat b/src/tools/msvc/clean.bat
index a59bbe5..ac31038 100755
--- a/src/tools/msvc/clean.bat
+++ b/src/tools/msvc/clean.bat
@@ -10,8 +10,12 @@ if exist ..\msvc if exist ..\..\..\src cd ..\..\..
 if exist debug rd /s /q debug
 if exist release rd /s /q release
 for %%f in (*.vcproj) do del %%f
+for %%f in (*.vcxproj) do del %%f
+for %%f in (*.vcxproj.user) do del %%f
 if exist pgsql.sln del /q pgsql.sln
 if exist pgsql.sln.cache del /q pgsql.sln.cache
+if exist pgsql.sdf del /q pgsql.sdf
+if exist pgsql.suo del /q /a:H pgsql.suo
 del /s /q src\bin\win32ver.rc 2> NUL
 del /s /q src\interfaces\win32ver.rc 2> NUL
 if exist src\backend\win32ver.rc del /q src\backend\win32ver.rc

commit f132824c24c46d2efab49b4cddd1088781bf499e
Author: Peter Eisentraut <peter_e@gmx.net>
Date:   Mon Jan 2 23:29:16 2012 +0200

    Another fix for pg_regress: Replace exit_nicely() with exit() plus
    atexit() hook

diff --git a/src/test/isolation/isolation_main.c b/src/test/isolation/isolation_main.c
index fab0a01..135bc51 100644
--- a/src/test/isolation/isolation_main.c
+++ b/src/test/isolation/isolation_main.c
@@ -69,7 +69,7 @@ isolation_start_test(const char *testname,
 	{
 		fprintf(stderr, _("could not start process for test %s\n"),
 				testname);
-		exit_nicely(2);
+		exit(2);
 	}
 
 	return pid;

commit bd09111f1f52e3739a24b88a4671f7a4b0ed4c1d
Author: Peter Eisentraut <peter_e@gmx.net>
Date:   Mon Jan 2 22:08:04 2012 +0200

    pg_regress: Replace exit_nicely() with exit() plus atexit() hook

diff --git a/src/interfaces/ecpg/test/pg_regress_ecpg.c b/src/interfaces/ecpg/test/pg_regress_ecpg.c
index 2690c57..2b09242 100644
--- a/src/interfaces/ecpg/test/pg_regress_ecpg.c
+++ b/src/interfaces/ecpg/test/pg_regress_ecpg.c
@@ -34,13 +34,13 @@ ecpg_filter(const char *sourcefile, const char *outfile)
 	if (!s)
 	{
 		fprintf(stderr, "Could not open file %s for reading\n", sourcefile);
-		exit_nicely(2);
+		exit(2);
 	}
 	t = fopen(outfile, "w");
 	if (!t)
 	{
 		fprintf(stderr, "Could not open file %s for writing\n", outfile);
-		exit_nicely(2);
+		exit(2);
 	}
 
 	while (fgets(linebuf, LINEBUFSIZE, s))
@@ -148,7 +148,7 @@ ecpg_start_test(const char *testname,
 	{
 		fprintf(stderr, _("could not start process for test %s\n"),
 				testname);
-		exit_nicely(2);
+		exit(2);
 	}
 
 	free(outfile_stdout);
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index cbf3b77..2f6b37b 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -300,7 +300,7 @@ stop_postmaster(void)
 		{
 			fprintf(stderr, _("\n%s: could not stop postmaster: exit code was %d\n"),
 					progname, r);
-			exit(2);			/* not exit_nicely(), that would be recursive */
+			_exit(2);			/* not exit(), that could be recursive */
 		}
 
 		postmaster_running = false;
@@ -308,17 +308,6 @@ stop_postmaster(void)
 }
 
 /*
- * Always exit through here, not through plain exit(), to ensure we make
- * an effort to shut down a temp postmaster
- */
-void
-exit_nicely(int code)
-{
-	stop_postmaster();
-	exit(code);
-}
-
-/*
  * Check whether string matches pattern
  *
  * In the original shell script, this function was implemented using expr(1),
@@ -444,7 +433,7 @@ convert_sourcefiles_in(char *source_subdir, char *dest_subdir, char *suffix)
 	names = pgfnames(indir);
 	if (!names)
 		/* Error logged in pgfnames */
-		exit_nicely(2);
+		exit(2);
 
 	snprintf(testtablespace, MAXPGPATH, "%s/testtablespace", outputdir);
 
@@ -493,14 +482,14 @@ convert_sourcefiles_in(char *source_subdir, char *dest_subdir, char *suffix)
 		{
 			fprintf(stderr, _("%s: could not open file \"%s\" for reading: %s\n"),
 					progname, srcfile, strerror(errno));
-			exit_nicely(2);
+			exit(2);
 		}
 		outfile = fopen(destfile, "w");
 		if (!outfile)
 		{
 			fprintf(stderr, _("%s: could not open file \"%s\" for writing: %s\n"),
 					progname, destfile, strerror(errno));
-			exit_nicely(2);
+			exit(2);
 		}
 		while (fgets(line, sizeof(line), infile))
 		{
@@ -523,7 +512,7 @@ convert_sourcefiles_in(char *source_subdir, char *dest_subdir, char *suffix)
 	{
 		fprintf(stderr, _("%s: no *.source files found in \"%s\"\n"),
 				progname, indir);
-		exit_nicely(2);
+		exit(2);
 	}
 
 	pgfnames_cleanup(names);
@@ -566,7 +555,7 @@ load_resultmap(void)
 			return;
 		fprintf(stderr, _("%s: could not open file \"%s\" for reading: %s\n"),
 				progname, buf, strerror(errno));
-		exit_nicely(2);
+		exit(2);
 	}
 
 	while (fgets(buf, sizeof(buf), f))
@@ -587,7 +576,7 @@ load_resultmap(void)
 		{
 			fprintf(stderr, _("incorrectly formatted resultmap entry: %s\n"),
 					buf);
-			exit_nicely(2);
+			exit(2);
 		}
 		*file_type++ = '\0';
 
@@ -596,7 +585,7 @@ load_resultmap(void)
 		{
 			fprintf(stderr, _("incorrectly formatted resultmap entry: %s\n"),
 					buf);
-			exit_nicely(2);
+			exit(2);
 		}
 		*platform++ = '\0';
 		expected = strchr(platform, '=');
@@ -604,7 +593,7 @@ load_resultmap(void)
 		{
 			fprintf(stderr, _("incorrectly formatted resultmap entry: %s\n"),
 					buf);
-			exit_nicely(2);
+			exit(2);
 		}
 		*expected++ = '\0';
 
@@ -916,7 +905,7 @@ psql_command(const char *database, const char *query,...)
 	{
 		/* psql probably already reported the error */
 		fprintf(stderr, _("command failed: %s\n"), psql_cmd);
-		exit_nicely(2);
+		exit(2);
 	}
 }
 
@@ -945,7 +934,7 @@ spawn_process(const char *cmdline)
 	{
 		fprintf(stderr, _("%s: could not fork: %s\n"),
 				progname, strerror(errno));
-		exit_nicely(2);
+		exit(2);
 	}
 	if (pid == 0)
 	{
@@ -962,7 +951,7 @@ spawn_process(const char *cmdline)
 		execl(shellprog, shellprog, "-c", cmdline2, (char *) NULL);
 		fprintf(stderr, _("%s: could not exec \"%s\": %s\n"),
 				progname, shellprog, strerror(errno));
-		exit(1);				/* not exit_nicely here... */
+		_exit(1);				/* not exit() here... */
 	}
 	/* in parent */
 	return pid;
@@ -993,7 +982,7 @@ spawn_process(const char *cmdline)
 			FreeLibrary(Advapi32Handle);
 		fprintf(stderr, _("%s: cannot create restricted tokens on this platform\n"),
 				progname);
-		exit_nicely(2);
+		exit(2);
 	}
 
 	/* Open the current token to use as base for the restricted one */
@@ -1001,7 +990,7 @@ spawn_process(const char *cmdline)
 	{
 		fprintf(stderr, _("could not open process token: error code %lu\n"),
 				GetLastError());
-		exit_nicely(2);
+		exit(2);
 	}
 
 	/* Allocate list of SIDs to remove */
@@ -1012,7 +1001,7 @@ spawn_process(const char *cmdline)
 								  SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_POWER_USERS, 0, 0, 0, 0, 0, 0, &dropSids[1].Sid))
 	{
 		fprintf(stderr, _("could not allocate SIDs: error code %lu\n"), GetLastError());
-		exit_nicely(2);
+		exit(2);
 	}
 
 	b = _CreateRestrictedToken(origToken,
@@ -1032,7 +1021,7 @@ spawn_process(const char *cmdline)
 	{
 		fprintf(stderr, _("could not create restricted token: error code %lu\n"),
 				GetLastError());
-		exit_nicely(2);
+		exit(2);
 	}
 
 	cmdline2 = malloc(strlen(cmdline) + 8);
@@ -1056,7 +1045,7 @@ spawn_process(const char *cmdline)
 	{
 		fprintf(stderr, _("could not start process for \"%s\": error code %lu\n"),
 				cmdline2, GetLastError());
-		exit_nicely(2);
+		exit(2);
 	}
 
 	free(cmdline2);
@@ -1144,7 +1133,7 @@ make_directory(const char *dir)
 	{
 		fprintf(stderr, _("%s: could not create directory \"%s\": %s\n"),
 				progname, dir, strerror(errno));
-		exit_nicely(2);
+		exit(2);
 	}
 }
 
@@ -1185,7 +1174,7 @@ run_diff(const char *cmd, const char *filename)
 	if (!WIFEXITED(r) || WEXITSTATUS(r) > 1)
 	{
 		fprintf(stderr, _("diff command failed with status %d: %s\n"), r, cmd);
-		exit_nicely(2);
+		exit(2);
 	}
 #ifdef WIN32
 
@@ -1196,7 +1185,7 @@ run_diff(const char *cmd, const char *filename)
 	if (WEXITSTATUS(r) == 1 && file_size(filename) <= 0)
 	{
 		fprintf(stderr, _("diff command not found: %s\n"), cmd);
-		exit_nicely(2);
+		exit(2);
 	}
 #endif
 
@@ -1371,7 +1360,7 @@ wait_for_tests(PID_TYPE * pids, int *statuses, char **names, int num_tests)
 		{
 			fprintf(stderr, _("failed to wait for subprocesses: %s\n"),
 					strerror(errno));
-			exit_nicely(2);
+			exit(2);
 		}
 #else
 		DWORD		exit_status;
@@ -1382,7 +1371,7 @@ wait_for_tests(PID_TYPE * pids, int *statuses, char **names, int num_tests)
 		{
 			fprintf(stderr, _("failed to wait for subprocesses: error code %lu\n"),
 					GetLastError());
-			exit_nicely(2);
+			exit(2);
 		}
 		p = active_pids[r - WAIT_OBJECT_0];
 		/* compact the active_pids array */
@@ -1468,7 +1457,7 @@ run_schedule(const char *schedule, test_function tfunc)
 	{
 		fprintf(stderr, _("%s: could not open file \"%s\" for reading: %s\n"),
 				progname, schedule, strerror(errno));
-		exit_nicely(2);
+		exit(2);
 	}
 
 	while (fgets(scbuf, sizeof(scbuf), scf))
@@ -1517,7 +1506,7 @@ run_schedule(const char *schedule, test_function tfunc)
 		{
 			fprintf(stderr, _("syntax error in schedule file \"%s\" line %d: %s\n"),
 					schedule, line_num, scbuf);
-			exit_nicely(2);
+			exit(2);
 		}
 
 		num_tests = 0;
@@ -1536,7 +1525,7 @@ run_schedule(const char *schedule, test_function tfunc)
 					/* can't print scbuf here, it's already been trashed */
 					fprintf(stderr, _("too many parallel tests in schedule file \"%s\", line %d\n"),
 							schedule, line_num);
-					exit_nicely(2);
+					exit(2);
 				}
 				tests[num_tests] = c;
 				num_tests++;
@@ -1548,7 +1537,7 @@ run_schedule(const char *schedule, test_function tfunc)
 		{
 			fprintf(stderr, _("syntax error in schedule file \"%s\" line %d: %s\n"),
 					schedule, line_num, scbuf);
-			exit_nicely(2);
+			exit(2);
 		}
 
 		if (num_tests == 1)
@@ -1744,7 +1733,7 @@ open_result_files(void)
 	{
 		fprintf(stderr, _("%s: could not open file \"%s\" for writing: %s\n"),
 				progname, logfilename, strerror(errno));
-		exit_nicely(2);
+		exit(2);
 	}
 
 	/* create the diffs file as empty */
@@ -1755,7 +1744,7 @@ open_result_files(void)
 	{
 		fprintf(stderr, _("%s: could not open file \"%s\" for writing: %s\n"),
 				progname, difffilename, strerror(errno));
-		exit_nicely(2);
+		exit(2);
 	}
 	/* we don't keep the diffs file open continuously */
 	fclose(difffile);
@@ -1853,7 +1842,7 @@ make_absolute_path(const char *in)
 			if (!getcwd(cwdbuf, sizeof(cwdbuf)))
 			{
 				fprintf(stderr, _("could not get current working directory: %s\n"), strerror(errno));
-				exit_nicely(2);
+				exit(2);
 			}
 		}
 
@@ -1952,6 +1941,8 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
 	progname = get_progname(argv[0]);
 	set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_regress"));
 
+	atexit(stop_postmaster);
+
 #ifndef HAVE_UNIX_SOCKETS
 	/* no unix domain sockets available, so change default */
 	hostname = "localhost";
@@ -1969,10 +1960,10 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
 		{
 			case 'h':
 				help();
-				exit_nicely(0);
+				exit(0);
 			case 'V':
 				puts("pg_regress (PostgreSQL) " PG_VERSION);
-				exit_nicely(0);
+				exit(0);
 			case 1:
 
 				/*
@@ -2052,7 +2043,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
 				/* getopt_long already emitted a complaint */
 				fprintf(stderr, _("\nTry \"%s -h\" for more information.\n"),
 						progname);
-				exit_nicely(2);
+				exit(2);
 		}
 	}
 
@@ -2100,7 +2091,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
 		if (!top_builddir)
 		{
 			fprintf(stderr, _("--top-builddir must be specified when using --temp-install\n"));
-			exit_nicely(2);
+			exit(2);
 		}
 
 		if (directory_exists(temp_install))
@@ -2132,7 +2123,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
 		if (system(buf))
 		{
 			fprintf(stderr, _("\n%s: installation failed\nExamine %s/log/install.log for the reason.\nCommand was: %s\n"), progname, outputdir, buf);
-			exit_nicely(2);
+			exit(2);
 		}
 
 		for (sl = extra_install; sl != NULL; sl = sl->next)
@@ -2143,13 +2134,13 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
 				   makeprog, top_builddir, sl->str, temp_install, outputdir);
 #else
 			fprintf(stderr, _("\n%s: --extra-install option not supported on this platform\n"), progname);
-			exit_nicely(2);
+			exit(2);
 #endif
 
 			if (system(buf))
 			{
 				fprintf(stderr, _("\n%s: installation failed\nExamine %s/log/install.log for the reason.\nCommand was: %s\n"), progname, outputdir, buf);
-				exit_nicely(2);
+				exit(2);
 			}
 		}
 
@@ -2164,7 +2155,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
 		if (system(buf))
 		{
 			fprintf(stderr, _("\n%s: initdb failed\nExamine %s/log/initdb.log for the reason.\nCommand was: %s\n"), progname, outputdir, buf);
-			exit_nicely(2);
+			exit(2);
 		}
 
 		/*
@@ -2180,7 +2171,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
 		if (pg_conf == NULL)
 		{
 			fprintf(stderr, _("\n%s: could not open \"%s\" for adding extra config: %s\n"), progname, buf, strerror(errno));
-			exit_nicely(2);
+			exit(2);
 		}
 		fputs("\n# Configuration added by pg_regress\n\n", pg_conf);
 		fputs("max_prepared_transactions = 2\n", pg_conf);
@@ -2194,7 +2185,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
 			if (extra_conf == NULL)
 			{
 				fprintf(stderr, _("\n%s: could not open \"%s\" to read extra config: %s\n"), progname, temp_config, strerror(errno));
-				exit_nicely(2);
+				exit(2);
 			}
 			while (fgets(line_buf, sizeof(line_buf), extra_conf) != NULL)
 				fputs(line_buf, pg_conf);
@@ -2222,7 +2213,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
 					if (!port_specified_by_user)
 						fprintf(stderr, _("%s: could not determine an available port\n"), progname);
 					fprintf(stderr, _("Specify an unused port using the --port option or shut down any conflicting PostgreSQL servers.\n"));
-					exit_nicely(2);
+					exit(2);
 				}
 
 				fprintf(stderr, _("port %d apparently in use, trying %d\n"), port, port + 1);
@@ -2249,7 +2240,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
 		{
 			fprintf(stderr, _("\n%s: could not spawn postmaster: %s\n"),
 					progname, strerror(errno));
-			exit_nicely(2);
+			exit(2);
 		}
 
 		/*
@@ -2273,7 +2264,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
 #endif
 			{
 				fprintf(stderr, _("\n%s: postmaster failed\nExamine %s/log/postmaster.log for the reason\n"), progname, outputdir);
-				exit_nicely(2);
+				exit(2);
 			}
 
 			pg_usleep(1000000L);
@@ -2299,7 +2290,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
 						progname, GetLastError());
 #endif
 
-			exit_nicely(2);
+			exit(2);
 		}
 
 		postmaster_running = true;
@@ -2414,7 +2405,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
 	}
 
 	if (fail_count != 0)
-		exit_nicely(1);
+		exit(1);
 
 	return 0;
 }
diff --git a/src/test/regress/pg_regress.h b/src/test/regress/pg_regress.h
index 8023193..8e096f2 100644
--- a/src/test/regress/pg_regress.h
+++ b/src/test/regress/pg_regress.h
@@ -56,6 +56,5 @@ int regression_main(int argc, char *argv[],
 				init_function ifunc, test_function tfunc);
 void		add_stringlist_item(_stringlist ** listhead, const char *str);
 PID_TYPE	spawn_process(const char *cmdline);
-void		exit_nicely(int code);
 void		replace_string(char *string, char *replace, char *replacement);
 bool		file_exists(const char *file);
diff --git a/src/test/regress/pg_regress_main.c b/src/test/regress/pg_regress_main.c
index 87309ab..f8e1921 100644
--- a/src/test/regress/pg_regress_main.c
+++ b/src/test/regress/pg_regress_main.c
@@ -77,7 +77,7 @@ psql_start_test(const char *testname,
 	{
 		fprintf(stderr, _("could not start process for test %s\n"),
 				testname);
-		exit_nicely(2);
+		exit(2);
 	}
 
 	return pid;

commit ac7a5a3f25708c03242edc301ad008236fc36c7e
Author: Tom Lane <tgl@sss.pgh.pa.us>
Date:   Mon Jan 2 14:43:45 2012 -0500

    Fix coerce_to_target_type for coerce_type's klugy handling of COLLATE.
    
    Because coerce_type recurses into the argument of a CollateExpr,
    coerce_to_target_type's longstanding code for detecting whether coerce_type
    had actually done anything (to wit, returned a different node than it
    passed in) was broken in 9.1.  This resulted in unexpected failures in
    hide_coercion_node; which was not the latter's fault, since it's critical
    that we never call it on anything that wasn't inserted by coerce_type.
    (Else we might decide to "hide" a user-written function call.)
    
    Fix by removing and replacing the CollateExpr in coerce_to_target_type
    itself.  This is all pretty ugly but I don't immediately see a way to make
    it nicer.
    
    Per report from Jean-Yves F. Barbier.

diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c
index e727837..6661a3d 100644
--- a/src/backend/parser/parse_coerce.c
+++ b/src/backend/parser/parse_coerce.c
@@ -79,10 +79,24 @@ coerce_to_target_type(ParseState *pstate, Node *expr, Oid exprtype,
 					  int location)
 {
 	Node	   *result;
+	Node	   *origexpr;
 
 	if (!can_coerce_type(1, &exprtype, &targettype, ccontext))
 		return NULL;
 
+	/*
+	 * If the input has a CollateExpr at the top, strip it off, perform the
+	 * coercion, and put a new one back on.  This is annoying since it
+	 * duplicates logic in coerce_type, but if we don't do this then it's too
+	 * hard to tell whether coerce_type actually changed anything, and we
+	 * *must* know that to avoid possibly calling hide_coercion_node on
+	 * something that wasn't generated by coerce_type.  Note that if there are
+	 * multiple stacked CollateExprs, we just discard all but the topmost.
+	 */
+	origexpr = expr;
+	while (expr && IsA(expr, CollateExpr))
+		expr = (Node *) ((CollateExpr *) expr)->arg;
+
 	result = coerce_type(pstate, expr, exprtype,
 						 targettype, targettypmod,
 						 ccontext, cformat, location);
@@ -98,6 +112,18 @@ coerce_to_target_type(ParseState *pstate, Node *expr, Oid exprtype,
 								(cformat != COERCE_IMPLICIT_CAST),
 								(result != expr && !IsA(result, Const)));
 
+	if (expr != origexpr)
+	{
+		/* Reinstall top CollateExpr */
+		CollateExpr *coll = (CollateExpr *) origexpr;
+		CollateExpr *newcoll = makeNode(CollateExpr);
+
+		newcoll->arg = (Expr *) result;
+		newcoll->collOid = coll->collOid;
+		newcoll->location = coll->location;
+		result = (Node *) newcoll;
+	}
+
 	return result;
 }
 
@@ -318,7 +344,7 @@ coerce_type(ParseState *pstate, Node *node,
 		 * If we have a COLLATE clause, we have to push the coercion
 		 * underneath the COLLATE.	This is really ugly, but there is little
 		 * choice because the above hacks on Consts and Params wouldn't happen
-		 * otherwise.
+		 * otherwise.  This kluge has consequences in coerce_to_target_type.
 		 */
 		CollateExpr *coll = (CollateExpr *) node;
 		CollateExpr *newcoll = makeNode(CollateExpr);
diff --git a/src/test/regress/expected/collate.out b/src/test/regress/expected/collate.out
index dc17fea..a15e691 100644
--- a/src/test/regress/expected/collate.out
+++ b/src/test/regress/expected/collate.out
@@ -574,6 +574,9 @@ ALTER TABLE collate_test22 ADD FOREIGN KEY (f2) REFERENCES collate_test20;
 RESET enable_seqscan;
 RESET enable_hashjoin;
 RESET enable_nestloop;
+-- 9.1 bug with useless COLLATE in an expression subject to length coercion
+CREATE TEMP TABLE vctable (f1 varchar(25));
+INSERT INTO vctable VALUES ('foo' COLLATE "C");
 --
 -- Clean up.  Many of these table names will be re-used if the user is
 -- trying to run any platform-specific collation tests later, so we
diff --git a/src/test/regress/sql/collate.sql b/src/test/regress/sql/collate.sql
index 52d830d..f72f3ed 100644
--- a/src/test/regress/sql/collate.sql
+++ b/src/test/regress/sql/collate.sql
@@ -214,6 +214,11 @@ RESET enable_seqscan;
 RESET enable_hashjoin;
 RESET enable_nestloop;
 
+-- 9.1 bug with useless COLLATE in an expression subject to length coercion
+
+CREATE TEMP TABLE vctable (f1 varchar(25));
+INSERT INTO vctable VALUES ('foo' COLLATE "C");
+
 --
 -- Clean up.  Many of these table names will be re-used if the user is
 -- trying to run any platform-specific collation tests later, so we

commit a8ab8d0eaa96dbaebb646971f8988a3edc28e52c
Author: Bruce Momjian <bruce@momjian.us>
Date:   Mon Jan 2 08:48:59 2012 -0500

    Add comment about skipping binary files for copyright changes.

diff --git a/src/tools/copyright.pl b/src/tools/copyright.pl
index 739f182..db5a2f0 100755
--- a/src/tools/copyright.pl
+++ b/src/tools/copyright.pl
@@ -31,6 +31,8 @@ sub wanted {
     }
 
     return if ! -f $File::Find::name || -l $File::Find::name;
+    # skip file names with binary extensions
+    # How are these updated?  bjm 2012-01-02
     return if ($_ =~ m/\.(ico|bin)$/);
 
     my @lines;

commit 631beeac3598a73dee2c2afa38fa2e734148031b
Author: Tom Lane <tgl@sss.pgh.pa.us>
Date:   Mon Jan 2 00:01:33 2012 -0500

    Use LWSYNC in place of SYNC/ISYNC in PPC spinlocks, where possible.
    
    This is allegedly a win, at least on some PPC implementations, according
    to the PPC ISA documents.  However, as with LWARX hints, some PPC
    platforms give an illegal-instruction failure.  Use the same trick as
    before of assuming that PPC64 platforms will accept it; we might need to
    refine that based on experience, but there are other projects doing
    likewise according to google.
    
    I did not add an assembler compatibility test because LWSYNC has been
    around much longer than hint bits, and it seems unlikely that any
    toolchains currently in use don't recognize it.

diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h
index 810be27..ac45ee6 100644
--- a/src/include/pg_config_manual.h
+++ b/src/include/pg_config_manual.h
@@ -189,6 +189,17 @@
 #endif
 
 /*
+ * On PPC machines, decide whether to use LWSYNC instructions in place of
+ * ISYNC and SYNC.  This provides slightly better performance, but will
+ * result in illegal-instruction failures on some pre-POWER4 machines.
+ * By default we use LWSYNC when building for 64-bit PPC, which should be
+ * safe in nearly all cases.
+ */
+#if defined(__ppc64__) || defined(__powerpc64__)
+#define USE_PPC_LWSYNC
+#endif
+
+/*
  *------------------------------------------------------------------------
  * The following symbols are for enabling debugging code, not for
  * controlling user-visible features or resource limits.
diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h
index 2e5163e..ff7eb14 100644
--- a/src/include/storage/s_lock.h
+++ b/src/include/storage/s_lock.h
@@ -361,6 +361,7 @@ typedef unsigned int slock_t;
 /*
  * NOTE: per the Enhanced PowerPC Architecture manual, v1.0 dated 7-May-2002,
  * an isync is a sufficient synchronization barrier after a lwarx/stwcx loop.
+ * On newer machines, we can use lwsync instead for better performance.
  */
 static __inline__ int
 tas(volatile slock_t *lock)
@@ -382,7 +383,11 @@ tas(volatile slock_t *lock)
 "1:	li      %1,1		\n"
 "	b		3f			\n"
 "2:						\n"
+#ifdef USE_PPC_LWSYNC
+"	lwsync				\n"
+#else
 "	isync				\n"
+#endif
 "	li      %1,0		\n"
 "3:						\n"
 
@@ -392,13 +397,25 @@ tas(volatile slock_t *lock)
 	return _res;
 }
 
-/* PowerPC S_UNLOCK is almost standard but requires a "sync" instruction */
+/*
+ * PowerPC S_UNLOCK is almost standard but requires a "sync" instruction.
+ * On newer machines, we can use lwsync instead for better performance.
+ */
+#ifdef USE_PPC_LWSYNC
+#define S_UNLOCK(lock)	\
+do \
+{ \
+	__asm__ __volatile__ ("	lwsync \n"); \
+	*((volatile slock_t *) (lock)) = 0; \
+} while (0)
+#else
 #define S_UNLOCK(lock)	\
 do \
 { \
 	__asm__ __volatile__ ("	sync \n"); \
 	*((volatile slock_t *) (lock)) = 0; \
 } while (0)
+#endif /* USE_PPC_LWSYNC */
 
 #endif /* powerpc */
 

commit 8496c6cd77e2f5f105fc47315680174157d66647
Author: Tom Lane <tgl@sss.pgh.pa.us>
Date:   Sun Jan 1 22:44:01 2012 -0500

    Use 4-byte slock_t on both PPC and PPC64.
    
    Previously we defined slock_t as 8 bytes on PPC64, but the TAS assembly
    code uses word-wide operations regardless, so that the second word was
    just wasted space.  There doesn't appear to be any performance benefit
    in adding the second word, so get rid of it to simplify the code.

diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h
index cc67be8..2e5163e 100644
--- a/src/include/storage/s_lock.h
+++ b/src/include/storage/s_lock.h
@@ -354,13 +354,10 @@ tas(volatile slock_t *lock)
 #if defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
 #define HAS_TEST_AND_SET
 
-#if defined(__ppc64__) || defined(__powerpc64__)
-typedef unsigned long slock_t;
-#else
 typedef unsigned int slock_t;
-#endif
 
 #define TAS(lock) tas(lock)
+
 /*
  * NOTE: per the Enhanced PowerPC Architecture manual, v1.0 dated 7-May-2002,
  * an isync is a sufficient synchronization barrier after a lwarx/stwcx loop.

commit 5cfa8dd3007d7e953c6a03b0fa2215d97c581b0c
Author: Tom Lane <tgl@sss.pgh.pa.us>
Date:   Sun Jan 1 22:39:59 2012 -0500

    Use mutex hint bit in PPC LWARX instructions, where possible.
    
    The hint bit makes for a small but measurable performance improvement
    in access to contended spinlocks.
    
    On the other hand, some PPC chips give an illegal-instruction failure.
    There doesn't seem to be a completely bulletproof way to tell whether the
    hint bit will cause an illegal-instruction failure other than by trying
    it; but most if not all 64-bit PPC machines should accept it, so follow
    the Linux kernel's lead and assume it's okay to use it in 64-bit builds.
    Of course we must also check whether the assembler accepts the command,
    since even with a recent CPU the toolchain could be old.
    
    Patch by Manabu Ori, significantly modified by me.

diff --git a/configure b/configure
index baa1b97..5cb3d9b 100755
--- a/configure
+++ b/configure
@@ -1635,7 +1635,7 @@ Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
 This configure script is free software; the Free Software Foundation
 gives unlimited permission to copy, distribute and modify it.
 
-Copyright (c) 1996-2011, PostgreSQL Global Development Group
+Copyright (c) 1996-2012, PostgreSQL Global Development Group
 _ACEOF
   exit
 fi
@@ -18207,6 +18207,66 @@ rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
       conftest$ac_exeext conftest.$ac_ext
 fi
 
+# On PPC, check if assembler supports LWARX instruction's mutex hint bit
+case $host_cpu in
+  ppc*|powerpc*)
+    { $as_echo "$as_me:$LINENO: checking whether assembler supports lwarx hint bit" >&5
+$as_echo_n "checking whether assembler supports lwarx hint bit... " >&6; }
+    cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h.  */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h.  */
+
+int
+main ()
+{
+int a = 0; int *p = &a; int r;
+	 __asm__ __volatile__ (" lwarx %0,0,%1,1\n" : "=&r"(r) : "r"(p));
+  ;
+  return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+  (eval "$ac_compile") 2>conftest.er1
+  ac_status=$?
+  grep -v '^ *+' conftest.er1 >conftest.err
+  rm -f conftest.er1
+  cat conftest.err >&5
+  $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); } && {
+	 test -z "$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest.$ac_objext; then
+  pgac_cv_have_ppc_mutex_hint=yes
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	pgac_cv_have_ppc_mutex_hint=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+    { $as_echo "$as_me:$LINENO: result: $pgac_cv_have_ppc_mutex_hint" >&5
+$as_echo "$pgac_cv_have_ppc_mutex_hint" >&6; }
+    if test x"$pgac_cv_have_ppc_mutex_hint" = xyes ; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_PPC_LWARX_MUTEX_HINT 1
+_ACEOF
+
+    fi
+  ;;
+esac
+
 # Check largefile support.  You might think this is a system service not a
 # compiler characteristic, but you'd be wrong.  We must check this before
 # probing existence of related functions such as fseeko, since the largefile
diff --git a/configure.in b/configure.in
index 48db5c3..3f195a6 100644
--- a/configure.in
+++ b/configure.in
@@ -1170,11 +1170,27 @@ if test "$with_krb5" = yes; then
   AC_MSG_CHECKING(for krb5_free_unparsed_name)
   AC_TRY_LINK([#include <krb5.h>],
               [krb5_free_unparsed_name(NULL,NULL);],
-              [AC_DEFINE(HAVE_KRB5_FREE_UNPARSED_NAME, 1, [Define to 1 if you have krb5_free_unparsed_name])
+              [AC_DEFINE(HAVE_KRB5_FREE_UNPARSED_NAME, 1, [Define to 1 if you have krb5_free_unparsed_name.])
 AC_MSG_RESULT(yes)],
               [AC_MSG_RESULT(no)])
 fi
 
+# On PPC, check if assembler supports LWARX instruction's mutex hint bit
+case $host_cpu in
+  ppc*|powerpc*)
+    AC_MSG_CHECKING([whether assembler supports lwarx hint bit])
+    AC_TRY_COMPILE([],
+	[int a = 0; int *p = &a; int r;
+	 __asm__ __volatile__ (" lwarx %0,0,%1,1\n" : "=&r"(r) : "r"(p));],
+	[pgac_cv_have_ppc_mutex_hint=yes],
+	[pgac_cv_have_ppc_mutex_hint=no])
+    AC_MSG_RESULT([$pgac_cv_have_ppc_mutex_hint])
+    if test x"$pgac_cv_have_ppc_mutex_hint" = xyes ; then
+	AC_DEFINE(HAVE_PPC_LWARX_MUTEX_HINT, 1, [Define to 1 if the assembler supports PPC's LWARX mutex hint bit.])
+    fi
+  ;;
+esac
+
 # Check largefile support.  You might think this is a system service not a
 # compiler characteristic, but you'd be wrong.  We must check this before
 # probing existence of related functions such as fseeko, since the largefile
diff --git a/src/include/pg_config.h.in b/src/include/pg_config.h.in
index ef467b7..e1b7fea 100644
--- a/src/include/pg_config.h.in
+++ b/src/include/pg_config.h.in
@@ -275,7 +275,7 @@
 /* Define to 1 if `text.data' is member of `krb5_error'. */
 #undef HAVE_KRB5_ERROR_TEXT_DATA
 
-/* Define to 1 if you have krb5_free_unparsed_name */
+/* Define to 1 if you have krb5_free_unparsed_name. */
 #undef HAVE_KRB5_FREE_UNPARSED_NAME
 
 /* Define to 1 if `client' is member of `krb5_ticket'. */
@@ -384,6 +384,9 @@
 /* Define to 1 if you have the POSIX signal interface. */
 #undef HAVE_POSIX_SIGNALS
 
+/* Define to 1 if the assembler supports PPC's LWARX mutex hint bit. */
+#undef HAVE_PPC_LWARX_MUTEX_HINT
+
 /* Define to 1 if you have the `pstat' function. */
 #undef HAVE_PSTAT
 
diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h
index ac434fa..810be27 100644
--- a/src/include/pg_config_manual.h
+++ b/src/include/pg_config_manual.h
@@ -6,6 +6,9 @@
  * for developers.	If you edit any of these, be sure to do a *full*
  * rebuild (and an initdb if noted).
  *
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
  * src/include/pg_config_manual.h
  *------------------------------------------------------------------------
  */
@@ -171,6 +174,21 @@
 #endif
 
 /*
+ * On PPC machines, decide whether to use the mutex hint bit in LWARX
+ * instructions.  Setting the hint bit will slightly improve spinlock
+ * performance on POWER6 and later machines, but does nothing before that,
+ * and will result in illegal-instruction failures on some pre-POWER4
+ * machines.  By default we use the hint bit when building for 64-bit PPC,
+ * which should be safe in nearly all cases.  You might want to override
+ * this if you are building 32-bit code for a known-recent PPC machine.
+ */
+#ifdef HAVE_PPC_LWARX_MUTEX_HINT /* must have assembler support in any case */
+#if defined(__ppc64__) || defined(__powerpc64__)
+#define USE_PPC_LWARX_MUTEX_HINT
+#endif
+#endif
+
+/*
  *------------------------------------------------------------------------
  * The following symbols are for enabling debugging code, not for
  * controlling user-visible features or resource limits.
diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h
index 98c12db..cc67be8 100644
--- a/src/include/storage/s_lock.h
+++ b/src/include/storage/s_lock.h
@@ -372,7 +372,11 @@ tas(volatile slock_t *lock)
 	int _res;
 
 	__asm__ __volatile__(
+#ifdef USE_PPC_LWARX_MUTEX_HINT
+"	lwarx   %0,0,%3,1	\n"
+#else
 "	lwarx   %0,0,%3		\n"
+#endif
 "	cmpwi   %0,0		\n"
 "	bne     1f			\n"
 "	addi    %0,%0,1		\n"

commit 6b6137e4efebcd767a349099b3e048fbc7755cca
Author: Bruce Momjian <bruce@momjian.us>
Date:   Sun Jan 1 22:27:27 2012 -0500

    Update copyright git skip comment.

diff --git a/src/tools/copyright.pl b/src/tools/copyright.pl
index d52a67e..739f182 100755
--- a/src/tools/copyright.pl
+++ b/src/tools/copyright.pl
@@ -23,7 +23,7 @@ print "Using current year:  $year\n";
 find({wanted => \&wanted, no_chdir => 1}, '.');
 
 sub wanted {
-    # prevent corruption of git indexes, ./.git
+    # prevent corruption of git indexes by ignoring any .git/
     if ($_ eq '.git')
     {
         $File::Find::prune = 1;

commit bed762c81e6a7f62695d6c8acb78f15c8e85342e
Author: Bruce Momjian <bruce@momjian.us>
Date:   Sun Jan 1 19:47:24 2012 -0500

    Skip any .git directory for copyright changes, not just top-level .git
    directories.  Per suggestion from Andrew Dunstan.

diff --git a/src/tools/copyright.pl b/src/tools/copyright.pl
index 79458b5..d52a67e 100755
--- a/src/tools/copyright.pl
+++ b/src/tools/copyright.pl
@@ -24,7 +24,7 @@ find({wanted => \&wanted, no_chdir => 1}, '.');
 
 sub wanted {
     # prevent corruption of git indexes, ./.git
-    if ($File::Find::name =~ m{^\./\.git$})
+    if ($_ eq '.git')
     {
         $File::Find::prune = 1;
         return;

commit b5eb06a22a5a811cbdd065ac52580bbcc59469a0
Author: Bruce Momjian <bruce@momjian.us>
Date:   Sun Jan 1 19:42:07 2012 -0500

    Revert binary change in copyright year adjustment.

diff --git a/src/bin/pgevent/MSG00001.bin b/src/bin/pgevent/MSG00001.bin
index 05c452b..6ac08e5 100644
Binary files a/src/bin/pgevent/MSG00001.bin and b/src/bin/pgevent/MSG00001.bin differ

commit 1358801b7a7afed840b780d7c52e20921a483ed1
Author: Bruce Momjian <bruce@momjian.us>
Date:   Sun Jan 1 19:40:13 2012 -0500

    Revert binary change in copyright year adjustment.

diff --git a/src/port/win32.ico b/src/port/win32.ico
index f948f99..a58ee43 100644
Binary files a/src/port/win32.ico and b/src/port/win32.ico differ

commit f4cd747a4d760600ec9791a95957e993c320ba63
Author: Bruce Momjian <bruce@momjian.us>
Date:   Sun Jan 1 19:36:10 2012 -0500

    Skip 'ico' and 'bin' extensions in copyright changes.

diff --git a/src/tools/copyright.pl b/src/tools/copyright.pl
index 705edaf..79458b5 100755
--- a/src/tools/copyright.pl
+++ b/src/tools/copyright.pl
@@ -31,6 +31,7 @@ sub wanted {
     }
 
     return if ! -f $File::Find::name || -l $File::Find::name;
+    return if ($_ =~ m/\.(ico|bin)$/);
 
     my @lines;
     tie @lines, "Tie::File", $File::Find::name;

commit e126958c2efdfc2d60b978d1fc7a780ff647e8ad
Author: Bruce Momjian <bruce@momjian.us>
Date:   Sun Jan 1 18:01:58 2012 -0500

    Update copyright notices for year 2012.

diff --git a/COPYRIGHT b/COPYRIGHT
index 4babbf3..d5512d1 100644
--- a/COPYRIGHT
+++ b/COPYRIGHT
@@ -1,7 +1,7 @@
 PostgreSQL Database Management System
 (formerly known as Postgres, then as Postgres95)
 
-Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
 
 Portions Copyright (c) 1994, The Regents of the University of California
 
diff --git a/configure b/configure
index bfc7a1f..baa1b97 100755
--- a/configure
+++ b/configure
@@ -9,7 +9,7 @@
 # This configure script is free software; the Free Software Foundation
 # gives unlimited permission to copy, distribute and modify it.
 #
-# Copyright (c) 1996-2011, PostgreSQL Global Development Group
+# Copyright (c) 1996-2012, PostgreSQL Global Development Group
 ## --------------------- ##
 ## M4sh Initialization.  ##
 ## --------------------- ##
diff --git a/configure.in b/configure.in
index 54ca820..48db5c3 100644
--- a/configure.in
+++ b/configure.in
@@ -23,7 +23,7 @@ m4_if(m4_defn([m4_PACKAGE_VERSION]), [2.63], [], [m4_fatal([Autoconf version 2.6
 Untested combinations of 'autoconf' and PostgreSQL versions are not
 recommended.  You can remove the check from 'configure.in' but it is then
 your responsibility whether the result works or not.])])
-AC_COPYRIGHT([Copyright (c) 1996-2011, PostgreSQL Global Development Group])
+AC_COPYRIGHT([Copyright (c) 1996-2012, PostgreSQL Global Development Group])
 AC_CONFIG_SRCDIR([src/backend/access/common/heaptuple.c])
 AC_CONFIG_AUX_DIR(config)
 AC_PREFIX_DEFAULT(/usr/local/pgsql)
diff --git a/contrib/adminpack/adminpack.c b/contrib/adminpack/adminpack.c
index 99fa02e..431a675 100644
--- a/contrib/adminpack/adminpack.c
+++ b/contrib/adminpack/adminpack.c
@@ -3,7 +3,7 @@
  * adminpack.c
  *
  *
- * Copyright (c) 2002-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2002-2012, PostgreSQL Global Development Group
  *
  * Author: Andreas Pflug <pgadmin@pse-consulting.de>
  *
diff --git a/contrib/auto_explain/auto_explain.c b/contrib/auto_explain/auto_explain.c
index b320698..61da6a2 100644
--- a/contrib/auto_explain/auto_explain.c
+++ b/contrib/auto_explain/auto_explain.c
@@ -3,7 +3,7 @@
  * auto_explain.c
  *
  *
- * Copyright (c) 2008-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2008-2012, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
  *	  contrib/auto_explain/auto_explain.c
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index 62c810a..36a8e3e 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -9,7 +9,7 @@
  * Shridhar Daithankar <shridhar_daithankar@persistent.co.in>
  *
  * contrib/dblink/dblink.c
- * Copyright (c) 2001-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2001-2012, PostgreSQL Global Development Group
  * ALL RIGHTS RESERVED;
  *
  * Permission to use, copy, modify, and distribute this software and its
diff --git a/contrib/dblink/dblink.h b/contrib/dblink/dblink.h
index 40de83f..935d283 100644
--- a/contrib/dblink/dblink.h
+++ b/contrib/dblink/dblink.h
@@ -9,7 +9,7 @@
  * Shridhar Daithankar <shridhar_daithankar@persistent.co.in>
  *
  * contrib/dblink/dblink.h
- * Copyright (c) 2001-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2001-2012, PostgreSQL Global Development Group
  * ALL RIGHTS RESERVED;
  *
  * Permission to use, copy, modify, and distribute this software and its
diff --git a/contrib/dict_int/dict_int.c b/contrib/dict_int/dict_int.c
index 6896454..4beaf92 100644
--- a/contrib/dict_int/dict_int.c
+++ b/contrib/dict_int/dict_int.c
@@ -3,7 +3,7 @@
  * dict_int.c
  *	  Text search dictionary for integers
  *
- * Copyright (c) 2007-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2012, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
  *	  contrib/dict_int/dict_int.c
diff --git a/contrib/dict_xsyn/dict_xsyn.c b/contrib/dict_xsyn/dict_xsyn.c
index d47315b..59f977e 100644
--- a/contrib/dict_xsyn/dict_xsyn.c
+++ b/contrib/dict_xsyn/dict_xsyn.c
@@ -3,7 +3,7 @@
  * dict_xsyn.c
  *	  Extended synonym dictionary
  *
- * Copyright (c) 2007-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2012, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
  *	  contrib/dict_xsyn/dict_xsyn.c
diff --git a/contrib/dummy_seclabel/dummy_seclabel.c b/contrib/dummy_seclabel/dummy_seclabel.c
index 2979fec..75e4f8c 100644
--- a/contrib/dummy_seclabel/dummy_seclabel.c
+++ b/contrib/dummy_seclabel/dummy_seclabel.c
@@ -7,7 +7,7 @@
  * perspective, but allows regression testing independent of platform-specific
  * features like SELinux.
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  */
 #include "postgres.h"
diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c
index 1cf3b3c..46394a8 100644
--- a/contrib/file_fdw/file_fdw.c
+++ b/contrib/file_fdw/file_fdw.c
@@ -3,7 +3,7 @@
  * file_fdw.c
  *		  foreign-data wrapper for server-side flat files.
  *
- * Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
  *		  contrib/file_fdw/file_fdw.c
diff --git a/contrib/fuzzystrmatch/fuzzystrmatch.c b/contrib/fuzzystrmatch/fuzzystrmatch.c
index 98b95bf..b9c2165 100644
--- a/contrib/fuzzystrmatch/fuzzystrmatch.c
+++ b/contrib/fuzzystrmatch/fuzzystrmatch.c
@@ -6,7 +6,7 @@
  * Joe Conway <mail@joeconway.com>
  *
  * contrib/fuzzystrmatch/fuzzystrmatch.c
- * Copyright (c) 2001-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2001-2012, PostgreSQL Global Development Group
  * ALL RIGHTS RESERVED;
  *
  * metaphone()
diff --git a/contrib/fuzzystrmatch/levenshtein.c b/contrib/fuzzystrmatch/levenshtein.c
index a84c46a..40035ca 100644
--- a/contrib/fuzzystrmatch/levenshtein.c
+++ b/contrib/fuzzystrmatch/levenshtein.c
@@ -5,7 +5,7 @@
  *
  * Joe Conway <mail@joeconway.com>
  *
- * Copyright (c) 2001-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2001-2012, PostgreSQL Global Development Group
  * ALL RIGHTS RESERVED;
  *
  * levenshtein()
diff --git a/contrib/isn/isn.c b/contrib/isn/isn.c
index ac5f21c..181bbd4 100644
--- a/contrib/isn/isn.c
+++ b/contrib/isn/isn.c
@@ -4,7 +4,7 @@
  *	  PostgreSQL type definitions for ISNs (ISBN, ISMN, ISSN, EAN13, UPC)
  *
  * Author:	German Mendez Bravo (Kronuz)
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
  *	  contrib/isn/isn.c
diff --git a/contrib/isn/isn.h b/contrib/isn/isn.h
index 3f478b5..7a4b3ca 100644
--- a/contrib/isn/isn.h
+++ b/contrib/isn/isn.h
@@ -4,7 +4,7 @@
  *	  PostgreSQL type definitions for ISNs (ISBN, ISMN, ISSN, EAN13, UPC)
  *
  * Author:	German Mendez Bravo (Kronuz)
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
  *	  contrib/isn/isn.h
diff --git a/contrib/pageinspect/fsmfuncs.c b/contrib/pageinspect/fsmfuncs.c
index 0d6bc14..c96d4be 100644
--- a/contrib/pageinspect/fsmfuncs.c
+++ b/contrib/pageinspect/fsmfuncs.c
@@ -9,7 +9,7 @@
  * there's hardly any use case for using these without superuser-rights
  * anyway.
  *
- * Copyright (c) 2007-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2012, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
  *	  contrib/pageinspect/fsmfuncs.c
diff --git a/contrib/pageinspect/heapfuncs.c b/contrib/pageinspect/heapfuncs.c
index fa50655..260ccff 100644
--- a/contrib/pageinspect/heapfuncs.c
+++ b/contrib/pageinspect/heapfuncs.c
@@ -15,7 +15,7 @@
  * there's hardly any use case for using these without superuser-rights
  * anyway.
  *
- * Copyright (c) 2007-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2012, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
  *	  contrib/pageinspect/heapfuncs.c
diff --git a/contrib/pageinspect/rawpage.c b/contrib/pageinspect/rawpage.c
index 362ad84..f51a4e3 100644
--- a/contrib/pageinspect/rawpage.c
+++ b/contrib/pageinspect/rawpage.c
@@ -5,7 +5,7 @@
  *
  * Access-method specific inspection functions are in separate files.
  *
- * Copyright (c) 2007-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2012, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
  *	  contrib/pageinspect/rawpage.c
diff --git a/contrib/passwordcheck/passwordcheck.c b/contrib/passwordcheck/passwordcheck.c
index 711ec9f..8d6dea2 100644
--- a/contrib/passwordcheck/passwordcheck.c
+++ b/contrib/passwordcheck/passwordcheck.c
@@ -3,7 +3,7 @@
  * passwordcheck.c
  *
  *
- * Copyright (c) 2009-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2009-2012, PostgreSQL Global Development Group
  *
  * Author: Laurenz Albe <laurenz.albe@wien.gv.at>
  *
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 8dc3054..434aa71 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -11,7 +11,7 @@
  * disappear!) and also take the entry's mutex spinlock.
  *
  *
- * Copyright (c) 2008-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2008-2012, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
  *	  contrib/pg_stat_statements/pg_stat_statements.c
diff --git a/contrib/pg_upgrade/check.c b/contrib/pg_upgrade/check.c
index 7185f13..8594d26 100644
--- a/contrib/pg_upgrade/check.c
+++ b/contrib/pg_upgrade/check.c
@@ -3,7 +3,7 @@
  *
  *	server checks and output routines
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade/check.c
  */
 
diff --git a/contrib/pg_upgrade/controldata.c b/contrib/pg_upgrade/controldata.c
index 1120d79..8560d88 100644
--- a/contrib/pg_upgrade/controldata.c
+++ b/contrib/pg_upgrade/controldata.c
@@ -3,7 +3,7 @@
  *
  *	controldata functions
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade/controldata.c
  */
 
diff --git a/contrib/pg_upgrade/dump.c b/contrib/pg_upgrade/dump.c
index 2af50bc..772ca37 100644
--- a/contrib/pg_upgrade/dump.c
+++ b/contrib/pg_upgrade/dump.c
@@ -3,7 +3,7 @@
  *
  *	dump functions
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade/dump.c
  */
 
diff --git a/contrib/pg_upgrade/exec.c b/contrib/pg_upgrade/exec.c
index ff9b317..b870ded 100644
--- a/contrib/pg_upgrade/exec.c
+++ b/contrib/pg_upgrade/exec.c
@@ -3,7 +3,7 @@
  *
  *	execution functions
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade/exec.c
  */
 
diff --git a/contrib/pg_upgrade/file.c b/contrib/pg_upgrade/file.c
index 0ea269f..fcf1c44 100644
--- a/contrib/pg_upgrade/file.c
+++ b/contrib/pg_upgrade/file.c
@@ -3,7 +3,7 @@
  *
  *	file system operations
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade/file.c
  */
 
diff --git a/contrib/pg_upgrade/function.c b/contrib/pg_upgrade/function.c
index b154f03..54f139a 100644
--- a/contrib/pg_upgrade/function.c
+++ b/contrib/pg_upgrade/function.c
@@ -3,7 +3,7 @@
  *
  *	server-side function support
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade/function.c
  */
 
diff --git a/contrib/pg_upgrade/info.c b/contrib/pg_upgrade/info.c
index c33e13a..e8361ce 100644
--- a/contrib/pg_upgrade/info.c
+++ b/contrib/pg_upgrade/info.c
@@ -3,7 +3,7 @@
  *
  *	information support functions
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade/info.c
  */
 
diff --git a/contrib/pg_upgrade/option.c b/contrib/pg_upgrade/option.c
index 026f71e..82c88af 100644
--- a/contrib/pg_upgrade/option.c
+++ b/contrib/pg_upgrade/option.c
@@ -3,7 +3,7 @@
  *
  *	options functions
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade/option.c
  */
 
diff --git a/contrib/pg_upgrade/page.c b/contrib/pg_upgrade/page.c
index c8f5774..a790f4e 100644
--- a/contrib/pg_upgrade/page.c
+++ b/contrib/pg_upgrade/page.c
@@ -3,7 +3,7 @@
  *
  *	per-page conversion operations
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade/page.c
  */
 
diff --git a/contrib/pg_upgrade/pg_upgrade.c b/contrib/pg_upgrade/pg_upgrade.c
index 12df463..15b30fc 100644
--- a/contrib/pg_upgrade/pg_upgrade.c
+++ b/contrib/pg_upgrade/pg_upgrade.c
@@ -3,7 +3,7 @@
  *
  *	main source file
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade/pg_upgrade.c
  */
 
diff --git a/contrib/pg_upgrade/pg_upgrade.h b/contrib/pg_upgrade/pg_upgrade.h
index 7d48e9c..58d5201 100644
--- a/contrib/pg_upgrade/pg_upgrade.h
+++ b/contrib/pg_upgrade/pg_upgrade.h
@@ -1,7 +1,7 @@
 /*
  *	pg_upgrade.h
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade/pg_upgrade.h
  */
 
diff --git a/contrib/pg_upgrade/relfilenode.c b/contrib/pg_upgrade/relfilenode.c
index 74d1621..54ee5f0 100644
--- a/contrib/pg_upgrade/relfilenode.c
+++ b/contrib/pg_upgrade/relfilenode.c
@@ -3,7 +3,7 @@
  *
  *	relfilenode functions
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade/relfilenode.c
  */
 
diff --git a/contrib/pg_upgrade/server.c b/contrib/pg_upgrade/server.c
index e45f72b..989af63 100644
--- a/contrib/pg_upgrade/server.c
+++ b/contrib/pg_upgrade/server.c
@@ -3,7 +3,7 @@
  *
  *	database server functions
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade/server.c
  */
 
diff --git a/contrib/pg_upgrade/tablespace.c b/contrib/pg_upgrade/tablespace.c
index df2285f..11fd9d0 100644
--- a/contrib/pg_upgrade/tablespace.c
+++ b/contrib/pg_upgrade/tablespace.c
@@ -3,7 +3,7 @@
  *
  *	tablespace functions
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade/tablespace.c
  */
 
diff --git a/contrib/pg_upgrade/test.sh b/contrib/pg_upgrade/test.sh
index 9aebee9..299b7a5 100644
--- a/contrib/pg_upgrade/test.sh
+++ b/contrib/pg_upgrade/test.sh
@@ -6,7 +6,7 @@
 # runs the regression tests (to put in some data), runs pg_dumpall,
 # runs pg_upgrade, runs pg_dumpall again, compares the dumps.
 #
-# Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+# Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
 # Portions Copyright (c) 1994, Regents of the University of California
 
 set -e
diff --git a/contrib/pg_upgrade/util.c b/contrib/pg_upgrade/util.c
index b7136be..94eaa18 100644
--- a/contrib/pg_upgrade/util.c
+++ b/contrib/pg_upgrade/util.c
@@ -3,7 +3,7 @@
  *
  *	utility functions
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade/util.c
  */
 
diff --git a/contrib/pg_upgrade/version.c b/contrib/pg_upgrade/version.c
index 2d6778f..e8799a4 100644
--- a/contrib/pg_upgrade/version.c
+++ b/contrib/pg_upgrade/version.c
@@ -3,7 +3,7 @@
  *
  *	Postgres-version-specific routines
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade/version.c
  */
 
diff --git a/contrib/pg_upgrade/version_old_8_3.c b/contrib/pg_upgrade/version_old_8_3.c
index 625f4ad..a864107 100644
--- a/contrib/pg_upgrade/version_old_8_3.c
+++ b/contrib/pg_upgrade/version_old_8_3.c
@@ -3,7 +3,7 @@
  *
  *	Postgres-version-specific routines
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade/version_old_8_3.c
  */
 
diff --git a/contrib/pg_upgrade_support/pg_upgrade_support.c b/contrib/pg_upgrade_support/pg_upgrade_support.c
index 47d6580..472f152 100644
--- a/contrib/pg_upgrade_support/pg_upgrade_support.c
+++ b/contrib/pg_upgrade_support/pg_upgrade_support.c
@@ -5,7 +5,7 @@
  *	to control oid and relfilenode assignment, and do other special
  *	hacks needed for pg_upgrade.
  *
- *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ *	Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *	contrib/pg_upgrade_support/pg_upgrade_support.c
  */
 
diff --git a/contrib/pgbench/pgbench.c b/contrib/pgbench/pgbench.c
index 91d86da..9081f09 100644
--- a/contrib/pgbench/pgbench.c
+++ b/contrib/pgbench/pgbench.c
@@ -5,7 +5,7 @@
  * Originally written by Tatsuo Ishii and enhanced by many contributors.
  *
  * contrib/pgbench/pgbench.c
- * Copyright (c) 2000-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2000-2012, PostgreSQL Global Development Group
  * ALL RIGHTS RESERVED;
  *
  * Permission to use, copy, modify, and distribute this software and its
diff --git a/contrib/sepgsql/database.c b/contrib/sepgsql/database.c
index 3faef63..be3a7be 100644
--- a/contrib/sepgsql/database.c
+++ b/contrib/sepgsql/database.c
@@ -4,7 +4,7 @@
  *
  * Routines corresponding to database objects
  *
- * Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *
  * -------------------------------------------------------------------------
  */
diff --git a/contrib/sepgsql/dml.c b/contrib/sepgsql/dml.c
index 3199337..17aa41c 100644
--- a/contrib/sepgsql/dml.c
+++ b/contrib/sepgsql/dml.c
@@ -4,7 +4,7 @@
  *
  * Routines to handle DML permission checks
  *
- * Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *
  * -------------------------------------------------------------------------
  */
diff --git a/contrib/sepgsql/hooks.c b/contrib/sepgsql/hooks.c
index 8232971..47437ba 100644
--- a/contrib/sepgsql/hooks.c
+++ b/contrib/sepgsql/hooks.c
@@ -4,7 +4,7 @@
  *
  * Entrypoints of the hooks in PostgreSQL, and dispatches the callbacks.
  *
- * Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *
  * -------------------------------------------------------------------------
  */
diff --git a/contrib/sepgsql/label.c b/contrib/sepgsql/label.c
index a2bf571..2ab7a6f 100644
--- a/contrib/sepgsql/label.c
+++ b/contrib/sepgsql/label.c
@@ -4,7 +4,7 @@
  *
  * Routines to support SELinux labels (security context)
  *
- * Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *
  * -------------------------------------------------------------------------
  */
diff --git a/contrib/sepgsql/launcher b/contrib/sepgsql/launcher
index 9e5ecdc..038ef71 100755
--- a/contrib/sepgsql/launcher
+++ b/contrib/sepgsql/launcher
@@ -2,7 +2,7 @@
 #
 # A wrapper script to launch psql command in regression test
 #
-# Copyright (c) 2010-2011, PostgreSQL Global Development Group
+# Copyright (c) 2010-2012, PostgreSQL Global Development Group
 #
 # -------------------------------------------------------------------------
 
diff --git a/contrib/sepgsql/proc.c b/contrib/sepgsql/proc.c
index 14231c4..b902797 100644
--- a/contrib/sepgsql/proc.c
+++ b/contrib/sepgsql/proc.c
@@ -4,7 +4,7 @@
  *
  * Routines corresponding to procedure objects
  *
- * Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *
  * -------------------------------------------------------------------------
  */
diff --git a/contrib/sepgsql/relation.c b/contrib/sepgsql/relation.c
index b4abc8e..efce914 100644
--- a/contrib/sepgsql/relation.c
+++ b/contrib/sepgsql/relation.c
@@ -4,7 +4,7 @@
  *
  * Routines corresponding to relation/attribute objects
  *
- * Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *
  * -------------------------------------------------------------------------
  */
diff --git a/contrib/sepgsql/schema.c b/contrib/sepgsql/schema.c
index c8bb8c9..90dca1d 100644
--- a/contrib/sepgsql/schema.c
+++ b/contrib/sepgsql/schema.c
@@ -4,7 +4,7 @@
  *
  * Routines corresponding to schema objects
  *
- * Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *
  * -------------------------------------------------------------------------
  */
diff --git a/contrib/sepgsql/selinux.c b/contrib/sepgsql/selinux.c
index d693d63..8819b8c 100644
--- a/contrib/sepgsql/selinux.c
+++ b/contrib/sepgsql/selinux.c
@@ -5,7 +5,7 @@
  * Interactions between userspace and selinux in kernelspace,
  * using libselinux api.
  *
- * Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *
  * -------------------------------------------------------------------------
  */
diff --git a/contrib/sepgsql/sepgsql.h b/contrib/sepgsql/sepgsql.h
index 33b219f..c93da7a 100644
--- a/contrib/sepgsql/sepgsql.h
+++ b/contrib/sepgsql/sepgsql.h
@@ -4,7 +4,7 @@
  *
  * Definitions corresponding to SE-PostgreSQL
  *
- * Copyright (c) 2010-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2012, PostgreSQL Global Development Group
  *
  * -------------------------------------------------------------------------
  */
diff --git a/contrib/sepgsql/uavc.c b/contrib/sepgsql/uavc.c
index affe0a2..905f87d 100644
--- a/contrib/sepgsql/uavc.c
+++ b/contrib/sepgsql/uavc.c
@@ -6,7 +6,7 @@
  * access control decisions recently used, and reduce number of kernel
  * invocations to avoid unnecessary performance hit.
  *
- * Copyright (c) 2011, PostgreSQL Global Development Group
+ * Copyright (c) 2011-2012, PostgreSQL Global Development Group
  *
  * -------------------------------------------------------------------------
  */
diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c
index 4fd68cd..65a47011 100644
--- a/contrib/tablefunc/tablefunc.c
+++ b/contrib/tablefunc/tablefunc.c
@@ -10,7 +10,7 @@
  * And contributors:
  * Nabil Sayegh <postgresql@e-trolley.de>
  *
- * Copyright (c) 2002-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2002-2012, PostgreSQL Global Development Group
  *
  * Permission to use, copy, modify, and distribute this software and its
  * documentation for any purpose, without fee, and without a written agreement
diff --git a/contrib/tablefunc/tablefunc.h b/contrib/tablefunc/tablefunc.h
index a0e03e2..d9670f5 100644
--- a/contrib/tablefunc/tablefunc.h
+++ b/contrib/tablefunc/tablefunc.h
@@ -10,7 +10,7 @@
  * And contributors:
  * Nabil Sayegh <postgresql@e-trolley.de>
  *
- * Copyright (c) 2002-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2002-2012, PostgreSQL Global Development Group
  *
  * Permission to use, copy, modify, and distribute this software and its
  * documentation for any purpose, without fee, and without a written agreement
diff --git a/contrib/test_parser/test_parser.c b/contrib/test_parser/test_parser.c
index 8e4c7a3..c27d7d3 100644
--- a/contrib/test_parser/test_parser.c
+++ b/contrib/test_parser/test_parser.c
@@ -3,7 +3,7 @@
  * test_parser.c
  *	  Simple example of a text search parser
  *
- * Copyright (c) 2007-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2012, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
  *	  contrib/test_parser/test_parser.c
diff --git a/contrib/tsearch2/tsearch2.c b/contrib/tsearch2/tsearch2.c
index e4a2ac6..968bd80 100644
--- a/contrib/tsearch2/tsearch2.c
+++ b/contrib/tsearch2/tsearch2.c
@@ -3,7 +3,7 @@
  * tsearch2.c
  *		Backwards-compatibility package for old contrib/tsearch2 API
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  *
  *
  * IDENTIFICATION
diff --git a/contrib/unaccent/unaccent.c b/contrib/unaccent/unaccent.c
index d22f5c7..a39285e 100644
--- a/contrib/unaccent/unaccent.c
+++ b/contrib/unaccent/unaccent.c
@@ -3,7 +3,7 @@
  * unaccent.c
  *	  Text search unaccent dictionary
  *
- * Copyright (c) 2009-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2009-2012, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
  *	  contrib/unaccent/unaccent.c
diff --git a/contrib/uuid-ossp/uuid-ossp.c b/contrib/uuid-ossp/uuid-ossp.c
index 2be278d..d4fc62b 100644
--- a/contrib/uuid-ossp/uuid-ossp.c
+++ b/contrib/uuid-ossp/uuid-ossp.c
@@ -2,7 +2,7 @@
  *
  * UUID generation functions using the OSSP UUID library
  *
- * Copyright (c) 2007-2011, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2012, PostgreSQL Global Development Group
  *
  * contrib/uuid-ossp/uuid-ossp.c
  *
diff --git a/contrib/vacuumlo/vacuumlo.c b/contrib/vacuumlo/vacuumlo.c
index 21b6dbf..974172e 100644
--- a/contrib/vacuumlo/vacuumlo.c
+++ b/contrib/vacuumlo/vacuumlo.c
@@ -3,7 +3,7 @@
  * vacuumlo.c
  *	  This removes orphaned large objects from a database.
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
diff --git a/doc/src/sgml/generate-errcodes-table.pl b/doc/src/sgml/generate-errcodes-table.pl
index 0ac020e..e294574 100644
--- a/doc/src/sgml/generate-errcodes-table.pl
+++ b/doc/src/sgml/generate-errcodes-table.pl
@@ -1,7 +1,7 @@
 #!/usr/bin/perl
 #
 # Generate the errcodes-table.sgml file from errcodes.txt
-# Copyright (c) 2000-2011, PostgreSQL Global Development Group
+# Copyright (c) 2000-2012, PostgreSQL Global Development Group
 
 use warnings;
 use strict;
diff --git a/doc/src/sgml/legal.sgml b/doc/src/sgml/legal.sgml
index 4531686..ccf50d6 100644
--- a/doc/src/sgml/legal.sgml
+++ b/doc/src/sgml/legal.sgml
@@ -1,7 +1,7 @@
 <!-- doc/src/sgml/legal.sgml -->
 
 <copyright>
- <year>1996-2011</year>
+ <year>1996-2012</year>
  <holder>The PostgreSQL Global Development Group</holder>
 </copyright>
 
@@ -9,7 +9,7 @@
  <title>Legal Notice</title>
 
  <para>
-  <productname>PostgreSQL</productname> is Copyright &copy; 1996-2011
+  <productname>PostgreSQL</productname> is Copyright &copy; 1996-2012
   by the PostgreSQL Global Development Group and is distributed under
   the terms of the license of the University of California below.
  </para>
diff --git a/src/backend/Makefile b/src/backend/Makefile
index ec82d8d..0c763dd 100644
--- a/src/backend/Makefile
+++ b/src/backend/Makefile
@@ -2,7 +2,7 @@
 #
 # Makefile for the postgres backend
 #
-# Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+# Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
 # Portions Copyright (c) 1994, Regents of the University of California
 #
 # src/backend/Makefile
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 36b3af8..08d2b21 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -45,7 +45,7 @@
  * and we'd like to still refer to them via C struct offsets.
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index 85c4319..76c76e9 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -4,7 +4,7 @@
  *	   This file contains index tuple accessor and mutator routines,
  *	   as well as various tuple utilities.
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index afce6d5..4616106 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -5,7 +5,7 @@
  *	  clients and standalone backends are supported here).
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 7220e0e..09a7b6f 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -3,7 +3,7 @@
  * reloptions.c
  *	  Core support for relation options (pg_class.reloptions)
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
diff --git a/src/backend/access/common/scankey.c b/src/backend/access/common/scankey.c
index c879b8a..37d77ed 100644
--- a/src/backend/access/common/scankey.c
+++ b/src/backend/access/common/scankey.c
@@ -3,7 +3,7 @@
  * scankey.c
  *	  scan key support code
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
diff --git a/src/backend/access/common/tupconvert.c b/src/backend/access/common/tupconvert.c
index 34e5f11..f5a43d4 100644
--- a/src/backend/access/common/tupconvert.c
+++ b/src/backend/access/common/tupconvert.c
@@ -9,7 +9,7 @@
  * executor's "junkfilter" routines, but these functions work on bare
  * HeapTuples rather than TupleTableSlots.
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index ee40486..1f40b7c 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -3,7 +3,7 @@
  * tupdesc.c
  *	  POSTGRES tuple descriptor support code
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
diff --git a/src/backend/access/gin/ginarrayproc.c b/src/backend/access/gin/ginarrayproc.c
index 2de5860..f1f9343 100644
--- a/src/backend/access/gin/ginarrayproc.c
+++ b/src/backend/access/gin/ginarrayproc.c
@@ -4,7 +4,7 @@
  *	  support functions for GIN's indexing of any array
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index eefbd10..b160551 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -4,7 +4,7 @@
  *	  page utilities routines for the postgres inverted index access method.
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gin/ginbulk.c b/src/backend/access/gin/ginbulk.c
index 9e5bab1..ddae862 100644
--- a/src/backend/access/gin/ginbulk.c
+++ b/src/backend/access/gin/ginbulk.c
@@ -4,7 +4,7 @@
  *	  routines for fast build of inverted index
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index 7906098..1360146 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -4,7 +4,7 @@
  *	  page utilities routines for the postgres inverted index access method.
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c
index d4de91f..a9482da 100644
--- a/src/backend/access/gin/ginentrypage.c
+++ b/src/backend/access/gin/ginentrypage.c
@@ -4,7 +4,7 @@
  *	  page utilities routines for the postgres inverted index access method.
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 8ae09c3..d201c68 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -7,7 +7,7 @@
  *	  transfer pending entries into the regular index structure.  This
  *	  wins because bulk insertion is much more efficient than retail.
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index a6504c9..022bd27 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -4,7 +4,7 @@
  *	  fetch tuples from a GIN scan.
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index bea5f68..fe06bdc 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -4,7 +4,7 @@
  *	  insert routines for the postgres inverted index access method.
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c
index 73f72f2..3cd9683 100644
--- a/src/backend/access/gin/ginscan.c
+++ b/src/backend/access/gin/ginscan.c
@@ -4,7 +4,7 @@
  *	  routines to manage scans of inverted index relations
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index f18b369..e47abee 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -4,7 +4,7 @@
  *	  utilities routines for the postgres inverted index access method.
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index e5d61b5..5418fa0 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -4,7 +4,7 @@
  *	  delete & vacuum routines for the postgres GIN
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c
index e67a0aa..94051aa 100644
--- a/src/backend/access/gin/ginxlog.c
+++ b/src/backend/access/gin/ginxlog.c
@@ -4,7 +4,7 @@
  *	  WAL replay logic for inverted index.
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 0ce56b8..ae6309d 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -4,7 +4,7 @@
  *	  interface routines for the postgres GiST index access method.
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c
index be1b202..021a8dc 100644
--- a/src/backend/access/gist/gistbuild.c
+++ b/src/backend/access/gist/gistbuild.c
@@ -4,7 +4,7 @@
  *	  build algorithm for GiST indexes implementation.
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c
index 1c11fb3..2a5f7b34 100644
--- a/src/backend/access/gist/gistbuildbuffers.c
+++ b/src/backend/access/gist/gistbuildbuffers.c
@@ -4,7 +4,7 @@
  *	  node buffer management functions for GiST buffering build algorithm.
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index b565d09..73551ec 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -4,7 +4,7 @@
  *	  fetch tuples from a GiST scan.
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c
index 2b68e21..7220b39 100644
--- a/src/backend/access/gist/gistproc.c
+++ b/src/backend/access/gist/gistproc.c
@@ -6,7 +6,7 @@
  * This gives R-tree behavior, with Guttman's poly-time split algorithm.
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index e6140a1..bf139de 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -4,7 +4,7 @@
  *	  routines to manage scans on GiST index relations
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gist/gistsplit.c b/src/backend/access/gist/gistsplit.c
index bd846ce..33b071e 100644
--- a/src/backend/access/gist/gistsplit.c
+++ b/src/backend/access/gist/gistsplit.c
@@ -4,7 +4,7 @@
  *	  Split page algorithm
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index 8e57d90..ad27b63 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -4,7 +4,7 @@
  *	  utilities routines for the postgres GiST index access method.
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c
index d650645..2cd4efa 100644
--- a/src/backend/access/gist/gistvacuum.c
+++ b/src/backend/access/gist/gistvacuum.c
@@ -4,7 +4,7 @@
  *	  vacuuming routines for the postgres GiST index access method.
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c
index 8c32646..76029d9 100644
--- a/src/backend/access/gist/gistxlog.c
+++ b/src/backend/access/gist/gistxlog.c
@@ -4,7 +4,7 @@
  *	  WAL replay logic for GiST.
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 770b3ef..8802669 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -3,7 +3,7 @@
  * hash.c
  *	  Implementation of Margo Seltzer's Hashing package for postgres.
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index 897bf9c..0e4cf8e 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -3,7 +3,7 @@
  * hashfunc.c
  *	  Support functions for hash access method.
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index 54158f4..66084f4 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -3,7 +3,7 @@
  * hashinsert.c
  *	  Item insertion in hash tables for Postgres.
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index a2a5bb7..130c296 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -3,7 +3,7 @@
  * hashovfl.c
  *	  Overflow page management code for the Po