diff --git a/src/bin/pg_dump/dumputils.c b/src/bin/pg_dump/dumputils.c
index 9b30629..f8ab57b 100644
*** a/src/bin/pg_dump/dumputils.c
--- b/src/bin/pg_dump/dumputils.c
***************
*** 16,21 ****
--- 16,22 ----
  
  #include <ctype.h>
  
+ #include "dumpmem.h"
  #include "dumputils.h"
  #include "pg_backup.h"
  
*************** static struct
*** 39,44 ****
--- 40,46 ----
  } on_exit_nicely_list[MAX_ON_EXIT_NICELY];
  
  static int on_exit_nicely_index;
+ void (*on_exit_msg_func)(const char *modulename, const char *fmt, va_list ap) = vwrite_msg;
  
  #define supports_grant_options(version) ((version) >= 70400)
  
*************** static bool parseAclItem(const char *ite
*** 49,54 ****
--- 51,57 ----
  static char *copyAclUserName(PQExpBuffer output, char *input);
  static void AddAcl(PQExpBuffer aclbuf, const char *keyword,
  	   const char *subname);
+ static PQExpBuffer getThreadLocalPQExpBuffer(void);
  
  #ifdef WIN32
  static bool parallel_init_done = false;
*************** init_parallel_dump_utils(void)
*** 70,84 ****
  }
  
  /*
!  *	Quotes input string if it's not a legitimate SQL identifier as-is.
!  *
!  *	Note that the returned string must be used before calling fmtId again,
!  *	since we re-use the same return buffer each time.  Non-reentrant but
!  *	reduces memory leakage. (On Windows the memory leakage will be one buffer
!  *	per thread, which is at least better than one per call).
   */
! const char *
! fmtId(const char *rawid)
  {
  	/*
  	 * The Tls code goes awry if we use a static var, so we provide for both
--- 73,83 ----
  }
  
  /*
!  * Non-reentrant but reduces memory leakage. (On Windows the memory leakage
!  * will be one buffer per thread, which is at least better than one per call).
   */
! static PQExpBuffer
! getThreadLocalPQExpBuffer(void)
  {
  	/*
  	 * The Tls code goes awry if we use a static var, so we provide for both
*************** fmtId(const char *rawid)
*** 87,95 ****
  	static PQExpBuffer s_id_return = NULL;
  	PQExpBuffer id_return;
  
- 	const char *cp;
- 	bool		need_quotes = false;
- 
  #ifdef WIN32
  	if (parallel_init_done)
  		id_return = (PQExpBuffer) TlsGetValue(tls_index);		/* 0 when not set */
--- 86,91 ----
*************** fmtId(const char *rawid)
*** 119,124 ****
--- 115,137 ----
  
  	}
  
+ 	return id_return;
+ }
+ 
+ /*
+  *	Quotes input string if it's not a legitimate SQL identifier as-is.
+  *
+  *	Note that the returned string must be used before calling fmtId again,
+  *	since we re-use the same return buffer each time.
+  */
+ const char *
+ fmtId(const char *rawid)
+ {
+ 	PQExpBuffer id_return = getThreadLocalPQExpBuffer();
+ 
+ 	const char *cp;
+ 	bool		need_quotes = false;
+ 
  	/*
  	 * These checks need to match the identifier production in scan.l. Don't
  	 * use islower() etc.
*************** fmtId(const char *rawid)
*** 186,191 ****
--- 199,233 ----
  	return id_return->data;
  }
  
+ /*
+  * fmtQualifiedId - convert a qualified name to the proper format for
+  * the source database.
+  *
+  * Like fmtId, use the result before calling again.
+  *
+  * Since we call fmtId and it also uses getThreadLocalPQExpBuffer() we cannot
+  * use it until we're finished with calling fmtId().
+  */
+ const char *
+ fmtQualifiedId(int remoteVersion, const char *schema, const char *id)
+ {
+ 	PQExpBuffer id_return;
+ 	PQExpBuffer lcl_pqexp = createPQExpBuffer();
+ 
+ 	/* Suppress schema name if fetching from pre-7.3 DB */
+ 	if (remoteVersion >= 70300 && schema && *schema)
+ 	{
+ 		appendPQExpBuffer(lcl_pqexp, "%s.", fmtId(schema));
+ 	}
+ 	appendPQExpBuffer(lcl_pqexp, "%s", fmtId(id));
+ 
+ 	id_return = getThreadLocalPQExpBuffer();
+ 
+ 	appendPQExpBuffer(id_return, "%s", lcl_pqexp->data);
+ 	destroyPQExpBuffer(lcl_pqexp);
+ 
+ 	return id_return->data;
+ }
  
  /*
   * Convert a string value to an SQL string literal and append it to
*************** exit_horribly(const char *modulename, co
*** 1273,1279 ****
  	va_list		ap;
  
  	va_start(ap, fmt);
! 	vwrite_msg(modulename, fmt, ap);
  	va_end(ap);
  
  	exit_nicely(1);
--- 1315,1321 ----
  	va_list		ap;
  
  	va_start(ap, fmt);
! 	on_exit_msg_func(modulename, fmt, ap);
  	va_end(ap);
  
  	exit_nicely(1);
*************** on_exit_nicely(on_exit_nicely_callback f
*** 1319,1331 ****
  	on_exit_nicely_index++;
  }
  
! /* Run accumulated on_exit_nicely callbacks and then exit quietly. */
  void
  exit_nicely(int code)
  {
! 	while (--on_exit_nicely_index >= 0)
! 		(*on_exit_nicely_list[on_exit_nicely_index].function)(code,
! 			on_exit_nicely_list[on_exit_nicely_index].arg);
  #ifdef WIN32
  	if (parallel_init_done && GetCurrentThreadId() != mainThreadId)
  		ExitThread(code);
--- 1361,1374 ----
  	on_exit_nicely_index++;
  }
  
! /* Run accumulated on_exit_nicely callbacks in reverse order and then exit quietly. */
  void
  exit_nicely(int code)
  {
! 	int i;
! 	for (i = on_exit_nicely_index - 1; i >= 0; i--)
! 		(*on_exit_nicely_list[i].function)(code,
! 			on_exit_nicely_list[i].arg);
  #ifdef WIN32
  	if (parallel_init_done && GetCurrentThreadId() != mainThreadId)
  		ExitThread(code);
diff --git a/src/bin/pg_dump/dumputils.h b/src/bin/pg_dump/dumputils.h
index 82cf940..9c374c6 100644
*** a/src/bin/pg_dump/dumputils.h
--- b/src/bin/pg_dump/dumputils.h
*************** extern const char *progname;
*** 24,29 ****
--- 24,31 ----
  
  extern void init_parallel_dump_utils(void);
  extern const char *fmtId(const char *identifier);
+ extern const char *fmtQualifiedId(int remoteVersion,
+ 								  const char *schema, const char *id);
  extern void appendStringLiteral(PQExpBuffer buf, const char *str,
  					int encoding, bool std_strings);
  extern void appendStringLiteralConn(PQExpBuffer buf, const char *str,
*************** extern void exit_horribly(const char *mo
*** 60,65 ****
--- 62,69 ----
  				__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3), noreturn));
  extern void set_section (const char *arg, int *dumpSections);
  
+ extern void (*on_exit_msg_func)(const char *modulename, const char *fmt, va_list ap)
+ 				__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 0)));
  typedef void (*on_exit_nicely_callback) (int code, void *arg);
  extern void on_exit_nicely(on_exit_nicely_callback function, void *arg);
  extern void exit_nicely(int code) __attribute__((noreturn));
diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index 61c6863..22c19fe 100644
*** a/src/bin/pg_dump/pg_backup.h
--- b/src/bin/pg_dump/pg_backup.h
*************** struct Archive
*** 89,94 ****
--- 89,96 ----
  	int			minRemoteVersion;		/* allowable range */
  	int			maxRemoteVersion;
  
+ 	int			numWorkers;		/* number of parallel processes */
+ 
  	/* info needed for string escaping */
  	int			encoding;		/* libpq code for client_encoding */
  	bool		std_strings;	/* standard_conforming_strings */
*************** typedef struct _restoreOptions
*** 149,155 ****
  	int			suppressDumpWarnings;	/* Suppress output of WARNING entries
  										 * to stderr */
  	bool		single_txn;
- 	int			number_of_jobs;
  
  	bool	   *idWanted;		/* array showing which dump IDs to emit */
  } RestoreOptions;
--- 151,156 ----
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index e292659..6e21c09 100644
*** a/src/bin/pg_dump/pg_backup_archiver.c
--- b/src/bin/pg_dump/pg_backup_archiver.c
*************** static teReqs _tocEntryRequired(TocEntry
*** 141,147 ****
  static bool _tocEntryIsACL(TocEntry *te);
  static void _disableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt);
  static void _enableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt);
- static TocEntry *getTocEntryByDumpId(ArchiveHandle *AH, DumpId id);
  static void _moveBefore(ArchiveHandle *AH, TocEntry *pos, TocEntry *te);
  static int	_discoverArchiveFormat(ArchiveHandle *AH);
  
--- 141,146 ----
*************** static void RestoreOutput(ArchiveHandle
*** 154,160 ****
  
  static int restore_toc_entry(ArchiveHandle *AH, TocEntry *te,
  				  RestoreOptions *ropt, bool is_parallel);
! static void restore_toc_entries_parallel(ArchiveHandle *AH);
  static thandle spawn_restore(RestoreArgs *args);
  static thandle reap_child(ParallelSlot *slots, int n_slots, int *work_status);
  static bool work_in_progress(ParallelSlot *slots, int n_slots);
--- 153,161 ----
  
  static int restore_toc_entry(ArchiveHandle *AH, TocEntry *te,
  				  RestoreOptions *ropt, bool is_parallel);
! static void restore_toc_entries_prefork(ArchiveHandle *AH);
! static void restore_toc_entries_parallel(ArchiveHandle *AH, TocEntry *pending_list);
! static void restore_toc_entries_postfork(ArchiveHandle *AH, TocEntry *pending_list);
  static thandle spawn_restore(RestoreArgs *args);
  static thandle reap_child(ParallelSlot *slots, int n_slots, int *work_status);
  static bool work_in_progress(ParallelSlot *slots, int n_slots);
*************** static void reduce_dependencies(ArchiveH
*** 178,185 ****
  					TocEntry *ready_list);
  static void mark_create_done(ArchiveHandle *AH, TocEntry *te);
  static void inhibit_data_for_failed_table(ArchiveHandle *AH, TocEntry *te);
- static ArchiveHandle *CloneArchive(ArchiveHandle *AH);
- static void DeCloneArchive(ArchiveHandle *AH);
  
  static void setProcessIdentifier(ParallelStateEntry *pse, ArchiveHandle *AH);
  static void unsetProcessIdentifier(ParallelStateEntry *pse);
--- 179,184 ----
*************** RestoreArchive(Archive *AHX, RestoreOpti
*** 272,278 ****
  	/*
  	 * If we're going to do parallel restore, there are some restrictions.
  	 */
! 	parallel_mode = (ropt->number_of_jobs > 1 && ropt->useDB);
  	if (parallel_mode)
  	{
  		/* We haven't got round to making this work for all archive formats */
--- 271,277 ----
  	/*
  	 * If we're going to do parallel restore, there are some restrictions.
  	 */
! 	parallel_mode = (AH->public.numWorkers > 1 && ropt->useDB);
  	if (parallel_mode)
  	{
  		/* We haven't got round to making this work for all archive formats */
*************** RestoreArchive(Archive *AHX, RestoreOpti
*** 438,444 ****
  	 * In parallel mode, turn control over to the parallel-restore logic.
  	 */
  	if (parallel_mode)
! 		restore_toc_entries_parallel(AH);
  	else
  	{
  		for (te = AH->toc->next; te != AH->toc; te = te->next)
--- 437,458 ----
  	 * In parallel mode, turn control over to the parallel-restore logic.
  	 */
  	if (parallel_mode)
! 	{
! 		TocEntry pending_list;
! 
! 		par_list_header_init(&pending_list);
! 
! 		/* This runs PRE_DATA items and then disconnects from the database */
! 		restore_toc_entries_prefork(AH);
! 		Assert(AH->connection == NULL);
! 
! 		/* This will actually fork the processes */
! 		restore_toc_entries_parallel(AH, &pending_list);
! 
! 		/* reconnect the master and see if we missed something */
! 		restore_toc_entries_postfork(AH, &pending_list);
! 		Assert(AH->connection != NULL);
! 	}
  	else
  	{
  		for (te = AH->toc->next; te != AH->toc; te = te->next)
*************** _moveBefore(ArchiveHandle *AH, TocEntry
*** 1524,1530 ****
  	pos->prev = te;
  }
  
! static TocEntry *
  getTocEntryByDumpId(ArchiveHandle *AH, DumpId id)
  {
  	TocEntry   *te;
--- 1538,1544 ----
  	pos->prev = te;
  }
  
! TocEntry *
  getTocEntryByDumpId(ArchiveHandle *AH, DumpId id)
  {
  	TocEntry   *te;
*************** on_exit_close_archive(Archive *AHX)
*** 3337,3378 ****
  	on_exit_nicely(archive_close_connection, &shutdown_info);
  }
  
- /*
-  * Main engine for parallel restore.
-  *
-  * Work is done in three phases.
-  * First we process all SECTION_PRE_DATA tocEntries, in a single connection,
-  * just as for a standard restore.	Second we process the remaining non-ACL
-  * steps in parallel worker children (threads on Windows, processes on Unix),
-  * each of which connects separately to the database.  Finally we process all
-  * the ACL entries in a single connection (that happens back in
-  * RestoreArchive).
-  */
  static void
! restore_toc_entries_parallel(ArchiveHandle *AH)
  {
  	RestoreOptions *ropt = AH->ropt;
- 	int			n_slots = ropt->number_of_jobs;
- 	ParallelSlot *slots;
- 	int			work_status;
- 	int			next_slot;
  	bool		skipped_some;
- 	TocEntry	pending_list;
- 	TocEntry	ready_list;
  	TocEntry   *next_work_item;
- 	thandle		ret_child;
- 	TocEntry   *te;
- 	ParallelState *pstate;
- 	int			i;
- 
- 	ahlog(AH, 2, "entering restore_toc_entries_parallel\n");
  
! 	slots = (ParallelSlot *) pg_calloc(n_slots, sizeof(ParallelSlot));
! 	pstate = (ParallelState *) pg_malloc(sizeof(ParallelState));
! 	pstate->pse = (ParallelStateEntry *) pg_calloc(n_slots, sizeof(ParallelStateEntry));
! 	pstate->numWorkers = ropt->number_of_jobs;
! 	for (i = 0; i < pstate->numWorkers; i++)
! 		unsetProcessIdentifier(&(pstate->pse[i]));
  
  	/* Adjust dependency information */
  	fix_dependencies(AH);
--- 3351,3364 ----
  	on_exit_nicely(archive_close_connection, &shutdown_info);
  }
  
  static void
! restore_toc_entries_prefork(ArchiveHandle *AH)
  {
  	RestoreOptions *ropt = AH->ropt;
  	bool		skipped_some;
  	TocEntry   *next_work_item;
  
! 	ahlog(AH, 2, "entering restore_toc_entries_prefork\n");
  
  	/* Adjust dependency information */
  	fix_dependencies(AH);
*************** restore_toc_entries_parallel(ArchiveHand
*** 3428,3439 ****
  	 */
  	DisconnectDatabase(&AH->public);
  
- 	/*
- 	 * Set the pstate in the shutdown_info. The exit handler uses pstate if set
- 	 * and falls back to AHX otherwise.
- 	 */
- 	shutdown_info.pstate = pstate;
- 
  	/* blow away any transient state from the old connection */
  	if (AH->currUser)
  		free(AH->currUser);
--- 3414,3419 ----
*************** restore_toc_entries_parallel(ArchiveHand
*** 3445,3461 ****
  		free(AH->currTablespace);
  	AH->currTablespace = NULL;
  	AH->currWithOids = -1;
  
  	/*
! 	 * Initialize the lists of pending and ready items.  After this setup, the
! 	 * pending list is everything that needs to be done but is blocked by one
! 	 * or more dependencies, while the ready list contains items that have no
! 	 * remaining dependencies.	Note: we don't yet filter out entries that
! 	 * aren't going to be restored.  They might participate in dependency
! 	 * chains connecting entries that should be restored, so we treat them as
! 	 * live until we actually process them.
  	 */
- 	par_list_header_init(&pending_list);
  	par_list_header_init(&ready_list);
  	skipped_some = false;
  	for (next_work_item = AH->toc->next; next_work_item != AH->toc; next_work_item = next_work_item->next)
--- 3425,3483 ----
  		free(AH->currTablespace);
  	AH->currTablespace = NULL;
  	AH->currWithOids = -1;
+ }
+ 
+ /*
+  * Main engine for parallel restore.
+  *
+  * Work is done in three phases.
+  * First we process all SECTION_PRE_DATA tocEntries, in a single connection,
+  * just as for a standard restore. This is done in restore_toc_entries_prefork().
+  * Second we process the remaining non-ACL steps in parallel worker children
+  * (threads on Windows, processes on Unix), each of which connects separately
+  * to the database.
+  * Finally we process all the ACL entries in a single connection (that happens
+  * back in RestoreArchive).
+  */
+ static void
+ restore_toc_entries_parallel(ArchiveHandle *AH, TocEntry *pending_list)
+ {
+ 	ParallelState *pstate;
+ 	ParallelSlot *slots;
+ 	int			n_slots = AH->public.numWorkers;
+ 	TocEntry   *next_work_item;
+ 	int			next_slot;
+ 	TocEntry	ready_list;
+ 	int			ret_child;
+ 	bool		skipped_some;
+ 	int			work_status;
+ 	int			i;
+ 
+ 	ahlog(AH, 2, "entering restore_toc_entries_parallel\n");
+ 
+ 	slots = (ParallelSlot *) pg_calloc(n_slots, sizeof(ParallelSlot));
+ 	pstate = (ParallelState *) pg_malloc(sizeof(ParallelState));
+ 	pstate->pse = (ParallelStateEntry *) pg_calloc(n_slots, sizeof(ParallelStateEntry));
+ 	pstate->numWorkers = AH->public.numWorkers;
+ 	for (i = 0; i < pstate->numWorkers; i++)
+ 		unsetProcessIdentifier(&(pstate->pse[i]));
  
  	/*
! 	 * Set the pstate in the shutdown_info. The exit handler uses pstate if set
! 	 * and falls back to AHX otherwise.
! 	 */
! 	shutdown_info.pstate = pstate;
! 
! 	/*
! 	 * Initialize the lists of ready items, the list for pending items has
! 	 * already been initialized in the caller.  After this setup, the pending
! 	 * list is everything that needs to be done but is blocked by one or more
! 	 * dependencies, while the ready list contains items that have no remaining
! 	 * dependencies.	Note: we don't yet filter out entries that aren't going
! 	 * to be restored.  They might participate in dependency chains connecting
! 	 * entries that should be restored, so we treat them as live until we
! 	 * actually process them.
  	 */
  	par_list_header_init(&ready_list);
  	skipped_some = false;
  	for (next_work_item = AH->toc->next; next_work_item != AH->toc; next_work_item = next_work_item->next)
*************** restore_toc_entries_parallel(ArchiveHand
*** 3480,3486 ****
  		}
  
  		if (next_work_item->depCount > 0)
! 			par_list_append(&pending_list, next_work_item);
  		else
  			par_list_append(&ready_list, next_work_item);
  	}
--- 3502,3508 ----
  		}
  
  		if (next_work_item->depCount > 0)
! 			par_list_append(pending_list, next_work_item);
  		else
  			par_list_append(&ready_list, next_work_item);
  	}
*************** restore_toc_entries_parallel(ArchiveHand
*** 3566,3571 ****
--- 3588,3602 ----
  	}
  
  	ahlog(AH, 1, "finished main parallel loop\n");
+ }
+ 
+ static void
+ restore_toc_entries_postfork(ArchiveHandle *AH, TocEntry *pending_list)
+ {
+ 	RestoreOptions *ropt = AH->ropt;
+ 	TocEntry   *te;
+ 
+ 	ahlog(AH, 2, "entering restore_toc_entries_postfork\n");
  
  	/*
  	 * Remove the pstate again, so the exit handler will now fall back to
*************** restore_toc_entries_parallel(ArchiveHand
*** 3587,3593 ****
  	 * dependencies, or some other pathological condition. If so, do it in the
  	 * single parent connection.
  	 */
! 	for (te = pending_list.par_next; te != &pending_list; te = te->par_next)
  	{
  		ahlog(AH, 1, "processing missed item %d %s %s\n",
  			  te->dumpId, te->desc, te->tag);
--- 3618,3624 ----
  	 * dependencies, or some other pathological condition. If so, do it in the
  	 * single parent connection.
  	 */
! 	for (te = pending_list->par_next; te != pending_list; te = te->par_next)
  	{
  		ahlog(AH, 1, "processing missed item %d %s %s\n",
  			  te->dumpId, te->desc, te->tag);
*************** inhibit_data_for_failed_table(ArchiveHan
*** 4302,4311 ****
   *
   * Enough of the structure is cloned to ensure that there is no
   * conflict between different threads each with their own clone.
-  *
-  * These could be public, but no need at present.
   */
! static ArchiveHandle *
  CloneArchive(ArchiveHandle *AH)
  {
  	ArchiveHandle *clone;
--- 4333,4340 ----
   *
   * Enough of the structure is cloned to ensure that there is no
   * conflict between different threads each with their own clone.
   */
! ArchiveHandle *
  CloneArchive(ArchiveHandle *AH)
  {
  	ArchiveHandle *clone;
*************** CloneArchive(ArchiveHandle *AH)
*** 4342,4348 ****
   *
   * Note: we assume any clone-local connection was already closed.
   */
! static void
  DeCloneArchive(ArchiveHandle *AH)
  {
  	/* Clear format-specific state */
--- 4371,4377 ----
   *
   * Note: we assume any clone-local connection was already closed.
   */
! void
  DeCloneArchive(ArchiveHandle *AH)
  {
  	/* Clear format-specific state */
diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h
index c84ec61..005a8fe 100644
*** a/src/bin/pg_dump/pg_backup_archiver.h
--- b/src/bin/pg_dump/pg_backup_archiver.h
*************** extern void ReadHead(ArchiveHandle *AH);
*** 334,341 ****
--- 334,344 ----
  extern void WriteToc(ArchiveHandle *AH);
  extern void ReadToc(ArchiveHandle *AH);
  extern void WriteDataChunks(ArchiveHandle *AH);
+ extern ArchiveHandle *CloneArchive(ArchiveHandle *AH);
+ extern void DeCloneArchive(ArchiveHandle *AH);
  
  extern teReqs TocIDRequired(ArchiveHandle *AH, DumpId id, RestoreOptions *ropt);
+ TocEntry *getTocEntryByDumpId(ArchiveHandle *AH, DumpId id);
  extern bool checkSeek(FILE *fp);
  
  #define appendStringLiteralAHX(buf,str,AH) \
*************** int			ahprintf(ArchiveHandle *AH, const
*** 376,379 ****
--- 379,394 ----
  
  void		ahlog(ArchiveHandle *AH, int level, const char *fmt,...) __attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 4)));
  
+ #ifdef USE_ASSERT_CHECKING
+ #define Assert(condition) \
+ 	if (!(condition)) \
+ 	{ \
+ 		write_msg(NULL, "Failed assertion in %s, line %d\n", \
+ 				  __FILE__, __LINE__); \
+ 		abort();\
+ 	}
+ #else
+ #define Assert(condition)
+ #endif
+ 
  #endif
diff --git a/src/bin/pg_dump/pg_backup_db.c b/src/bin/pg_dump/pg_backup_db.c
index b315e68..985a5b0 100644
*** a/src/bin/pg_dump/pg_backup_db.c
--- b/src/bin/pg_dump/pg_backup_db.c
*************** ConnectDatabase(Archive *AHX,
*** 308,319 ****
  	PQsetNoticeProcessor(AH->connection, notice_processor, NULL);
  }
  
  void
  DisconnectDatabase(Archive *AHX)
  {
  	ArchiveHandle *AH = (ArchiveHandle *) AHX;
  
! 	PQfinish(AH->connection);		/* noop if AH->connection is NULL */
  	AH->connection = NULL;
  }
  
--- 308,337 ----
  	PQsetNoticeProcessor(AH->connection, notice_processor, NULL);
  }
  
+ /*
+  * Close the connection to the database and also cancel off the query if we
+  * have one running.
+  */
  void
  DisconnectDatabase(Archive *AHX)
  {
  	ArchiveHandle *AH = (ArchiveHandle *) AHX;
+ 	PGcancel   *cancel;
+ 	char		errbuf[1];
  
! 	if (!AH->connection)
! 		return;
! 
! 	if (PQtransactionStatus(AH->connection) == PQTRANS_ACTIVE)
! 	{
! 		if ((cancel = PQgetCancel(AH->connection)))
! 		{
! 			PQcancel(cancel, errbuf, sizeof(errbuf));
! 			PQfreeCancel(cancel);
! 		}
! 	}
! 
! 	PQfinish(AH->connection);
  	AH->connection = NULL;
  }
  
diff --git a/src/bin/pg_dump/pg_backup_directory.c b/src/bin/pg_dump/pg_backup_directory.c
index 8d43cd2..aa56262 100644
*** a/src/bin/pg_dump/pg_backup_directory.c
--- b/src/bin/pg_dump/pg_backup_directory.c
*************** static void _EndBlob(ArchiveHandle *AH,
*** 82,89 ****
  static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
  static void _LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt);
  
! static char *prependDirectory(ArchiveHandle *AH, const char *relativeFilename);
! 
  static void createDirectory(const char *dir);
  
  
--- 82,88 ----
  static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
  static void _LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt);
  
! static char *prependDirectory(ArchiveHandle *AH, char *buf, const char *relativeFilename);
  static void createDirectory(const char *dir);
  
  
*************** InitArchiveFmt_Directory(ArchiveHandle *
*** 153,162 ****
  	}
  	else
  	{							/* Read Mode */
! 		char	   *fname;
  		cfp		   *tocFH;
  
! 		fname = prependDirectory(AH, "toc.dat");
  
  		tocFH = cfopen_read(fname, PG_BINARY_R);
  		if (tocFH == NULL)
--- 152,161 ----
  	}
  	else
  	{							/* Read Mode */
! 		char	   fname[MAXPGPATH];
  		cfp		   *tocFH;
  
! 		prependDirectory(AH, fname, "toc.dat");
  
  		tocFH = cfopen_read(fname, PG_BINARY_R);
  		if (tocFH == NULL)
*************** _StartData(ArchiveHandle *AH, TocEntry *
*** 282,290 ****
  {
  	lclTocEntry *tctx = (lclTocEntry *) te->formatData;
  	lclContext *ctx = (lclContext *) AH->formatData;
! 	char	   *fname;
  
! 	fname = prependDirectory(AH, tctx->filename);
  
  	ctx->dataFH = cfopen_write(fname, PG_BINARY_W, AH->compression);
  	if (ctx->dataFH == NULL)
--- 281,289 ----
  {
  	lclTocEntry *tctx = (lclTocEntry *) te->formatData;
  	lclContext *ctx = (lclContext *) AH->formatData;
! 	char		fname[MAXPGPATH];
  
! 	prependDirectory(AH, fname, tctx->filename);
  
  	ctx->dataFH = cfopen_write(fname, PG_BINARY_W, AH->compression);
  	if (ctx->dataFH == NULL)
*************** _PrintTocData(ArchiveHandle *AH, TocEntr
*** 376,383 ****
  		_LoadBlobs(AH, ropt);
  	else
  	{
! 		char	   *fname = prependDirectory(AH, tctx->filename);
  
  		_PrintFileData(AH, fname, ropt);
  	}
  }
--- 375,383 ----
  		_LoadBlobs(AH, ropt);
  	else
  	{
! 		char		fname[MAXPGPATH];
  
+ 		prependDirectory(AH, fname, tctx->filename);
  		_PrintFileData(AH, fname, ropt);
  	}
  }
*************** _LoadBlobs(ArchiveHandle *AH, RestoreOpt
*** 387,398 ****
  {
  	Oid			oid;
  	lclContext *ctx = (lclContext *) AH->formatData;
! 	char	   *fname;
  	char		line[MAXPGPATH];
  
  	StartRestoreBlobs(AH);
  
! 	fname = prependDirectory(AH, "blobs.toc");
  
  	ctx->blobsTocFH = cfopen_read(fname, PG_BINARY_R);
  
--- 387,398 ----
  {
  	Oid			oid;
  	lclContext *ctx = (lclContext *) AH->formatData;
! 	char		fname[MAXPGPATH];
  	char		line[MAXPGPATH];
  
  	StartRestoreBlobs(AH);
  
! 	prependDirectory(AH, fname, "blobs.toc");
  
  	ctx->blobsTocFH = cfopen_read(fname, PG_BINARY_R);
  
*************** _CloseArchive(ArchiveHandle *AH)
*** 519,525 ****
  	if (AH->mode == archModeWrite)
  	{
  		cfp		   *tocFH;
! 		char	   *fname = prependDirectory(AH, "toc.dat");
  
  		/* The TOC is always created uncompressed */
  		tocFH = cfopen_write(fname, PG_BINARY_W, 0);
--- 519,527 ----
  	if (AH->mode == archModeWrite)
  	{
  		cfp		   *tocFH;
! 		char		fname[MAXPGPATH];
! 
! 		prependDirectory(AH, fname, "toc.dat");
  
  		/* The TOC is always created uncompressed */
  		tocFH = cfopen_write(fname, PG_BINARY_W, 0);
*************** static void
*** 561,569 ****
  _StartBlobs(ArchiveHandle *AH, TocEntry *te)
  {
  	lclContext *ctx = (lclContext *) AH->formatData;
! 	char	   *fname;
  
! 	fname = prependDirectory(AH, "blobs.toc");
  
  	/* The blob TOC file is never compressed */
  	ctx->blobsTocFH = cfopen_write(fname, "ab", 0);
--- 563,571 ----
  _StartBlobs(ArchiveHandle *AH, TocEntry *te)
  {
  	lclContext *ctx = (lclContext *) AH->formatData;
! 	char		fname[MAXPGPATH];
  
! 	prependDirectory(AH, fname, "blobs.toc");
  
  	/* The blob TOC file is never compressed */
  	ctx->blobsTocFH = cfopen_write(fname, "ab", 0);
*************** _EndBlobs(ArchiveHandle *AH, TocEntry *t
*** 629,667 ****
  }
  
  static void
! createDirectory(const char *dir)
  {
  	struct stat st;
  
! 	/* the directory must not exist yet. */
! 	if (stat(dir, &st) == 0)
  	{
  		if (S_ISDIR(st.st_mode))
! 			exit_horribly(modulename,
! 						  "cannot create directory %s, it exists already\n",
! 						  dir);
  		else
  			exit_horribly(modulename,
  						  "cannot create directory %s, a file with this name "
! 						  "exists already\n", dir);
  	}
- 
- 	/*
- 	 * Now we create the directory. Note that for some race condition we could
- 	 * also run into the situation that the directory has been created just
- 	 * between our two calls.
- 	 */
- 	if (mkdir(dir, 0700) < 0)
- 		exit_horribly(modulename, "could not create directory %s: %s",
- 					  dir, strerror(errno));
  }
  
! 
  static char *
! prependDirectory(ArchiveHandle *AH, const char *relativeFilename)
  {
  	lclContext *ctx = (lclContext *) AH->formatData;
- 	static char buf[MAXPGPATH];
  	char	   *dname;
  
  	dname = ctx->directory;
--- 631,707 ----
  }
  
  static void
! createDirectory(const char *directory)
  {
  	struct stat st;
  
! 	/* the directory must either not exist or must be empty. */
! 	if (stat(directory, &st) == 0)
  	{
  		if (S_ISDIR(st.st_mode))
! 		{
! 			DIR	   *dir;
! 			struct dirent *entry;
! 			bool	empty = true;
! 
! 			dir = opendir(directory);
! 			if (!dir)
! 				exit_horribly(modulename,
! 							  "cannot create directory %s, it exists already\n",
! 							  directory);
! 
! 			while ((entry = readdir(dir))) {
! 				if (strcmp(entry->d_name, ".") == 0)
! 					continue;
! 				if (strcmp(entry->d_name, "..") == 0)
! 					continue;
! 				empty = false;
! 				break;
! 			}
! 			closedir(dir);
! 
! 			if (!empty)
! 				exit_horribly(modulename,
! 							  "cannot create directory %s, it exists already "
! 							  "and is not empty\n",
! 							  directory);
! 
! 			/*
! 			 * Down here we know that the directory exists and is empty. This
! 			 * doesn't mean that we can create files in it, but we will soon
! 			 * find out. We could test-create a file and delete it again
! 			 * already now but we need to be prepared for failing later
! 			 * anyway...
! 			 */
! 		}
  		else
  			exit_horribly(modulename,
  						  "cannot create directory %s, a file with this name "
! 						  "exists already\n", directory);
! 	}
! 	else
! 	{
! 		/*
! 		 * Now we create the directory. Note that for some race condition we could
! 		 * also run into the situation that the directory has been created just
! 		 * between our two calls.
! 		 */
! 		if (mkdir(directory, 0700) < 0)
! 			exit_horribly(modulename, "could not create directory %s: %s",
! 						  directory, strerror(errno));
  	}
  }
  
! /*
!  * Gets a relative file name and prepends the output directory, writing the
!  * result to buf. The caller needs to make sure that buf is MAXPGPATH bytes
!  * big. Can't use a static char[MAXPGPATH] inside the function because we run
!  * multithreaded on Windows.
!  */
  static char *
! prependDirectory(ArchiveHandle *AH, char* buf, const char *relativeFilename)
  {
  	lclContext *ctx = (lclContext *) AH->formatData;
  	char	   *dname;
  
  	dname = ctx->directory;
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index f9fbaee..7f91eb9 100644
*** a/src/bin/pg_dump/pg_dump.c
--- b/src/bin/pg_dump/pg_dump.c
*************** static Oid	findLastBuiltinOid_V70(Archiv
*** 235,242 ****
  static void selectSourceSchema(Archive *fout, const char *schemaName);
  static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
  static char *myFormatType(const char *typname, int32 typmod);
- static const char *fmtQualifiedId(Archive *fout,
- 								  const char *schema, const char *id);
  static void getBlobs(Archive *fout);
  static void dumpBlob(Archive *fout, BlobInfo *binfo);
  static int	dumpBlobs(Archive *fout, void *arg);
--- 235,240 ----
*************** static void binary_upgrade_extension_mem
*** 254,260 ****
  								DumpableObject *dobj,
  								const char *objlabel);
  static const char *getAttrName(int attrnum, TableInfo *tblInfo);
! static const char *fmtCopyColumnList(const TableInfo *ti);
  static PGresult *ExecuteSqlQueryForSingleRow(Archive *fout, char *query);
  
  int
--- 252,258 ----
  								DumpableObject *dobj,
  								const char *objlabel);
  static const char *getAttrName(int attrnum, TableInfo *tblInfo);
! static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
  static PGresult *ExecuteSqlQueryForSingleRow(Archive *fout, char *query);
  
  int
*************** dumpTableData_copy(Archive *fout, void *
*** 1220,1225 ****
--- 1218,1228 ----
  	const bool	hasoids = tbinfo->hasoids;
  	const bool	oids = tdinfo->oids;
  	PQExpBuffer q = createPQExpBuffer();
+ 	/*
+ 	 * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId which
+ 	 * uses it already.
+ 	 */
+ 	PQExpBuffer clistBuf = createPQExpBuffer();
  	PGconn	   *conn = GetConnection(fout);
  	PGresult   *res;
  	int			ret;
*************** dumpTableData_copy(Archive *fout, void *
*** 1244,1257 ****
  	 * cases involving ADD COLUMN and inheritance.)
  	 */
  	if (fout->remoteVersion >= 70300)
! 		column_list = fmtCopyColumnList(tbinfo);
  	else
  		column_list = "";		/* can't select columns in COPY */
  
  	if (oids && hasoids)
  	{
  		appendPQExpBuffer(q, "COPY %s %s WITH OIDS TO stdout;",
! 						  fmtQualifiedId(fout,
  										 tbinfo->dobj.namespace->dobj.name,
  										 classname),
  						  column_list);
--- 1247,1260 ----
  	 * cases involving ADD COLUMN and inheritance.)
  	 */
  	if (fout->remoteVersion >= 70300)
! 		column_list = fmtCopyColumnList(tbinfo, clistBuf);
  	else
  		column_list = "";		/* can't select columns in COPY */
  
  	if (oids && hasoids)
  	{
  		appendPQExpBuffer(q, "COPY %s %s WITH OIDS TO stdout;",
! 						  fmtQualifiedId(fout->remoteVersion,
  										 tbinfo->dobj.namespace->dobj.name,
  										 classname),
  						  column_list);
*************** dumpTableData_copy(Archive *fout, void *
*** 1269,1275 ****
  		else
  			appendPQExpBufferStr(q, "* ");
  		appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
! 						  fmtQualifiedId(fout,
  										 tbinfo->dobj.namespace->dobj.name,
  										 classname),
  						  tdinfo->filtercond);
--- 1272,1278 ----
  		else
  			appendPQExpBufferStr(q, "* ");
  		appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
! 						  fmtQualifiedId(fout->remoteVersion,
  										 tbinfo->dobj.namespace->dobj.name,
  										 classname),
  						  tdinfo->filtercond);
*************** dumpTableData_copy(Archive *fout, void *
*** 1277,1289 ****
  	else
  	{
  		appendPQExpBuffer(q, "COPY %s %s TO stdout;",
! 						  fmtQualifiedId(fout,
  										 tbinfo->dobj.namespace->dobj.name,
  										 classname),
  						  column_list);
  	}
  	res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
  	PQclear(res);
  
  	for (;;)
  	{
--- 1280,1293 ----
  	else
  	{
  		appendPQExpBuffer(q, "COPY %s %s TO stdout;",
! 						  fmtQualifiedId(fout->remoteVersion,
  										 tbinfo->dobj.namespace->dobj.name,
  										 classname),
  						  column_list);
  	}
  	res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
  	PQclear(res);
+ 	destroyPQExpBuffer(clistBuf);
  
  	for (;;)
  	{
*************** dumpTableData_insert(Archive *fout, void
*** 1402,1408 ****
  	{
  		appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
  						  "SELECT * FROM ONLY %s",
! 						  fmtQualifiedId(fout,
  										 tbinfo->dobj.namespace->dobj.name,
  										 classname));
  	}
--- 1406,1412 ----
  	{
  		appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
  						  "SELECT * FROM ONLY %s",
! 						  fmtQualifiedId(fout->remoteVersion,
  										 tbinfo->dobj.namespace->dobj.name,
  										 classname));
  	}
*************** dumpTableData_insert(Archive *fout, void
*** 1410,1416 ****
  	{
  		appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
  						  "SELECT * FROM %s",
! 						  fmtQualifiedId(fout,
  										 tbinfo->dobj.namespace->dobj.name,
  										 classname));
  	}
--- 1414,1420 ----
  	{
  		appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
  						  "SELECT * FROM %s",
! 						  fmtQualifiedId(fout->remoteVersion,
  										 tbinfo->dobj.namespace->dobj.name,
  										 classname));
  	}
*************** dumpTableData(Archive *fout, TableDataIn
*** 1542,1547 ****
--- 1546,1552 ----
  {
  	TableInfo  *tbinfo = tdinfo->tdtable;
  	PQExpBuffer copyBuf = createPQExpBuffer();
+ 	PQExpBuffer clistBuf = createPQExpBuffer();
  	DataDumperPtr dumpFn;
  	char	   *copyStmt;
  
*************** dumpTableData(Archive *fout, TableDataIn
*** 1553,1559 ****
  		appendPQExpBuffer(copyBuf, "COPY %s ",
  						  fmtId(tbinfo->dobj.name));
  		appendPQExpBuffer(copyBuf, "%s %sFROM stdin;\n",
! 						  fmtCopyColumnList(tbinfo),
  					  (tdinfo->oids && tbinfo->hasoids) ? "WITH OIDS " : "");
  		copyStmt = copyBuf->data;
  	}
--- 1558,1564 ----
  		appendPQExpBuffer(copyBuf, "COPY %s ",
  						  fmtId(tbinfo->dobj.name));
  		appendPQExpBuffer(copyBuf, "%s %sFROM stdin;\n",
! 						  fmtCopyColumnList(tbinfo, clistBuf),
  					  (tdinfo->oids && tbinfo->hasoids) ? "WITH OIDS " : "");
  		copyStmt = copyBuf->data;
  	}
*************** dumpTableData(Archive *fout, TableDataIn
*** 1573,1578 ****
--- 1578,1584 ----
  				 dumpFn, tdinfo);
  
  	destroyPQExpBuffer(copyBuf);
+ 	destroyPQExpBuffer(clistBuf);
  }
  
  /*
*************** getTables(Archive *fout, int *numTables)
*** 3842,3847 ****
--- 3848,3854 ----
  	int			i_reloptions;
  	int			i_toastreloptions;
  	int			i_reloftype;
+ 	int			i_relpages;
  
  	/* Make sure we are in proper schema */
  	selectSourceSchema(fout, "pg_catalog");
*************** getTables(Archive *fout, int *numTables)
*** 3881,3886 ****
--- 3888,3894 ----
  						  "c.relfrozenxid, tc.oid AS toid, "
  						  "tc.relfrozenxid AS tfrozenxid, "
  						  "c.relpersistence, "
+ 						  "c.relpages, "
  						  "CASE WHEN c.reloftype <> 0 THEN c.reloftype::pg_catalog.regtype ELSE NULL END AS reloftype, "
  						  "d.refobjid AS owning_tab, "
  						  "d.refobjsubid AS owning_col, "
*************** getTables(Archive *fout, int *numTables)
*** 3917,3922 ****
--- 3925,3931 ----
  						  "c.relfrozenxid, tc.oid AS toid, "
  						  "tc.relfrozenxid AS tfrozenxid, "
  						  "'p' AS relpersistence, "
+ 						  "c.relpages, "
  						  "CASE WHEN c.reloftype <> 0 THEN c.reloftype::pg_catalog.regtype ELSE NULL END AS reloftype, "
  						  "d.refobjid AS owning_tab, "
  						  "d.refobjsubid AS owning_col, "
*************** getTables(Archive *fout, int *numTables)
*** 3952,3957 ****
--- 3961,3967 ----
  						  "c.relfrozenxid, tc.oid AS toid, "
  						  "tc.relfrozenxid AS tfrozenxid, "
  						  "'p' AS relpersistence, "
+ 						  "c.relpages, "
  						  "NULL AS reloftype, "
  						  "d.refobjid AS owning_tab, "
  						  "d.refobjsubid AS owning_col, "
*************** getTables(Archive *fout, int *numTables)
*** 3987,3992 ****
--- 3997,4003 ----
  						  "c.relfrozenxid, tc.oid AS toid, "
  						  "tc.relfrozenxid AS tfrozenxid, "
  						  "'p' AS relpersistence, "
+ 						  "c.relpages, "
  						  "NULL AS reloftype, "
  						  "d.refobjid AS owning_tab, "
  						  "d.refobjsubid AS owning_col, "
*************** getTables(Archive *fout, int *numTables)
*** 4023,4028 ****
--- 4034,4040 ----
  						  "0 AS toid, "
  						  "0 AS tfrozenxid, "
  						  "'p' AS relpersistence, "
+ 						  "relpages, "
  						  "NULL AS reloftype, "
  						  "d.refobjid AS owning_tab, "
  						  "d.refobjsubid AS owning_col, "
*************** getTables(Archive *fout, int *numTables)
*** 4058,4063 ****
--- 4070,4076 ----
  						  "0 AS toid, "
  						  "0 AS tfrozenxid, "
  						  "'p' AS relpersistence, "
+ 						  "relpages, "
  						  "NULL AS reloftype, "
  						  "d.refobjid AS owning_tab, "
  						  "d.refobjsubid AS owning_col, "
*************** getTables(Archive *fout, int *numTables)
*** 4089,4094 ****
--- 4102,4108 ----
  						  "0 AS toid, "
  						  "0 AS tfrozenxid, "
  						  "'p' AS relpersistence, "
+ 						  "relpages, "
  						  "NULL AS reloftype, "
  						  "NULL::oid AS owning_tab, "
  						  "NULL::int4 AS owning_col, "
*************** getTables(Archive *fout, int *numTables)
*** 4115,4120 ****
--- 4129,4135 ----
  						  "0 AS toid, "
  						  "0 AS tfrozenxid, "
  						  "'p' AS relpersistence, "
+ 						  "relpages, "
  						  "NULL AS reloftype, "
  						  "NULL::oid AS owning_tab, "
  						  "NULL::int4 AS owning_col, "
*************** getTables(Archive *fout, int *numTables)
*** 4151,4156 ****
--- 4166,4172 ----
  						  "0 AS toid, "
  						  "0 AS tfrozenxid, "
  						  "'p' AS relpersistence, "
+ 						  "0 AS relpages, "
  						  "NULL AS reloftype, "
  						  "NULL::oid AS owning_tab, "
  						  "NULL::int4 AS owning_col, "
*************** getTables(Archive *fout, int *numTables)
*** 4204,4209 ****
--- 4220,4226 ----
  	i_reloptions = PQfnumber(res, "reloptions");
  	i_toastreloptions = PQfnumber(res, "toast_reloptions");
  	i_reloftype = PQfnumber(res, "reloftype");
+ 	i_relpages = PQfnumber(res, "relpages");
  
  	if (lockWaitTimeout && fout->remoteVersion >= 70300)
  	{
*************** getTables(Archive *fout, int *numTables)
*** 4260,4265 ****
--- 4277,4283 ----
  		tblinfo[i].reltablespace = pg_strdup(PQgetvalue(res, i, i_reltablespace));
  		tblinfo[i].reloptions = pg_strdup(PQgetvalue(res, i, i_reloptions));
  		tblinfo[i].toast_reloptions = pg_strdup(PQgetvalue(res, i, i_toastreloptions));
+ 		tblinfo[i].relpages = atoi(PQgetvalue(res, i, i_relpages));
  
  		/* other fields were zeroed above */
  
*************** getTables(Archive *fout, int *numTables)
*** 4288,4294 ****
  			resetPQExpBuffer(query);
  			appendPQExpBuffer(query,
  							  "LOCK TABLE %s IN ACCESS SHARE MODE",
! 						 fmtQualifiedId(fout,
  										tblinfo[i].dobj.namespace->dobj.name,
  										tblinfo[i].dobj.name));
  			ExecuteSqlStatement(fout, query->data);
--- 4306,4312 ----
  			resetPQExpBuffer(query);
  			appendPQExpBuffer(query,
  							  "LOCK TABLE %s IN ACCESS SHARE MODE",
! 						 fmtQualifiedId(fout->remoteVersion,
  										tblinfo[i].dobj.namespace->dobj.name,
  										tblinfo[i].dobj.name));
  			ExecuteSqlStatement(fout, query->data);
*************** getIndexes(Archive *fout, TableInfo tbli
*** 4427,4433 ****
  				i_conoid,
  				i_condef,
  				i_tablespace,
! 				i_options;
  	int			ntups;
  
  	for (i = 0; i < numTables; i++)
--- 4445,4452 ----
  				i_conoid,
  				i_condef,
  				i_tablespace,
! 				i_options,
! 				i_relpages;
  	int			ntups;
  
  	for (i = 0; i < numTables; i++)
*************** getIndexes(Archive *fout, TableInfo tbli
*** 4469,4474 ****
--- 4488,4494 ----
  					 "pg_catalog.pg_get_indexdef(i.indexrelid) AS indexdef, "
  							  "t.relnatts AS indnkeys, "
  							  "i.indkey, i.indisclustered, "
+ 							  "t.relpages, "
  							  "c.contype, c.conname, "
  							  "c.condeferrable, c.condeferred, "
  							  "c.tableoid AS contableoid, "
*************** getIndexes(Archive *fout, TableInfo tbli
*** 4494,4499 ****
--- 4514,4520 ----
  					 "pg_catalog.pg_get_indexdef(i.indexrelid) AS indexdef, "
  							  "t.relnatts AS indnkeys, "
  							  "i.indkey, i.indisclustered, "
+ 							  "t.relpages, "
  							  "c.contype, c.conname, "
  							  "c.condeferrable, c.condeferred, "
  							  "c.tableoid AS contableoid, "
*************** getIndexes(Archive *fout, TableInfo tbli
*** 4522,4527 ****
--- 4543,4549 ----
  					 "pg_catalog.pg_get_indexdef(i.indexrelid) AS indexdef, "
  							  "t.relnatts AS indnkeys, "
  							  "i.indkey, i.indisclustered, "
+ 							  "t.relpages, "
  							  "c.contype, c.conname, "
  							  "c.condeferrable, c.condeferred, "
  							  "c.tableoid AS contableoid, "
*************** getIndexes(Archive *fout, TableInfo tbli
*** 4550,4555 ****
--- 4572,4578 ----
  					 "pg_catalog.pg_get_indexdef(i.indexrelid) AS indexdef, "
  							  "t.relnatts AS indnkeys, "
  							  "i.indkey, i.indisclustered, "
+ 							  "t.relpages, "
  							  "c.contype, c.conname, "
  							  "c.condeferrable, c.condeferred, "
  							  "c.tableoid AS contableoid, "
*************** getIndexes(Archive *fout, TableInfo tbli
*** 4578,4583 ****
--- 4601,4607 ----
  							  "pg_get_indexdef(i.indexrelid) AS indexdef, "
  							  "t.relnatts AS indnkeys, "
  							  "i.indkey, false AS indisclustered, "
+ 							  "t.relpages, "
  							  "CASE WHEN i.indisprimary THEN 'p'::char "
  							  "ELSE '0'::char END AS contype, "
  							  "t.relname AS conname, "
*************** getIndexes(Archive *fout, TableInfo tbli
*** 4604,4609 ****
--- 4628,4634 ----
  							  "pg_get_indexdef(i.indexrelid) AS indexdef, "
  							  "t.relnatts AS indnkeys, "
  							  "i.indkey, false AS indisclustered, "
+ 							  "t.relpages, "
  							  "CASE WHEN i.indisprimary THEN 'p'::char "
  							  "ELSE '0'::char END AS contype, "
  							  "t.relname AS conname, "
*************** getIndexes(Archive *fout, TableInfo tbli
*** 4632,4637 ****
--- 4657,4663 ----
  		i_indnkeys = PQfnumber(res, "indnkeys");
  		i_indkey = PQfnumber(res, "indkey");
  		i_indisclustered = PQfnumber(res, "indisclustered");
+ 		i_relpages = PQfnumber(res, "relpages");
  		i_contype = PQfnumber(res, "contype");
  		i_conname = PQfnumber(res, "conname");
  		i_condeferrable = PQfnumber(res, "condeferrable");
*************** getIndexes(Archive *fout, TableInfo tbli
*** 4674,4679 ****
--- 4700,4706 ----
  			parseOidArray(PQgetvalue(res, j, i_indkey),
  						  indxinfo[j].indkeys, INDEX_MAX_KEYS);
  			indxinfo[j].indisclustered = (PQgetvalue(res, j, i_indisclustered)[0] == 't');
+ 			indxinfo[j].relpages = atoi(PQgetvalue(res, j, i_relpages));
  			contype = *(PQgetvalue(res, j, i_contype));
  
  			if (contype == 'p' || contype == 'u' || contype == 'x')
*************** getDependencies(Archive *fout)
*** 14075,14096 ****
   *
   * Whenever the selected schema is not pg_catalog, be careful to qualify
   * references to system catalogs and types in our emitted commands!
   */
  static void
  selectSourceSchema(Archive *fout, const char *schemaName)
  {
- 	static char *curSchemaName = NULL;
  	PQExpBuffer query;
  
  	/* Not relevant if fetching from pre-7.3 DB */
  	if (fout->remoteVersion < 70300)
  		return;
- 	/* Ignore null schema names */
- 	if (schemaName == NULL || *schemaName == '\0')
- 		return;
- 	/* Optimize away repeated selection of same schema */
- 	if (curSchemaName && strcmp(curSchemaName, schemaName) == 0)
- 		return;
  
  	query = createPQExpBuffer();
  	appendPQExpBuffer(query, "SET search_path = %s",
--- 14102,14122 ----
   *
   * Whenever the selected schema is not pg_catalog, be careful to qualify
   * references to system catalogs and types in our emitted commands!
+  *
+  * This function is called only from selectSourceSchemaOnAH and
+  * selectSourceSchema.
   */
  static void
  selectSourceSchema(Archive *fout, const char *schemaName)
  {
  	PQExpBuffer query;
  
+ 	/* This is checked by the callers already */
+ 	Assert(schemaName != NULL && *schemaName != '\0');
+ 
  	/* Not relevant if fetching from pre-7.3 DB */
  	if (fout->remoteVersion < 70300)
  		return;
  
  	query = createPQExpBuffer();
  	appendPQExpBuffer(query, "SET search_path = %s",
*************** selectSourceSchema(Archive *fout, const
*** 14101,14109 ****
  	ExecuteSqlStatement(fout, query->data);
  
  	destroyPQExpBuffer(query);
- 	if (curSchemaName)
- 		free(curSchemaName);
- 	curSchemaName = pg_strdup(schemaName);
  }
  
  /*
--- 14127,14132 ----
*************** myFormatType(const char *typname, int32
*** 14241,14311 ****
  }
  
  /*
-  * fmtQualifiedId - convert a qualified name to the proper format for
-  * the source database.
-  *
-  * Like fmtId, use the result before calling again.
-  */
- static const char *
- fmtQualifiedId(Archive *fout, const char *schema, const char *id)
- {
- 	static PQExpBuffer id_return = NULL;
- 
- 	if (id_return)				/* first time through? */
- 		resetPQExpBuffer(id_return);
- 	else
- 		id_return = createPQExpBuffer();
- 
- 	/* Suppress schema name if fetching from pre-7.3 DB */
- 	if (fout->remoteVersion >= 70300 && schema && *schema)
- 	{
- 		appendPQExpBuffer(id_return, "%s.",
- 						  fmtId(schema));
- 	}
- 	appendPQExpBuffer(id_return, "%s",
- 					  fmtId(id));
- 
- 	return id_return->data;
- }
- 
- /*
   * Return a column list clause for the given relation.
   *
   * Special case: if there are no undropped columns in the relation, return
   * "", not an invalid "()" column list.
   */
  static const char *
! fmtCopyColumnList(const TableInfo *ti)
  {
- 	static PQExpBuffer q = NULL;
  	int			numatts = ti->numatts;
  	char	  **attnames = ti->attnames;
  	bool	   *attisdropped = ti->attisdropped;
  	bool		needComma;
  	int			i;
  
! 	if (q)						/* first time through? */
! 		resetPQExpBuffer(q);
! 	else
! 		q = createPQExpBuffer();
! 
! 	appendPQExpBuffer(q, "(");
  	needComma = false;
  	for (i = 0; i < numatts; i++)
  	{
  		if (attisdropped[i])
  			continue;
  		if (needComma)
! 			appendPQExpBuffer(q, ", ");
! 		appendPQExpBuffer(q, "%s", fmtId(attnames[i]));
  		needComma = true;
  	}
  
  	if (!needComma)
  		return "";				/* no undropped columns */
  
! 	appendPQExpBuffer(q, ")");
! 	return q->data;
  }
  
  /*
--- 14264,14300 ----
  }
  
  /*
   * Return a column list clause for the given relation.
   *
   * Special case: if there are no undropped columns in the relation, return
   * "", not an invalid "()" column list.
   */
  static const char *
! fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer)	/* result lives in caller's buffer */
  {
  	int			numatts = ti->numatts;
  	char	  **attnames = ti->attnames;
  	bool	   *attisdropped = ti->attisdropped;
  	bool		needComma;
  	int			i;
  
! 	appendPQExpBuffer(buffer, "(");		/* caller-supplied buffer: no static state, reentrant */
  	needComma = false;
  	for (i = 0; i < numatts; i++)
  	{
  		if (attisdropped[i])
  			continue;
  		if (needComma)
! 			appendPQExpBuffer(buffer, ", ");
! 		appendPQExpBuffer(buffer, "%s", fmtId(attnames[i]));
  		needComma = true;
  	}
  
  	if (!needComma)
  		return "";				/* no undropped columns */
  
! 	appendPQExpBuffer(buffer, ")");
! 	return buffer->data;		/* storage is owned by the caller's buffer */
  }
  
  /*
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index fba6953..0efc15f 100644
*** a/src/bin/pg_dump/pg_dump.h
--- b/src/bin/pg_dump/pg_dump.h
*************** typedef struct _tableInfo
*** 256,261 ****
--- 256,262 ----
  	/* these two are set only if table is a sequence owned by a column: */
  	Oid			owning_tab;		/* OID of table owning sequence */
  	int			owning_col;		/* attr # of column owning sequence */
+ 	int			relpages;
  
  	bool		interesting;	/* true if need to collect more data */
  
*************** typedef struct _indxInfo
*** 319,324 ****
--- 320,326 ----
  	bool		indisclustered;
  	/* if there is an associated constraint object, its dumpId: */
  	DumpId		indexconstraint;
+ 	int			relpages;		/* relpages of the underlying table */
  } IndxInfo;
  
  typedef struct _ruleInfo
*************** extern void parseOidArray(const char *st
*** 523,528 ****
--- 525,531 ----
  extern void sortDumpableObjects(DumpableObject **objs, int numObjs);
  extern void sortDumpableObjectsByTypeName(DumpableObject **objs, int numObjs);
  extern void sortDumpableObjectsByTypeOid(DumpableObject **objs, int numObjs);
+ extern void sortDataAndIndexObjectsBySize(DumpableObject **objs, int numObjs);
  
  /*
   * version specific routines
diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c
index a6533be..fe4d965 100644
*** a/src/bin/pg_dump/pg_dump_sort.c
--- b/src/bin/pg_dump/pg_dump_sort.c
*************** static void repairDependencyLoop(Dumpabl
*** 121,126 ****
--- 121,213 ----
  static void describeDumpableObject(DumpableObject *obj,
  					   char *buf, int bufsize);
  
+ static int DOSizeCompare(const void *p1, const void *p2);
+ 
+ static int
+ findFirstEqualType(DumpableObjectType type, DumpableObject **objs, int numObjs)
+ {
+ 	int i;
+ 	for (i = 0; i < numObjs; i++)
+ 		if (objs[i]->objType == type)
+ 			return i;		/* index of the first object of the requested type */
+ 	return -1;			/* no object of this type in the array */
+ }
+ 
+ static int
+ findFirstDifferentType(DumpableObjectType type, DumpableObject **objs, int numObjs, int start)
+ {
+ 	int i;
+ 	for (i = start; i < numObjs; i++)
+ 		if (objs[i]->objType != type)
+ 			return i;		/* exclusive end of the run of 'type' objects */
+ 	return numObjs;			/* run reaches array end; numObjs - 1 would drop the last object from the caller's qsort range */
+ }
+ 
+ /*
+  * When we do a parallel dump, we want to start with the largest items first.
+  *
+  * Say we have the objects in this order:
+  * ....DDDDD....III....
+  *
+  * with D = Table data, I = Index, . = other object
+  *
+  * This sorting function now takes each of the D or I blocks and sorts them
+  * according to their size.
+  */
+ void
+ sortDataAndIndexObjectsBySize(DumpableObject **objs, int numObjs)
+ {
+ 	int		startIdx, endIdx;
+ 	void   *startPtr;
+ 
+ 	if (numObjs <= 1)
+ 		return;			/* nothing to sort */
+ 
+ 	startIdx = findFirstEqualType(DO_TABLE_DATA, objs, numObjs);
+ 	if (startIdx >= 0)		/* -1 means there are no table-data objects */
+ 	{
+ 		endIdx = findFirstDifferentType(DO_TABLE_DATA, objs, numObjs, startIdx);
+ 		startPtr = objs + startIdx;		/* start of the contiguous D run */
+ 		qsort(startPtr, endIdx - startIdx, sizeof(DumpableObject *),
+ 			  DOSizeCompare);		/* biggest items first */
+ 	}
+ 
+ 	startIdx = findFirstEqualType(DO_INDEX, objs, numObjs);
+ 	if (startIdx >= 0)		/* -1 means there are no index objects */
+ 	{
+ 		endIdx = findFirstDifferentType(DO_INDEX, objs, numObjs, startIdx);
+ 		startPtr = objs + startIdx;		/* start of the contiguous I run */
+ 		qsort(startPtr, endIdx - startIdx, sizeof(DumpableObject *),
+ 			  DOSizeCompare);
+ 	}
+ }
+ 
+ static int
+ DOSizeCompare(const void *p1, const void *p2)
+ {
+ 	DumpableObject *obj1 = *(DumpableObject **) p1;		/* qsort passes pointers to array slots */
+ 	DumpableObject *obj2 = *(DumpableObject **) p2;
+ 	int			obj1_size = 0;		/* objects of any other type count as size 0 */
+ 	int			obj2_size = 0;
+ 
+ 	if (obj1->objType == DO_TABLE_DATA)
+ 		obj1_size = ((TableDataInfo *) obj1)->tdtable->relpages;
+ 	if (obj1->objType == DO_INDEX)
+ 		obj1_size = ((IndxInfo *) obj1)->relpages;		/* relpages of the underlying table */
+ 
+ 	if (obj2->objType == DO_TABLE_DATA)
+ 		obj2_size = ((TableDataInfo *) obj2)->tdtable->relpages;
+ 	if (obj2->objType == DO_INDEX)
+ 		obj2_size = ((IndxInfo *) obj2)->relpages;
+ 
+ 	/* we want to see the biggest item go first */
+ 	if (obj1_size > obj2_size)
+ 		return -1;
+ 	if (obj2_size > obj1_size)
+ 		return 1;
+ 
+ 	return 0;		/* equal sizes keep qsort's (unspecified) relative order */
+ }
  
  /*
   * Sort the given objects into a type/name-based ordering
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c
index bd2feff..11c83f7 100644
*** a/src/bin/pg_dump/pg_restore.c
--- b/src/bin/pg_dump/pg_restore.c
*************** main(int argc, char **argv)
*** 72,77 ****
--- 72,78 ----
  	RestoreOptions *opts;
  	int			c;
  	int			exit_code;
+ 	int			numWorkers = 1;
  	Archive    *AH;
  	char	   *inputFileSpec;
  	static int	disable_triggers = 0;
*************** main(int argc, char **argv)
*** 183,189 ****
  				break;
  
  			case 'j':			/* number of restore jobs */
! 				opts->number_of_jobs = atoi(optarg);
  				break;
  
  			case 'l':			/* Dump the TOC summary */
--- 184,190 ----
  				break;
  
  			case 'j':			/* number of restore jobs */
! 				numWorkers = atoi(optarg);
  				break;
  
  			case 'l':			/* Dump the TOC summary */
*************** main(int argc, char **argv)
*** 338,344 ****
  	}
  
  	/* Can't do single-txn mode with multiple connections */
! 	if (opts->single_txn && opts->number_of_jobs > 1)
  	{
  		fprintf(stderr, _("%s: cannot specify both --single-transaction and multiple jobs\n"),
  				progname);
--- 339,345 ----
  	}
  
  	/* Can't do single-txn mode with multiple connections */
! 	if (opts->single_txn && numWorkers > 1)
  	{
  		fprintf(stderr, _("%s: cannot specify both --single-transaction and multiple jobs\n"),
  				progname);
*************** main(int argc, char **argv)
*** 405,410 ****
--- 406,413 ----
  		InitDummyWantedList(AH, opts);
  	}
  
+ 	AH->numWorkers = numWorkers;
+ 
  	if (opts->tocSummary)
  		PrintTOCSummary(AH, opts);
  	else
