diff --git a/configure.in b/configure.in
index 4bfa459..c3180cf 100644
*** a/configure.in
--- b/configure.in
*************** PGAC_ARG_BOOL(with, zlib, yes,
*** 755,760 ****
--- 755,766 ----
  AC_SUBST(with_zlib)
  
  #
+ # libLZF
+ #
+ PGAC_ARG_BOOL(with, lzf, no, [use lzf compression library])
+ AC_SUBST(with_lzf)
+ 
+ #
  # Elf
  #
  
*************** failure.  It is possible the compiler is
*** 897,902 ****
--- 903,916 ----
  Use --without-zlib to disable zlib support.])])
  fi
  
+ if test "$with_lzf" = yes; then
+   AC_CHECK_LIB(lzf, lzf_compress, [],
+                [AC_MSG_ERROR([lzf library not found
+ If you have lzf already installed, see config.log for details on the
+ failure.  It is possible the compiler isn't looking in the proper directory.
+ Use --without-lzf to disable lzf support.])])
+ fi
+ 
  if test "$enable_spinlocks" = yes; then
    AC_DEFINE(HAVE_SPINLOCKS, 1, [Define to 1 if you have spinlocks.])
  else
diff --git a/src/bin/pg_dump/.gitignore b/src/bin/pg_dump/.gitignore
index c2c8677..c28ddea 100644
*** a/src/bin/pg_dump/.gitignore
--- b/src/bin/pg_dump/.gitignore
***************
*** 1,4 ****
--- 1,5 ----
  /kwlookup.c
+ /md5.c
  
  /pg_dump
  /pg_dumpall
diff --git a/src/bin/pg_dump/Makefile b/src/bin/pg_dump/Makefile
index 0367466..d012de8 100644
*** a/src/bin/pg_dump/Makefile
--- b/src/bin/pg_dump/Makefile
*************** override CPPFLAGS := -I$(libpq_srcdir) $
*** 20,32 ****
  
  OBJS=	pg_backup_archiver.o pg_backup_db.o pg_backup_custom.o \
  	pg_backup_files.o pg_backup_null.o pg_backup_tar.o \
! 	dumputils.o $(WIN32RES)
  
  KEYWRDOBJS = keywords.o kwlookup.o
  
  kwlookup.c: % : $(top_srcdir)/src/backend/parser/%
  	rm -f $@ && $(LN_S) $< .
  
  all: pg_dump pg_restore pg_dumpall
  
  pg_dump: pg_dump.o common.o pg_dump_sort.o $(OBJS) $(KEYWRDOBJS) | submake-libpq submake-libpgport
--- 20,35 ----
  
  OBJS=	pg_backup_archiver.o pg_backup_db.o pg_backup_custom.o \
  	pg_backup_files.o pg_backup_null.o pg_backup_tar.o \
! 	dumputils.o pg_backup_directory.o compress_io.o md5.o $(WIN32RES)
  
  KEYWRDOBJS = keywords.o kwlookup.o
  
  kwlookup.c: % : $(top_srcdir)/src/backend/parser/%
  	rm -f $@ && $(LN_S) $< .
  
+ md5.c: % : $(top_srcdir)/src/backend/libpq/%
+ 	rm -f $@ && $(LN_S) $< .
+ 
  all: pg_dump pg_restore pg_dumpall
  
  pg_dump: pg_dump.o common.o pg_dump_sort.o $(OBJS) $(KEYWRDOBJS) | submake-libpq submake-libpgport
*************** uninstall:
*** 50,53 ****
  	rm -f $(addprefix '$(DESTDIR)$(bindir)'/, pg_dump$(X) pg_restore$(X) pg_dumpall$(X))
  
  clean distclean maintainer-clean:
! 	rm -f pg_dump$(X) pg_restore$(X) pg_dumpall$(X) $(OBJS) pg_dump.o common.o pg_dump_sort.o pg_restore.o pg_dumpall.o kwlookup.c $(KEYWRDOBJS)
--- 53,56 ----
  	rm -f $(addprefix '$(DESTDIR)$(bindir)'/, pg_dump$(X) pg_restore$(X) pg_dumpall$(X))
  
  clean distclean maintainer-clean:
! 	rm -f pg_dump$(X) pg_restore$(X) pg_dumpall$(X) $(OBJS) pg_dump.o common.o pg_dump_sort.o pg_restore.o pg_dumpall.o md5.c kwlookup.c $(KEYWRDOBJS)
diff --git a/src/bin/pg_dump/compress_io.c b/src/bin/pg_dump/compress_io.c
index ...c1f19a5 .
*** a/src/bin/pg_dump/compress_io.c
--- b/src/bin/pg_dump/compress_io.c
***************
*** 0 ****
--- 1,630 ----
+ /*-------------------------------------------------------------------------
+  *
+  * compress_io.c
+  *   Routines for archivers to write an uncompressed or compressed data
+  *   stream.
+  *
+  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
+  * Portions Copyright (c) 1994, Regents of the University of California
+  *
+  *	pg_dump will read the system catalogs in a database and dump out a
+  *	script that reproduces the schema in terms of SQL that is understood
+  *	by PostgreSQL
+  *
+  * IDENTIFICATION
+  *     XXX
+  *
+  *-------------------------------------------------------------------------
+  */
+ 
+ #include "compress_io.h"
+ 
+ static const char *modulename = gettext_noop("compress_io");
+ 
+ static void _DoInflate(ArchiveHandle *AH, CompressorState *cs, ReadFunc readF);
+ static void _DoDeflate(ArchiveHandle *AH, CompressorState *cs, int flush, WriteFunc writeF);
+ 
+ #ifdef HAVE_LIBZ
+ static void _DoInflateZlib(ArchiveHandle *AH, CompressorState *cs, ReadFunc readF);
+ static void _DoDeflateZlib(ArchiveHandle *AH, CompressorState *cs, int flush, WriteFunc writeF);
+ #endif
+ 
+ #ifdef HAVE_LIBLZF
+ static void _DoInflateLZF(ArchiveHandle *AH, CompressorState *cs, ReadFunc readF);
+ static void _DoDeflateLZF(ArchiveHandle *AH, CompressorState *cs, int flush, WriteFunc writeF);
+ static void DoDeflateBufferLZF(ArchiveHandle *AH, char *in, bool isBase, size_t dLen, char *outBase, WriteFunc writeF);
+ #endif
+ 
+ /*
+  * If a compression library is in use, then start it up. This is called from
+  * StartData & StartBlob. The buffers are set up in the Init routine.
+  */
+ void
+ InitCompressorState(ArchiveHandle *AH, CompressorState *cs, CompressorAction action)
+ {
+ 	if (AH->compression == 0 && cs->comprAlg != COMPR_ALG_NONE)
+ 		AH->compression = -1;
+ 
+ 	Assert(AH->compression == 0 ?
+ 			 (cs->comprAlg == COMPR_ALG_NONE) :
+ 			 (cs->comprAlg != COMPR_ALG_NONE));
+ 
+ 	if (cs->comprAlg == COMPR_ALG_LIBZ)
+ 	{
+ #ifdef HAVE_LIBZ
+ 		z_streamp			zp = cs->zp;
+ 
+ 		if (AH->compression < 0 || AH->compression > 9)
+ 			AH->compression = Z_DEFAULT_COMPRESSION;
+ 
+ 		zp->zalloc = Z_NULL;
+ 		zp->zfree = Z_NULL;
+ 		zp->opaque = Z_NULL;
+ 
+ 		if (action == COMPRESSOR_DEFLATE)
+ 			if (deflateInit(zp, AH->compression) != Z_OK)
+ 				die_horribly(AH, modulename, "could not initialize compression library: %s\n", zp->msg);
+ 		if (action == COMPRESSOR_INFLATE)
+ 			if (inflateInit(zp) != Z_OK)
+ 				die_horribly(AH, modulename, "could not initialize compression library: %s\n", zp->msg);
+ 
+ 		/* Just be paranoid - maybe End is called after Start, with no Write */
+ 		zp->next_out = (void *) cs->comprOut;
+ 		zp->avail_out = comprOutInitSize;
+ #endif
+ 	}
+ 
+ 	/* Nothing to be done for COMPR_ALG_LIBLZF */
+ 
+ 	/* Nothing to be done for COMPR_ALG_NONE */
+ }
+ 
+ /*
+  * Terminate compression library context and flush its buffers. If no compression
+  * library is in use then just return.
+  */
+ void
+ FlushCompressorState(ArchiveHandle *AH, CompressorState *cs, WriteFunc writeF)
+ {
+ 	Assert(AH->compression == 0 ?
+ 			 (cs->comprAlg == COMPR_ALG_NONE) :
+ 			 (cs->comprAlg != COMPR_ALG_NONE));
+ 
+ #ifdef HAVE_LIBZ
+ 	if (cs->comprAlg == COMPR_ALG_LIBZ)
+ 	{
+ 		z_streamp			zp = cs->zp;
+ 
+ 		zp->next_in = NULL;
+ 		zp->avail_in = 0;
+ 
+ 		_DoDeflate(AH, cs, Z_FINISH, writeF);
+ 
+ 		if (deflateEnd(zp) != Z_OK)
+ 			die_horribly(AH, modulename, "could not close compression stream: %s\n", zp->msg);
+ 	}
+ #endif
+ #ifdef HAVE_LIBLZF
+ 	if (cs->comprAlg == COMPR_ALG_LIBLZF)
+ 	{
+ 		lzf_streamp	lzfp = cs->lzfp;
+ 
+ 		lzfp->next_in = NULL;
+ 		lzfp->avail_in = 0;
+ 
+ 		_DoDeflate(AH, cs, 1, writeF);
+ 	}
+ #endif
+ 	/* Nothing to be done for COMPR_ALG_NONE */
+ }
+ 
+ void
+ _DoDeflate(ArchiveHandle *AH, CompressorState *cs, int flush, WriteFunc writeF)
+ {
+ 	switch(cs->comprAlg)
+ 	{
+ 		case COMPR_ALG_LIBZ:
+ #ifdef HAVE_LIBZ
+ 			_DoDeflateZlib(AH, cs, flush, writeF);
+ #endif
+ 			break;
+ 		case COMPR_ALG_LIBLZF:
+ #ifdef HAVE_LIBLZF
+ 			_DoDeflateLZF(AH, cs, flush, writeF);
+ #endif
+ 			break;
+ 		case COMPR_ALG_NONE:
+ 			Assert(false);
+ 			break;
+ 	}
+ }
+ 
+ 
+ #ifdef HAVE_LIBLZF
+ void
+ DoDeflateBufferLZF(ArchiveHandle *AH, char *in, bool isBase, size_t dLen, char *outBase, WriteFunc writeF)
+ {
+ 	size_t		avail;
+ 	char	   *header;
+ 	size_t		len;
+ 	const char *start = in;
+ 
+ 	if (isBase)
+ 		start += LZF_HDR_SIZE;
+ 
+ 	avail = lzf_compress(start, dLen, outBase + LZF_HDR_SIZE, dLen - 1);
+ 
+ 	if (avail == 0)
+ 	{
+ 		/* The output buffer was not large enough. As the output buffer is
+ 		 * always one byte smaller than the input buffer, we save more
+ 		 * space by just storing the data uncompressed. */
+ 		if (!isBase)
+ 		{
+ 			memcpy(outBase + LZF_HDR_SIZE, in, dLen);
+ 			header = outBase;
+ 		}
+ 		else
+ 			header = in;
+ 		header[0] = 'Z';
+ 		header[1] = 'U';	/* not compressed */
+ 		header[2] = dLen >> 8;
+ 		header[3] = dLen & 0xff;
+ 		header[4] = 0;
+ 		header[5] = 0;
+ 		len = dLen + LZF_HDR_SIZE;
+ 	}
+ 	else
+ 	{
+ 		header = outBase;
+ 		header[0] = 'Z';
+ 		header[1] = 'C';	/* compressed */
+ 		header[2] = dLen >> 8;
+ 		header[3] = dLen & 0xff;
+ 		header[4] = avail >> 8;
+ 		header[5] = avail & 0xff;
+ 		len = avail + LZF_HDR_SIZE;
+ 	}
+ 	writeF(AH, header, len);
+ }
+ #endif
+ 
+ 
+ /*
+  * Send compressed data to the output stream (via writeF).
+  */
+ #ifdef HAVE_LIBLZF
+ void
+ _DoDeflateLZF(ArchiveHandle *AH, CompressorState *cs, int flush, WriteFunc writeF)
+ {
+ 	lzf_streamp	lzfp = cs->lzfp;
+ 	size_t		freeBytes;
+ 	size_t		copyBytes;
+ 	size_t		remainBytes;
+ 
+ 	freeBytes = LZF_BLOCKSIZE - lzfp->comprInFill;
+ 	copyBytes = (freeBytes >= lzfp->avail_in) ? lzfp->avail_in : freeBytes;
+ 	memcpy(cs->comprIn + lzfp->comprInFill, lzfp->next_in, copyBytes);
+ 
+ 	lzfp->comprInFill += copyBytes;
+ 
+ 	if (lzfp->comprInFill < LZF_BLOCKSIZE && !flush)
+ 		return;
+ 
+ 	DoDeflateBufferLZF(AH, lzfp->comprInBase, true, lzfp->comprInFill,
+ 					   lzfp->comprOutBase, writeF);
+ 
+ 	for(;;)
+ 	{
+ 		remainBytes = lzfp->avail_in - copyBytes;
+ 		if (remainBytes < LZF_BLOCKSIZE)
+ 			break;
+ 		DoDeflateBufferLZF(AH, lzfp->next_in + copyBytes, false, LZF_BLOCKSIZE,
+ 						   lzfp->comprOutBase, writeF);
+ 		copyBytes += LZF_BLOCKSIZE;
+ 	}
+ 	/* copy remaining bytes and overwrite "in" buffer */
+ 	memcpy(cs->comprIn, lzfp->next_in + copyBytes, remainBytes);
+ 	lzfp->comprInFill = remainBytes;
+ }
+ #endif
+ 
+ #ifdef HAVE_LIBZ
+ /*
+  * Send compressed data to the output stream (via writeF).
+  */
+ void
+ _DoDeflateZlib(ArchiveHandle *AH, CompressorState *cs, int flush, WriteFunc writeF)
+ {
+ 	z_streamp	zp = cs->zp;
+ 	char	   *out = cs->comprOut;
+ 	int			res = Z_OK;
+ 
+ 	Assert(AH->compression != 0);
+ 
+ 	while (cs->zp->avail_in != 0 || flush)
+ 	{
+ 		res = deflate(zp, flush);
+ 		if (res == Z_STREAM_ERROR)
+ 			die_horribly(AH, modulename, "could not compress data: %s\n", zp->msg);
+ 		if (((flush == Z_FINISH) && (zp->avail_out < comprOutInitSize))
+ 			|| (zp->avail_out == 0)
+ 			|| (zp->avail_in != 0)
+ 			)
+ 		{
+ 			/*
+ 			 * Extra paranoia: avoid zero-length chunks, since a zero length
+ 			 * chunk is the EOF marker in the custom format. This should never
+ 			 * happen but...
+ 			 */
+ 			if (zp->avail_out < comprOutInitSize)
+ 			{
+ 				/*
+ 				 * Any write function should do its own error checking but
+ 				 * to make sure we do a check here as well...
+ 				 */
+ 				size_t len = comprOutInitSize - zp->avail_out;
+ 				if (writeF(AH, out, len) != len)
+ 					die_horribly(AH, modulename, "could not write to output file: %s\n", strerror(errno));
+ 			}
+ 			zp->next_out = (void *) out;
+ 			zp->avail_out = comprOutInitSize;
+ 		}
+ 
+ 		if (res == Z_STREAM_END)
+ 			break;
+ 	}
+ }
+ #endif
+ 
+ static void
+ _DoInflate(ArchiveHandle *AH, CompressorState *cs, ReadFunc readF)
+ {
+ 	switch(cs->comprAlg)
+ 	{
+ 		case COMPR_ALG_LIBZ:
+ #ifdef HAVE_LIBZ
+ 			_DoInflateZlib(AH, cs, readF);
+ #endif
+ 			break;
+ 		case COMPR_ALG_LIBLZF:
+ #ifdef HAVE_LIBLZF
+ 			_DoInflateLZF(AH, cs, readF);
+ #endif
+ 			break;
+ 		case COMPR_ALG_NONE:
+ 			Assert(false);
+ 			break;
+ 	}
+ }
+ 
+ #ifdef HAVE_LIBLZF
+ static void
+ _DoInflateLZF(ArchiveHandle *AH, CompressorState *cs, ReadFunc readF)
+ {
+ 	void	   *in;
+ 	lzf_streamp	lzfp = cs->lzfp;
+ 	size_t		cnt;
+ 	char	   *header;
+ 	char	   *data;
+ 	size_t		dLen;
+ 	size_t		uncompressedSize, compressedSize, needSize;
+ 	bool		isCompressed;
+ 
+ 	/* first we need at least LZF_HDR_SIZE */
+ 	while ((cnt = readF(AH, &in, LZF_HDR_SIZE)))
+ 	{
+ 		/* then we check the header and read the compressed size until we have
+ 		 * LZF_HDR_SIZE + compressed_size. */
+ 		if (cnt < LZF_HDR_SIZE)
+ 			die_horribly(AH, modulename, "corrupted archive");
+ 
+ 		header = (char *) in;
+ 
+ 		if (header[0] != 'Z' || (header[1] != 'C' && header[1] != 'U'))
+ 			die_horribly(AH, modulename, "corrupted archive");
+ 
+ 		uncompressedSize = (unsigned char) header[2] << 8 | (unsigned char) header[3];
+ 		compressedSize = (unsigned char) header[4] << 8 | (unsigned char) header[5];
+ 		isCompressed = header[1] == 'C';
+ 		needSize = isCompressed ? compressedSize : uncompressedSize;
+ 
+ 		/*
+ 		 * If we read more data in the beginning, it must match exactly the
+ 		 * required size (because then the archive was written in blocks and
+ 		 * the size of each block got recorded).
+ 		 */
+ 		if (cnt > LZF_HDR_SIZE)
+ 		{
+ 			if (cnt != LZF_HDR_SIZE + needSize)
+ 				die_horribly(AH, modulename, "corrupted archive");
+ 
+ 			lzfp->avail_in = cnt - LZF_HDR_SIZE;
+ 			lzfp->next_in = (char *) in + LZF_HDR_SIZE;
+ 		}
+ 		else
+ 		{
+ 			cnt = readF(AH, &in, needSize);
+ 			if (cnt != needSize)
+ 				die_horribly(AH, modulename, "corrupted archive");
+ 
+ 			lzfp->avail_in = cnt;
+ 			lzfp->next_in = (char *) in;
+ 		}
+ 
+ 		if (isCompressed)
+ 		{
+ 			dLen = lzf_decompress(lzfp->next_in,
+ 								  lzfp->avail_in,
+ 								  cs->comprOut, cs->comprOutSize);
+ 
+ 			if (uncompressedSize != dLen)
+ 				die_horribly(AH, modulename, "corrupted archive");
+ 
+ 			data = cs->comprOut;
+ 		}
+ 		else
+ 		{
+ 			/* uncompressed data */
+ 			data = lzfp->next_in;
+ 			dLen = lzfp->avail_in;
+ 		}
+ 		data[dLen] = '\0';
+ 		ahwrite(data, 1, dLen, AH);
+ 	}
+ }
+ #endif
+ 
+ #ifdef HAVE_LIBZ
+ /*
+  * This function is void as it either returns successfully or fails via
+  * die_horribly().
+  */
+ static void
+ _DoInflateZlib(ArchiveHandle *AH, CompressorState *cs, ReadFunc readF)
+ {
+ 	z_streamp	zp = cs->zp;
+ 	char	   *out = cs->comprOut;
+ 	int			res = Z_OK;
+ 	size_t		cnt;
+ 	void	   *in;
+ 
+ 	Assert(AH->compression != 0);
+ 
+ 	/* no minimal chunk size for zlib */
+ 	while ((cnt = readF(AH, &in, 0)))
+ 	{
+ 		zp->next_in = (void *) in;
+ 		zp->avail_in = cnt;
+ 
+ 		while (zp->avail_in > 0)
+ 		{
+ 			zp->next_out = (void *) out;
+ 			zp->avail_out = comprOutInitSize;
+ 
+ 			res = inflate(zp, 0);
+ 			if (res != Z_OK && res != Z_STREAM_END)
+ 				die_horribly(AH, modulename, "could not uncompress data: %s\n", zp->msg);
+ 
+ 			out[comprOutInitSize - zp->avail_out] = '\0';
+ 			ahwrite(out, 1, comprOutInitSize - zp->avail_out, AH);
+ 		}
+ 	}
+ 
+ 	zp->next_in = NULL;
+ 	zp->avail_in = 0;
+ 	while (res != Z_STREAM_END)
+ 	{
+ 		zp->next_out = (void *) out;
+ 		zp->avail_out = comprOutInitSize;
+ 		res = inflate(zp, 0);
+ 		if (res != Z_OK && res != Z_STREAM_END)
+ 			die_horribly(AH, modulename, "could not uncompress data: %s\n", zp->msg);
+ 
+ 		out[comprOutInitSize - zp->avail_out] = '\0';
+ 		ahwrite(out, 1, comprOutInitSize - zp->avail_out, AH);
+ 	}
+ 
+ 	if (inflateEnd(zp) != Z_OK)
+ 		die_horribly(AH, modulename, "could not close compression library: %s\n", zp->msg);
+ }
+ #endif
+ 
+ void
+ ReadDataFromArchive(ArchiveHandle *AH, CompressorState *cs, ReadFunc readF)
+ {
+ 	Assert(AH->compression == 0 ?
+ 			 (cs->comprAlg == COMPR_ALG_NONE) :
+ 			 (cs->comprAlg != COMPR_ALG_NONE));
+ 
+ 	switch(cs->comprAlg)
+ 	{
+ 		case COMPR_ALG_LIBZ:
+ 		case COMPR_ALG_LIBLZF:
+ 			_DoInflate(AH, cs, readF);
+ 			break;
+ 		case COMPR_ALG_NONE:
+ 		{
+ 			size_t	cnt;
+ 			void   *in;
+ 
+ 			/* no minimal chunk size for uncompressed data */
+ 			while ((cnt = readF(AH, &in, 0)))
+ 			{
+ 				ahwrite(in, 1, cnt, AH);
+ 			}
+ 		}
+ 	}
+ }
+ 
+ size_t
+ WriteDataToArchive(ArchiveHandle *AH, CompressorState *cs, WriteFunc writeF,
+ 				   const void *data, size_t dLen)
+ {
+ 	Assert(AH->compression == 0 ?
+ 			 (cs->comprAlg == COMPR_ALG_NONE) :
+ 			 (cs->comprAlg != COMPR_ALG_NONE));
+ 
+ 	switch(cs->comprAlg)
+ 	{
+ 		case COMPR_ALG_LIBZ:
+ #ifdef HAVE_LIBZ
+ 			cs->zp->next_in = (void *) data;
+ 			cs->zp->avail_in = dLen;
+ 			_DoDeflate(AH, cs, Z_NO_FLUSH, writeF);
+ #endif
+ 			break;
+ 		case COMPR_ALG_LIBLZF:
+ #ifdef HAVE_LIBLZF
+ 			cs->lzfp->next_in = (char *) data;
+ 			cs->lzfp->avail_in = dLen;
+ 			_DoDeflate(AH, cs, 0, writeF);
+ #endif
+ 			break;
+ 		case COMPR_ALG_NONE:
+ 			/*
+ 			 * Any write function should do its own error checking but to make sure
+ 			 * we do a check here as well...
+ 			 */
+ 			if (writeF(AH, data, dLen) != dLen)
+ 				die_horribly(AH, modulename, "could not write to output file: %s\n", strerror(errno));
+ 	}
+ 	/* we have either succeeded in writing dLen bytes or we have called die_horribly() */
+ 	return dLen;
+ }
+ 
+ CompressorState *
+ AllocateCompressorState(ArchiveHandle *AH)
+ {
+ 	CompressorAlgorithm	alg = COMPR_ALG_NONE;
+ 	CompressorState	   *cs;
+ 
+ 	/*
+ 	 * AH->compression is set either on the commandline when creating an archive
+ 	 * or by ReadHead() when restoring an archive.
+ 	 */
+ 
+ 	switch (AH->compression)
+ 	{
+ 		case Z_DEFAULT_COMPRESSION:
+ 			alg = COMPR_ALG_LIBZ;
+ 			break;
+ 		case 0:
+ 			alg = COMPR_ALG_NONE;
+ 			break;
+ 		case 1:
+ 		case 2:
+ 		case 3:
+ 		case 4:
+ 		case 5:
+ 		case 6:
+ 		case 7:
+ 		case 8:
+ 		case 9:
+ 			alg = COMPR_ALG_LIBZ;
+ 			break;
+ 		case COMPR_LZF_CODE:
+ 			alg = COMPR_ALG_LIBLZF;
+ 			break;
+ 		default:
+ 			die_horribly(AH, modulename, "Invalid compression code: %d\n",
+ 						 AH->compression);
+ 	}
+ 
+ #ifndef HAVE_LIBZ
+ 	if (alg == COMPR_ALG_LIBZ)
+ 		die_horribly(AH, modulename, "not built with zlib support\n");
+ #endif
+ #ifndef HAVE_LIBLZF
+ 	if (alg == COMPR_ALG_LIBLZF)
+ 		die_horribly(AH, modulename, "not built with liblzf support\n");
+ #endif
+ 
+ 	cs = (CompressorState *) malloc(sizeof(CompressorState));
+ 	if (cs == NULL)
+ 		die_horribly(AH, modulename, "out of memory\n");
+ 
+ 	cs->comprAlg = alg;
+ 
+ 	switch(alg)
+ 	{
+ 		case COMPR_ALG_LIBZ:
+ #ifdef HAVE_LIBZ
+ 			cs->zp = (z_streamp) malloc(sizeof(z_stream));
+ 			if (cs->zp == NULL)
+ 				die_horribly(AH, modulename, "out of memory\n");
+ 
+ 			/*
+ 			 * comprOutInitSize is the buffer size we tell zlib it can output
+ 			 * to.  We actually allocate one extra byte because some routines
+ 			 * want to append a trailing zero byte to the zlib output.  The
+ 			 * input buffer is expansible and is always of size
+ 			 * cs->comprInSize; comprInInitSize is just the initial default
+ 			 * size for it.
+ 			 */
+ 			cs->comprOut = (char *) malloc(comprOutInitSize + 1);
+ 			cs->comprIn = (char *) malloc(comprInInitSize);
+ 			cs->comprInSize = comprInInitSize;
+ 			cs->comprOutSize = comprOutInitSize;
+ 
+ 			if (cs->comprOut == NULL || cs->comprIn == NULL)
+ 				die_horribly(AH, modulename, "out of memory\n");
+ #endif
+ 			break;
+ 		case COMPR_ALG_LIBLZF:
+ #ifdef HAVE_LIBLZF
+ 			cs->lzfp = (lzf_streamp) malloc(sizeof(lzf_stream));
+ 			if (cs->lzfp == NULL)
+ 				die_horribly(AH, modulename, "out of memory\n");
+ 
+ 			cs->lzfp->comprOutBase = (char *) malloc(comprOutInitSize + LZF_HDR_SIZE);
+ 			cs->lzfp->comprInBase = (char *) malloc(comprInInitSize + LZF_HDR_SIZE);
+ 			cs->comprInSize = comprInInitSize;
+ 			cs->comprOutSize = comprOutInitSize;
+ 
+ 			if (cs->lzfp->comprOutBase == NULL || cs->lzfp->comprInBase == NULL)
+ 				die_horribly(AH, modulename, "out of memory\n");
+ 
+ 			cs->comprIn = cs->lzfp->comprInBase + LZF_HDR_SIZE;
+ 			cs->comprOut = cs->lzfp->comprOutBase + LZF_HDR_SIZE;
+ 
+ 			cs->lzfp->comprInFill = 0;
+ #endif
+ 			break;
+ 		case COMPR_ALG_NONE:
+ 			cs->comprOut = (char *) malloc(comprOutInitSize + 1);
+ 			cs->comprIn = (char *) malloc(comprInInitSize);
+ 			cs->comprInSize = comprInInitSize;
+ 			cs->comprOutSize = comprOutInitSize;
+ 
+ 			if (cs->comprOut == NULL || cs->comprIn == NULL)
+ 				die_horribly(AH, modulename, "out of memory\n");
+ 			break;
+ 	}
+ 
+ 	return cs;
+ }
+ 
+ void
+ FreeCompressorState(CompressorState *cs)
+ {
+ 	free(cs->comprOut);
+ 	free(cs->comprIn);
+ 	switch(cs->comprAlg)
+ 	{
+ 		case COMPR_ALG_NONE:
+ 			break;
+ 		case COMPR_ALG_LIBZ:
+ #ifdef HAVE_LIBZ
+ 			free(cs->zp);
+ #endif
+ 			break;
+ 		case COMPR_ALG_LIBLZF:
+ #ifdef HAVE_LIBLZF
+ 			free(cs->lzfp);
+ #endif
+ 			break;
+ 	}
+ 	free(cs);
+ }
+ 
diff --git a/src/bin/pg_dump/compress_io.h b/src/bin/pg_dump/compress_io.h
index ...416cccc .
*** a/src/bin/pg_dump/compress_io.h
--- b/src/bin/pg_dump/compress_io.h
***************
*** 0 ****
--- 1,95 ----
+ /*-------------------------------------------------------------------------
+  *
+  * compress_io.h
+  *   Routines for archivers to write an uncompressed or compressed data
+  *   stream.
+  *
+  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
+  * Portions Copyright (c) 1994, Regents of the University of California
+  *
+  *	pg_dump will read the system catalogs in a database and dump out a
+  *	script that reproduces the schema in terms of SQL that is understood
+  *	by PostgreSQL
+  *
+  * IDENTIFICATION
+  *     XXX
+  *
+  *-------------------------------------------------------------------------
+  */
+ 
+ #include "pg_backup_archiver.h"
+ 
+ #define comprOutInitSize 4096000
+ #define comprInInitSize	4096000
+ 
+ 
+ #ifdef HAVE_LIBLZF
+ #include "lzf.h"
+ /* we cannot do more with the current header format */
+ #define	LZF_BLOCKSIZE	(1024 * 64 - 1)
+ #define LZF_HDR_SIZE	6
+ typedef struct
+ {
+ 	char   *next_in;
+ 	char   *comprInBase;
+ 	char   *comprOutBase;
+ 	size_t	comprInFill;		/* how much of comprIn are we using ? */
+ 	size_t	avail_in;
+ } lzf_stream;
+ 
+ typedef lzf_stream *lzf_streamp;
+ #endif
+ 
+ typedef enum
+ {
+ 	COMPRESSOR_INFLATE,
+ 	COMPRESSOR_DEFLATE
+ } CompressorAction;
+ 
+ typedef enum
+ {
+ 	COMPR_ALG_NONE,
+ 	COMPR_ALG_LIBZ,
+ 	COMPR_ALG_LIBLZF
+ } CompressorAlgorithm;
+ 
+ #define COMPR_LZF_CODE		100
+ 
+ typedef struct
+ {
+ 	CompressorAlgorithm comprAlg;
+ #ifdef HAVE_LIBZ
+ 	z_streamp		zp;
+ #endif
+ #ifdef HAVE_LIBLZF
+ 	lzf_streamp		lzfp;
+ #endif
+ 	char		   *comprOut;
+ 	char		   *comprIn;
+ 	size_t			comprInSize;
+ 	size_t			comprOutSize;
+ } CompressorState;
+ 
+ typedef size_t (*WriteFunc)(ArchiveHandle *AH, const void *buf, size_t len);
+ /*
+  * The sizeHint parameter tells the format which size is required for the algorithm.
+  * If the format doesn't know better it should send back that many bytes from the input.
+  * If the format was written by blocks however, then the format already knows the block
+  * size and can deliver exactly the size of the next block.
+  *
+  * The custom archive is written in such blocks.
+  * The directory archive however is just a continuous stream of data. With liblzf,
+  * we get blocks on the algorithm level and then the algorithm is able to tell the format
+  * the amount of data that it is ready to consume next.
+  */
+ typedef size_t (*ReadFunc)(ArchiveHandle *AH, void **buf, size_t sizeHint);
+ 
+ void ReadDataFromArchive(ArchiveHandle *AH, CompressorState *cs, ReadFunc readF);
+ size_t WriteDataToArchive(ArchiveHandle *AH, CompressorState *cs, WriteFunc writeF, const void *data, size_t dLen);
+ 
+ void InitCompressorState(ArchiveHandle *AH, CompressorState *cs, CompressorAction action);
+ void FlushCompressorState(ArchiveHandle *AH, CompressorState *cs, WriteFunc writeF);
+ 
+ void FreeCompressorState(CompressorState *cs);
+ CompressorState *AllocateCompressorState(ArchiveHandle *AH);
+ 
diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index 8fa9a57..5def7a7 100644
*** a/src/bin/pg_dump/pg_backup.h
--- b/src/bin/pg_dump/pg_backup.h
*************** typedef enum _archiveFormat
*** 48,56 ****
  {
  	archUnknown = 0,
  	archCustom = 1,
! 	archFiles = 2,
! 	archTar = 3,
! 	archNull = 4
  } ArchiveFormat;
  
  typedef enum _archiveMode
--- 48,58 ----
  {
  	archUnknown = 0,
  	archCustom = 1,
! 	archDirectory = 2,
! 	archFiles = 3,
! 	archTar = 4,
! 	archNull = 5,
!     archNullAppend = 6
  } ArchiveFormat;
  
  typedef enum _archiveMode
*************** typedef struct _restoreOptions
*** 112,117 ****
--- 114,120 ----
  	int			schemaOnly;
  	int			verbose;
  	int			aclsSkip;
+ 	int			checkArchive;
  	int			tocSummary;
  	char	   *tocFile;
  	int			format;
*************** extern Archive *CreateArchive(const char
*** 195,200 ****
--- 198,206 ----
  /* The --list option */
  extern void PrintTOCSummary(Archive *AH, RestoreOptions *ropt);
  
+ /* Check an existing archive */
+ extern bool CheckArchive(Archive *AH, RestoreOptions *ropt);
+ 
  extern RestoreOptions *NewRestoreOptions(void);
  
  /* Rearrange and filter TOC entries */
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index d1a9c54..c5b5fcc 100644
*** a/src/bin/pg_dump/pg_backup_archiver.c
--- b/src/bin/pg_dump/pg_backup_archiver.c
***************
*** 22,30 ****
--- 22,32 ----
  
  #include "pg_backup_db.h"
  #include "dumputils.h"
+ #include "compress_io.h"
  
  #include <ctype.h>
  #include <unistd.h>
+ #include <sys/stat.h>
  #include <sys/types.h>
  #include <sys/wait.h>
  
*************** static int	_discoverArchiveFormat(Archiv
*** 108,113 ****
--- 110,117 ----
  static void dump_lo_buf(ArchiveHandle *AH);
  static void _write_msg(const char *modulename, const char *fmt, va_list ap);
  static void _die_horribly(ArchiveHandle *AH, const char *modulename, const char *fmt, va_list ap);
+ static const char *getFmtName(ArchiveFormat fmt);
+ static void outputSummaryHeaderText(Archive *AHX);
  
  static void dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim);
  static OutputContext SetOutput(ArchiveHandle *AH, char *filename, int compression);
*************** RestoreArchive(Archive *AHX, RestoreOpti
*** 230,242 ****
  	 * Make sure we won't need (de)compression we haven't got
  	 */
  #ifndef HAVE_LIBZ
! 	if (AH->compression != 0 && AH->PrintTocDataPtr !=NULL)
  	{
  		for (te = AH->toc->next; te != AH->toc; te = te->next)
  		{
  			reqs = _tocEntryRequired(te, ropt, false);
  			if (te->hadDumper && (reqs & REQ_DATA) != 0)
! 				die_horribly(AH, modulename, "cannot restore from compressed archive (compression not supported in this installation)\n");
  		}
  	}
  #endif
--- 234,258 ----
  	 * Make sure we won't need (de)compression we haven't got
  	 */
  #ifndef HAVE_LIBZ
! 	if (AH->compression > 0 && AH->compression <= 9 && AH->PrintTocDataPtr !=NULL)
  	{
  		for (te = AH->toc->next; te != AH->toc; te = te->next)
  		{
  			reqs = _tocEntryRequired(te, ropt, false);
  			if (te->hadDumper && (reqs & REQ_DATA) != 0)
! 				die_horribly(AH, modulename, "cannot restore from compressed archive (zlib compression not supported in this installation)\n");
! 		}
! 	}
! #endif
! #ifndef HAVE_LIBLZF
! 	/* XXX are these checks correct?? */
! 	if (AH->compression == COMPR_LZF_CODE && AH->PrintTocDataPtr !=NULL)
! 	{
! 		for (te = AH->toc->next; te != AH->toc; te = te->next)
! 		{
! 			reqs = _tocEntryRequired(te, ropt, false);
! 			if (te->hadDumper && (reqs & REQ_DATA) != 0)
! 				die_horribly(AH, modulename, "cannot restore from compressed archive (lzf compression not supported in this installation)\n");
  		}
  	}
  #endif
*************** PrintTOCSummary(Archive *AHX, RestoreOpt
*** 778,817 ****
  	ArchiveHandle *AH = (ArchiveHandle *) AHX;
  	TocEntry   *te;
  	OutputContext sav;
- 	char	   *fmtName;
  
  	if (ropt->filename)
  		sav = SetOutput(AH, ropt->filename, 0 /* no compression */ );
  
! 	ahprintf(AH, ";\n; Archive created at %s", ctime(&AH->createDate));
! 	ahprintf(AH, ";     dbname: %s\n;     TOC Entries: %d\n;     Compression: %d\n",
! 			 AH->archdbname, AH->tocCount, AH->compression);
! 
! 	switch (AH->format)
! 	{
! 		case archFiles:
! 			fmtName = "FILES";
! 			break;
! 		case archCustom:
! 			fmtName = "CUSTOM";
! 			break;
! 		case archTar:
! 			fmtName = "TAR";
! 			break;
! 		default:
! 			fmtName = "UNKNOWN";
! 	}
! 
! 	ahprintf(AH, ";     Dump Version: %d.%d-%d\n", AH->vmaj, AH->vmin, AH->vrev);
! 	ahprintf(AH, ";     Format: %s\n", fmtName);
! 	ahprintf(AH, ";     Integer: %d bytes\n", (int) AH->intSize);
! 	ahprintf(AH, ";     Offset: %d bytes\n", (int) AH->offSize);
! 	if (AH->archiveRemoteVersion)
! 		ahprintf(AH, ";     Dumped from database version: %s\n",
! 				 AH->archiveRemoteVersion);
! 	if (AH->archiveDumpVersion)
! 		ahprintf(AH, ";     Dumped by pg_dump version: %s\n",
! 				 AH->archiveDumpVersion);
  
  	ahprintf(AH, ";\n;\n; Selected TOC Entries:\n;\n");
  
--- 794,804 ----
  	ArchiveHandle *AH = (ArchiveHandle *) AHX;
  	TocEntry   *te;
  	OutputContext sav;
  
  	if (ropt->filename)
  		sav = SetOutput(AH, ropt->filename, 0 /* no compression */ );
  
! 	outputSummaryHeaderText(AHX);
  
  	ahprintf(AH, ";\n;\n; Selected TOC Entries:\n;\n");
  
*************** PrintTOCSummary(Archive *AHX, RestoreOpt
*** 840,845 ****
--- 827,869 ----
  		ResetOutput(AH, sav);
  }
  
+ bool
+ CheckArchive(Archive *AHX, RestoreOptions *ropt)
+ {
+ 	ArchiveHandle  *AH = (ArchiveHandle *) AHX;
+ 	TocEntry	   *te;
+ 	teReqs			reqs;
+ 	bool			checkOK;
+ 
+ 	outputSummaryHeaderText(AHX);
+ 
+ 	checkOK = (*AH->StartCheckArchivePtr)(AH);
+ 
+ 	/* This only gets called from the command line, so we write to
+ 	 * stdout as usual */
+ 	printf(";\n; Performing Checks...\n;\n");
+ 
+ 	for (te = AH->toc->next; te != AH->toc; te = te->next)
+ 	{
+ 		if (!(reqs = _tocEntryRequired(te, ropt, true)))
+ 			continue;
+ 
+ 		if (!(*AH->CheckTocEntryPtr)(AH, te, reqs))
+ 			checkOK = false;
+ 
+ 		/* do not dump the contents but only the errors */
+ 	}
+ 
+ 	if (!(*AH->EndCheckArchivePtr)(AH))
+ 		checkOK = false;
+ 
+ 	printf("; Check result: %s\n", checkOK ? "OK" : "FAILED");
+ 
+ 	return checkOK;
+ }
+ 
+ 
+ 
  /***********
   * BLOB Archival
   ***********/
*************** archprintf(Archive *AH, const char *fmt,
*** 1115,1120 ****
--- 1139,1197 ----
   * Stuff below here should be 'private' to the archiver routines
   *******************************/
  
+ static const char *
+ getFmtName(ArchiveFormat fmt)
+ {
+ 	const char *fmtName;
+ 
+ 	switch (fmt)
+ 	{
+ 		case archCustom:
+ 			fmtName = "CUSTOM";
+ 			break;
+ 		case archDirectory:
+ 			fmtName = "DIRECTORY";
+ 			break;
+ 		case archFiles:
+ 			fmtName = "FILES";
+ 			break;
+ 		case archTar:
+ 			fmtName = "TAR";
+ 			break;
+ 		default:
+ 			fmtName = "UNKNOWN";
+ 	}
+ 
+ 	return fmtName;
+ }
+ 
+ static void
+ outputSummaryHeaderText(Archive *AHX)
+ {
+ 	ArchiveHandle  *AH = (ArchiveHandle *) AHX;
+ 	const char	   *fmtName;
+ 
+ 	ahprintf(AH, ";\n; Archive created at %s", ctime(&AH->createDate));
+ 	ahprintf(AH, ";     dbname: %s\n;     TOC Entries: %d\n;     Compression: %d\n",
+ 			 AH->archdbname, AH->tocCount, AH->compression);
+ 
+ 	fmtName = getFmtName(AH->format);
+ 
+ 	ahprintf(AH, ";     Dump Version: %d.%d-%d\n", AH->vmaj, AH->vmin, AH->vrev);
+ 	ahprintf(AH, ";     Format: %s\n", fmtName);
+ 	ahprintf(AH, ";     Integer: %d bytes\n", (int) AH->intSize);
+ 	ahprintf(AH, ";     Offset: %d bytes\n", (int) AH->offSize);
+ 	if (AH->archiveRemoteVersion)
+ 		ahprintf(AH, ";     Dumped from database version: %s\n",
+ 				 AH->archiveRemoteVersion);
+ 	if (AH->archiveDumpVersion)
+ 		ahprintf(AH, ";     Dumped by pg_dump version: %s\n",
+ 				 AH->archiveDumpVersion);
+ 
+ 	if (AH->PrintExtraTocSummaryPtr != NULL)
+ 		(*AH->PrintExtraTocSummaryPtr) (AH);
+ }
+ 
  static OutputContext
  SetOutput(ArchiveHandle *AH, char *filename, int compression)
  {
*************** _discoverArchiveFormat(ArchiveHandle *AH
*** 1720,1725 ****
--- 1797,1804 ----
  	char		sig[6];			/* More than enough */
  	size_t		cnt;
  	int			wantClose = 0;
+ 	char		buf[MAXPGPATH];
+ 	struct stat	st;
  
  #if 0
  	write_msg(modulename, "attempting to ascertain archive format\n");
*************** _discoverArchiveFormat(ArchiveHandle *AH
*** 1736,1742 ****
  	if (AH->fSpec)
  	{
  		wantClose = 1;
! 		fh = fopen(AH->fSpec, PG_BINARY_R);
  		if (!fh)
  			die_horribly(AH, modulename, "could not open input file \"%s\": %s\n",
  						 AH->fSpec, strerror(errno));
--- 1815,1836 ----
  	if (AH->fSpec)
  	{
  		wantClose = 1;
! 		/*
! 		 * Check if the specified archive is actually a directory. If so, we
! 		 * open the TOC file instead.
! 		 */
! 		buf[0] = '\0';
! 		if (stat(AH->fSpec, &st) == 0 && S_ISDIR(st.st_mode))
! 		{
! 			if (snprintf(buf, MAXPGPATH, "%s/%s", AH->fSpec, "TOC") >= MAXPGPATH)
! 				die_horribly(AH, modulename, "directory name too long: \"%s\"\n",
! 							 AH->fSpec);
! 		}
! 
! 		if (strlen(buf) == 0)
! 			strcpy(buf, AH->fSpec);
! 
! 		fh = fopen(buf, PG_BINARY_R);
  		if (!fh)
  			die_horribly(AH, modulename, "could not open input file \"%s\": %s\n",
  						 AH->fSpec, strerror(errno));
*************** _allocAH(const char *FileSpec, const Arc
*** 1949,1954 ****
--- 2043,2052 ----
  			InitArchiveFmt_Custom(AH);
  			break;
  
+ 		case archDirectory:
+ 			InitArchiveFmt_Directory(AH);
+ 			break;
+ 
  		case archFiles:
  			InitArchiveFmt_Files(AH);
  			break;
*************** WriteHead(ArchiveHandle *AH)
*** 2974,2984 ****
  	(*AH->WriteBytePtr) (AH, AH->format);
  
  #ifndef HAVE_LIBZ
! 	if (AH->compression != 0)
  		write_msg(modulename, "WARNING: requested compression not available in this "
  				  "installation -- archive will be uncompressed\n");
  
! 	AH->compression = 0;
  #endif
  
  	WriteInt(AH, AH->compression);
--- 3072,3084 ----
  	(*AH->WriteBytePtr) (AH, AH->format);
  
  #ifndef HAVE_LIBZ
! 	if (AH->compression > 0 && AH->compression <= 9)
! 	{
  		write_msg(modulename, "WARNING: requested compression not available in this "
  				  "installation -- archive will be uncompressed\n");
  
! 		AH->compression = 0;
! 	}
  #endif
  
  	WriteInt(AH, AH->compression);
*************** ReadHead(ArchiveHandle *AH)
*** 3062,3068 ****
  		AH->compression = Z_DEFAULT_COMPRESSION;
  
  #ifndef HAVE_LIBZ
! 	if (AH->compression != 0)
  		write_msg(modulename, "WARNING: archive is compressed, but this installation does not support compression -- no data will be available\n");
  #endif
  
--- 3162,3172 ----
  		AH->compression = Z_DEFAULT_COMPRESSION;
  
  #ifndef HAVE_LIBZ
! 	if (AH->compression > 0 && AH->compression <= 9)
! 		write_msg(modulename, "WARNING: archive is compressed, but this installation does not support compression -- no data will be available\n");
! #endif
! #ifndef HAVE_LIBLZF
! 	if (AH->compression == COMPR_LZF_CODE)
  		write_msg(modulename, "WARNING: archive is compressed, but this installation does not support compression -- no data will be available\n");
  #endif
  
diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h
index ae0c6e0..9eb9f6f 100644
*** a/src/bin/pg_dump/pg_backup_archiver.h
--- b/src/bin/pg_dump/pg_backup_archiver.h
***************
*** 49,54 ****
--- 49,55 ----
  #define GZCLOSE(fh) fclose(fh)
  #define GZWRITE(p, s, n, fh) (fwrite(p, s, n, fh) * (s))
  #define GZREAD(p, s, n, fh) fread(p, s, n, fh)
+ /* this is just the redefinition of a libz constant */
  #define Z_DEFAULT_COMPRESSION (-1)
  
  typedef struct _z_stream
*************** typedef struct _z_stream
*** 61,66 ****
--- 62,76 ----
  typedef z_stream *z_streamp;
  #endif
  
+ /* XXX eventually this should be an enum. However if we want something
+  * pluggable in the long run it can get hard to add values to a central
+  * enum from the plugins... */
+ #define COMPRESSION_UNKNOWN (-2)
+ #define COMPRESSION_NONE 0
+ 
+ /* XXX should we change the archive version for pg_dump with directory support?
+  * XXX We are not actually modifying the existing formats, but on the other hand
+  * XXX a file could now be compressed with liblzf. */
  /* Current archive version number (the format we can output) */
  #define K_VERS_MAJOR 1
  #define K_VERS_MINOR 12
*************** struct _archiveHandle;
*** 103,108 ****
--- 113,125 ----
  struct _tocEntry;
  struct _restoreList;
  
+ typedef enum
+ {
+ 	REQ_SCHEMA = 1,
+ 	REQ_DATA = 2,
+ 	REQ_ALL = REQ_SCHEMA + REQ_DATA
+ } teReqs;
+ 
  typedef void (*ClosePtr) (struct _archiveHandle * AH);
  typedef void (*ReopenPtr) (struct _archiveHandle * AH);
  typedef void (*ArchiveEntryPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
*************** typedef void (*WriteExtraTocPtr) (struct
*** 125,134 ****
--- 142,156 ----
  typedef void (*ReadExtraTocPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
  typedef void (*PrintExtraTocPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
  typedef void (*PrintTocDataPtr) (struct _archiveHandle * AH, struct _tocEntry * te, RestoreOptions *ropt);
+ typedef void (*PrintExtraTocSummaryPtr) (struct _archiveHandle * AH);
  
  typedef void (*ClonePtr) (struct _archiveHandle * AH);
  typedef void (*DeClonePtr) (struct _archiveHandle * AH);
  
+ typedef bool (*StartCheckArchivePtr)(struct _archiveHandle * AH);
+ typedef bool (*CheckTocEntryPtr)(struct _archiveHandle * AH, struct _tocEntry * te, teReqs reqs);
+ typedef bool (*EndCheckArchivePtr)(struct _archiveHandle * AH);
+ 
  typedef size_t (*CustomOutPtr) (struct _archiveHandle * AH, const void *buf, size_t len);
  
  typedef struct _outputContext
*************** typedef enum
*** 167,179 ****
  	STAGE_FINALIZING
  } ArchiverStage;
  
- typedef enum
- {
- 	REQ_SCHEMA = 1,
- 	REQ_DATA = 2,
- 	REQ_ALL = REQ_SCHEMA + REQ_DATA
- } teReqs;
- 
  typedef struct _archiveHandle
  {
  	Archive		public;			/* Public part of archive */
--- 189,194 ----
*************** typedef struct _archiveHandle
*** 229,234 ****
--- 244,250 ----
  										 * archie format */
  	PrintExtraTocPtr PrintExtraTocPtr;	/* Extra TOC info for format */
  	PrintTocDataPtr PrintTocDataPtr;
+ 	PrintExtraTocSummaryPtr PrintExtraTocSummaryPtr;
  
  	StartBlobsPtr StartBlobsPtr;
  	EndBlobsPtr EndBlobsPtr;
*************** typedef struct _archiveHandle
*** 238,243 ****
--- 254,263 ----
  	ClonePtr ClonePtr;			/* Clone format-specific fields */
  	DeClonePtr DeClonePtr;		/* Clean up cloned fields */
  
+ 	StartCheckArchivePtr StartCheckArchivePtr;
+ 	CheckTocEntryPtr CheckTocEntryPtr;
+ 	EndCheckArchivePtr EndCheckArchivePtr;
+ 
  	CustomOutPtr CustomOutPtr;	/* Alternative script output routine */
  
  	/* Stuff for direct DB connection */
*************** typedef struct _archiveHandle
*** 267,272 ****
--- 287,297 ----
  
  	struct _tocEntry *currToc;	/* Used when dumping data */
  	int			compression;	/* Compression requested on open */
+ 								/* Possible values for compression:
+ 								   0	no compression
+ 								   1-9	levels for gzip compression
+ 								   100 	liblzf compression (see COMPR_LZF_CODE)
+ 								*/
  	ArchiveMode mode;			/* File mode - r or w */
  	void	   *formatData;		/* Header data specific to file format */
  
*************** extern void EndRestoreBlob(ArchiveHandle
*** 367,372 ****
--- 392,398 ----
  extern void EndRestoreBlobs(ArchiveHandle *AH);
  
  extern void InitArchiveFmt_Custom(ArchiveHandle *AH);
+ extern void InitArchiveFmt_Directory(ArchiveHandle *AH);
  extern void InitArchiveFmt_Files(ArchiveHandle *AH);
  extern void InitArchiveFmt_Null(ArchiveHandle *AH);
  extern void InitArchiveFmt_Tar(ArchiveHandle *AH);
*************** int			ahprintf(ArchiveHandle *AH, const 
*** 381,384 ****
--- 407,421 ----
  
  void		ahlog(ArchiveHandle *AH, int level, const char *fmt,...) __attribute__((format(printf, 3, 4)));
  
+ #ifdef USE_ASSERT_CHECKING
+ #define Assert(condition) \
+ 	if (!(condition)) \
+ 	{ \
+ 		write_msg(NULL, "Failed assertion in %s, line %d\n", \
+ 				  __FILE__, __LINE__); \
+ 		abort();\
+ 	}
+ #else
+ #define Assert(condition)
+ #endif
  #endif
diff --git a/src/bin/pg_dump/pg_backup_custom.c b/src/bin/pg_dump/pg_backup_custom.c
index 2bc7e8f..ccc9acb 100644
*** a/src/bin/pg_dump/pg_backup_custom.c
--- b/src/bin/pg_dump/pg_backup_custom.c
***************
*** 25,30 ****
--- 25,31 ----
   */
  
  #include "pg_backup_archiver.h"
+ #include "compress_io.h"
  
  /*--------
   * Routines in the format interface
*************** static void _LoadBlobs(ArchiveHandle *AH
*** 58,77 ****
  static void _Clone(ArchiveHandle *AH);
  static void _DeClone(ArchiveHandle *AH);
  
! /*------------
!  * Buffers used in zlib compression and extra data stored in archive and
!  * in TOC entries.
!  *------------
!  */
! #define zlibOutSize 4096
! #define zlibInSize	4096
  
  typedef struct
  {
! 	z_streamp	zp;
! 	char	   *zlibOut;
! 	char	   *zlibIn;
! 	size_t		inSize;
  	int			hasSeek;
  	pgoff_t		filePos;
  	pgoff_t		dataStart;
--- 59,70 ----
  static void _Clone(ArchiveHandle *AH);
  static void _DeClone(ArchiveHandle *AH);
  
! static size_t _CustomWriteFunc(ArchiveHandle *AH, const void *buf, size_t len);
! static size_t _CustomReadFunction(ArchiveHandle *AH, void **buf, size_t sizeHint);
  
  typedef struct
  {
! 	CompressorState *cs;
  	int			hasSeek;
  	pgoff_t		filePos;
  	pgoff_t		dataStart;
*************** typedef struct
*** 81,86 ****
--- 74,80 ----
  {
  	int			dataState;
  	pgoff_t		dataPos;
+ 	int			restore_status;
  } lclTocEntry;
  
  
*************** static void _readBlockHeader(ArchiveHand
*** 92,98 ****
  static void _StartDataCompressor(ArchiveHandle *AH, TocEntry *te);
  static void _EndDataCompressor(ArchiveHandle *AH, TocEntry *te);
  static pgoff_t _getFilePos(ArchiveHandle *AH, lclContext *ctx);
- static int	_DoDeflate(ArchiveHandle *AH, lclContext *ctx, int flush);
  
  static const char *modulename = gettext_noop("custom archiver");
  
--- 86,91 ----
*************** InitArchiveFmt_Custom(ArchiveHandle *AH)
*** 128,133 ****
--- 121,127 ----
  	AH->ReadExtraTocPtr = _ReadExtraToc;
  	AH->WriteExtraTocPtr = _WriteExtraToc;
  	AH->PrintExtraTocPtr = _PrintExtraToc;
+ 	AH->PrintExtraTocSummaryPtr = NULL;
  
  	AH->StartBlobsPtr = _StartBlobs;
  	AH->StartBlobPtr = _StartBlob;
*************** InitArchiveFmt_Custom(ArchiveHandle *AH)
*** 136,141 ****
--- 130,139 ----
  	AH->ClonePtr = _Clone;
  	AH->DeClonePtr = _DeClone;
  
+ 	AH->StartCheckArchivePtr = NULL;
+ 	AH->CheckTocEntryPtr = NULL;
+ 	AH->EndCheckArchivePtr = NULL;
+ 
  	/*
  	 * Set up some special context used in compressing data.
  	 */
*************** InitArchiveFmt_Custom(ArchiveHandle *AH)
*** 144,179 ****
  		die_horribly(AH, modulename, "out of memory\n");
  	AH->formatData = (void *) ctx;
  
- 	ctx->zp = (z_streamp) malloc(sizeof(z_stream));
- 	if (ctx->zp == NULL)
- 		die_horribly(AH, modulename, "out of memory\n");
- 
  	/* Initialize LO buffering */
  	AH->lo_buf_size = LOBBUFSIZE;
  	AH->lo_buf = (void *) malloc(LOBBUFSIZE);
  	if (AH->lo_buf == NULL)
  		die_horribly(AH, modulename, "out of memory\n");
  
- 	/*
- 	 * zlibOutSize is the buffer size we tell zlib it can output to.  We
- 	 * actually allocate one extra byte because some routines want to append a
- 	 * trailing zero byte to the zlib output.  The input buffer is expansible
- 	 * and is always of size ctx->inSize; zlibInSize is just the initial
- 	 * default size for it.
- 	 */
- 	ctx->zlibOut = (char *) malloc(zlibOutSize + 1);
- 	ctx->zlibIn = (char *) malloc(zlibInSize);
- 	ctx->inSize = zlibInSize;
  	ctx->filePos = 0;
  
- 	if (ctx->zlibOut == NULL || ctx->zlibIn == NULL)
- 		die_horribly(AH, modulename, "out of memory\n");
- 
  	/*
  	 * Now open the file
  	 */
  	if (AH->mode == archModeWrite)
  	{
  		if (AH->fSpec && strcmp(AH->fSpec, "") != 0)
  		{
  			AH->FH = fopen(AH->fSpec, PG_BINARY_W);
--- 142,162 ----
  		die_horribly(AH, modulename, "out of memory\n");
  	AH->formatData = (void *) ctx;
  
  	/* Initialize LO buffering */
  	AH->lo_buf_size = LOBBUFSIZE;
  	AH->lo_buf = (void *) malloc(LOBBUFSIZE);
  	if (AH->lo_buf == NULL)
  		die_horribly(AH, modulename, "out of memory\n");
  
  	ctx->filePos = 0;
  
  	/*
  	 * Now open the file
  	 */
  	if (AH->mode == archModeWrite)
  	{
+ 		ctx->cs = AllocateCompressorState(AH);
+ 
  		if (AH->fSpec && strcmp(AH->fSpec, "") != 0)
  		{
  			AH->FH = fopen(AH->fSpec, PG_BINARY_W);
*************** InitArchiveFmt_Custom(ArchiveHandle *AH)
*** 211,216 ****
--- 194,201 ----
  		ctx->hasSeek = checkSeek(AH->FH);
  
  		ReadHead(AH);
+ 		ctx->cs = AllocateCompressorState(AH);
+ 		
  		ReadToc(AH);
  		ctx->dataStart = _getFilePos(AH, ctx);
  	}
*************** static size_t
*** 340,356 ****
  _WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
  {
  	lclContext *ctx = (lclContext *) AH->formatData;
! 	z_streamp	zp = ctx->zp;
! 
! 	zp->next_in = (void *) data;
! 	zp->avail_in = dLen;
  
! 	while (zp->avail_in != 0)
! 	{
! 		/* printf("Deflating %lu bytes\n", (unsigned long) dLen); */
! 		_DoDeflate(AH, ctx, 0);
! 	}
! 	return dLen;
  }
  
  /*
--- 325,333 ----
  _WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
  {
  	lclContext *ctx = (lclContext *) AH->formatData;
! 	CompressorState	   *cs = ctx->cs;
  
! 	return WriteDataToArchive(AH, cs, _CustomWriteFunc, data, dLen);
  }
  
  /*
*************** static void
*** 533,639 ****
  _PrintData(ArchiveHandle *AH)
  {
  	lclContext *ctx = (lclContext *) AH->formatData;
! 	z_streamp	zp = ctx->zp;
! 	size_t		blkLen;
! 	char	   *in = ctx->zlibIn;
! 	size_t		cnt;
! 
! #ifdef HAVE_LIBZ
! 	int			res;
! 	char	   *out = ctx->zlibOut;
! #endif
! 
! #ifdef HAVE_LIBZ
! 
! 	res = Z_OK;
! 
! 	if (AH->compression != 0)
! 	{
! 		zp->zalloc = Z_NULL;
! 		zp->zfree = Z_NULL;
! 		zp->opaque = Z_NULL;
! 
! 		if (inflateInit(zp) != Z_OK)
! 			die_horribly(AH, modulename, "could not initialize compression library: %s\n", zp->msg);
! 	}
! #endif
! 
! 	blkLen = ReadInt(AH);
! 	while (blkLen != 0)
! 	{
! 		if (blkLen + 1 > ctx->inSize)
! 		{
! 			free(ctx->zlibIn);
! 			ctx->zlibIn = NULL;
! 			ctx->zlibIn = (char *) malloc(blkLen + 1);
! 			if (!ctx->zlibIn)
! 				die_horribly(AH, modulename, "out of memory\n");
! 
! 			ctx->inSize = blkLen + 1;
! 			in = ctx->zlibIn;
! 		}
! 
! 		cnt = fread(in, 1, blkLen, AH->FH);
! 		if (cnt != blkLen)
! 		{
! 			if (feof(AH->FH))
! 				die_horribly(AH, modulename,
! 							 "could not read from input file: end of file\n");
! 			else
! 				die_horribly(AH, modulename,
! 					"could not read from input file: %s\n", strerror(errno));
! 		}
! 
! 		ctx->filePos += blkLen;
! 
! 		zp->next_in = (void *) in;
! 		zp->avail_in = blkLen;
! 
! #ifdef HAVE_LIBZ
! 		if (AH->compression != 0)
! 		{
! 			while (zp->avail_in != 0)
! 			{
! 				zp->next_out = (void *) out;
! 				zp->avail_out = zlibOutSize;
! 				res = inflate(zp, 0);
! 				if (res != Z_OK && res != Z_STREAM_END)
! 					die_horribly(AH, modulename, "could not uncompress data: %s\n", zp->msg);
! 
! 				out[zlibOutSize - zp->avail_out] = '\0';
! 				ahwrite(out, 1, zlibOutSize - zp->avail_out, AH);
! 			}
! 		}
! 		else
! #endif
! 		{
! 			in[zp->avail_in] = '\0';
! 			ahwrite(in, 1, zp->avail_in, AH);
! 			zp->avail_in = 0;
! 		}
! 		blkLen = ReadInt(AH);
! 	}
! 
! #ifdef HAVE_LIBZ
! 	if (AH->compression != 0)
! 	{
! 		zp->next_in = NULL;
! 		zp->avail_in = 0;
! 		while (res != Z_STREAM_END)
! 		{
! 			zp->next_out = (void *) out;
! 			zp->avail_out = zlibOutSize;
! 			res = inflate(zp, 0);
! 			if (res != Z_OK && res != Z_STREAM_END)
! 				die_horribly(AH, modulename, "could not uncompress data: %s\n", zp->msg);
  
! 			out[zlibOutSize - zp->avail_out] = '\0';
! 			ahwrite(out, 1, zlibOutSize - zp->avail_out, AH);
! 		}
! 		if (inflateEnd(zp) != Z_OK)
! 			die_horribly(AH, modulename, "could not close compression library: %s\n", zp->msg);
! 	}
! #endif
  }
  
  static void
--- 510,519 ----
  _PrintData(ArchiveHandle *AH)
  {
  	lclContext *ctx = (lclContext *) AH->formatData;
! 	CompressorState *cs = ctx->cs;
  
! 	InitCompressorState(AH, cs, COMPRESSOR_INFLATE);
! 	ReadDataFromArchive(AH, cs, _CustomReadFunction);
  }
  
  static void
*************** static void
*** 683,701 ****
  _skipData(ArchiveHandle *AH)
  {
  	lclContext *ctx = (lclContext *) AH->formatData;
  	size_t		blkLen;
! 	char	   *in = ctx->zlibIn;
  	size_t		cnt;
  
  	blkLen = ReadInt(AH);
  	while (blkLen != 0)
  	{
! 		if (blkLen > ctx->inSize)
  		{
! 			free(ctx->zlibIn);
! 			ctx->zlibIn = (char *) malloc(blkLen);
! 			ctx->inSize = blkLen;
! 			in = ctx->zlibIn;
  		}
  		cnt = fread(in, 1, blkLen, AH->FH);
  		if (cnt != blkLen)
--- 563,582 ----
  _skipData(ArchiveHandle *AH)
  {
  	lclContext *ctx = (lclContext *) AH->formatData;
+ 	CompressorState *cs = ctx->cs;
  	size_t		blkLen;
! 	char	   *in = cs->comprIn;
  	size_t		cnt;
  
  	blkLen = ReadInt(AH);
  	while (blkLen != 0)
  	{
! 		if (blkLen > cs->comprInSize)
  		{
! 			free(cs->comprIn);
! 			cs->comprIn = (char *) malloc(blkLen);
! 			cs->comprInSize = blkLen;
! 			in = cs->comprIn;
  		}
  		cnt = fread(in, 1, blkLen, AH->FH);
  		if (cnt != blkLen)
*************** _readBlockHeader(ArchiveHandle *AH, int 
*** 961,1099 ****
  }
  
  /*
!  * If zlib is available, then startit up. This is called from
!  * StartData & StartBlob. The buffers are setup in the Init routine.
   */
  static void
  _StartDataCompressor(ArchiveHandle *AH, TocEntry *te)
  {
  	lclContext *ctx = (lclContext *) AH->formatData;
! 	z_streamp	zp = ctx->zp;
! 
! #ifdef HAVE_LIBZ
! 
! 	if (AH->compression < 0 || AH->compression > 9)
! 		AH->compression = Z_DEFAULT_COMPRESSION;
  
! 	if (AH->compression != 0)
! 	{
! 		zp->zalloc = Z_NULL;
! 		zp->zfree = Z_NULL;
! 		zp->opaque = Z_NULL;
  
! 		if (deflateInit(zp, AH->compression) != Z_OK)
! 			die_horribly(AH, modulename, "could not initialize compression library: %s\n", zp->msg);
! 	}
! #else
  
! 	AH->compression = 0;
! #endif
  
! 	/* Just be paranoid - maybe End is called after Start, with no Write */
! 	zp->next_out = (void *) ctx->zlibOut;
! 	zp->avail_out = zlibOutSize;
  }
  
! /*
!  * Send compressed data to the output stream (via ahwrite).
!  * Each data chunk is preceded by it's length.
!  * In the case of Z0, or no zlib, just write the raw data.
!  *
!  */
! static int
! _DoDeflate(ArchiveHandle *AH, lclContext *ctx, int flush)
  {
! 	z_streamp	zp = ctx->zp;
  
! #ifdef HAVE_LIBZ
! 	char	   *out = ctx->zlibOut;
! 	int			res = Z_OK;
  
! 	if (AH->compression != 0)
  	{
! 		res = deflate(zp, flush);
! 		if (res == Z_STREAM_ERROR)
! 			die_horribly(AH, modulename, "could not compress data: %s\n", zp->msg);
  
! 		if (((flush == Z_FINISH) && (zp->avail_out < zlibOutSize))
! 			|| (zp->avail_out == 0)
! 			|| (zp->avail_in != 0)
! 			)
! 		{
! 			/*
! 			 * Extra paranoia: avoid zero-length chunks since a zero length
! 			 * chunk is the EOF marker. This should never happen but...
! 			 */
! 			if (zp->avail_out < zlibOutSize)
! 			{
! 				/*
! 				 * printf("Wrote %lu byte deflated chunk\n", (unsigned long)
! 				 * (zlibOutSize - zp->avail_out));
! 				 */
! 				WriteInt(AH, zlibOutSize - zp->avail_out);
! 				if (fwrite(out, 1, zlibOutSize - zp->avail_out, AH->FH) != (zlibOutSize - zp->avail_out))
! 					die_horribly(AH, modulename, "could not write to output file: %s\n", strerror(errno));
! 				ctx->filePos += zlibOutSize - zp->avail_out;
! 			}
! 			zp->next_out = (void *) out;
! 			zp->avail_out = zlibOutSize;
! 		}
  	}
! 	else
! #endif
  	{
! 		if (zp->avail_in > 0)
! 		{
! 			WriteInt(AH, zp->avail_in);
! 			if (fwrite(zp->next_in, 1, zp->avail_in, AH->FH) != zp->avail_in)
! 				die_horribly(AH, modulename, "could not write to output file: %s\n", strerror(errno));
! 			ctx->filePos += zp->avail_in;
! 			zp->avail_in = 0;
! 		}
  		else
! 		{
! #ifdef HAVE_LIBZ
! 			if (flush == Z_FINISH)
! 				res = Z_STREAM_END;
! #endif
! 		}
  	}
! 
! #ifdef HAVE_LIBZ
! 	return res;
! #else
! 	return 1;
! #endif
  }
  
  /*
!  * Terminate zlib context and flush it's buffers. If no zlib
!  * then just return.
   */
  static void
  _EndDataCompressor(ArchiveHandle *AH, TocEntry *te)
  {
  
! #ifdef HAVE_LIBZ
! 	lclContext *ctx = (lclContext *) AH->formatData;
! 	z_streamp	zp = ctx->zp;
! 	int			res;
! 
! 	if (AH->compression != 0)
! 	{
! 		zp->next_in = NULL;
! 		zp->avail_in = 0;
! 
! 		do
! 		{
! 			/* printf("Ending data output\n"); */
! 			res = _DoDeflate(AH, ctx, Z_FINISH);
! 		} while (res != Z_STREAM_END);
! 
! 		if (deflateEnd(zp) != Z_OK)
! 			die_horribly(AH, modulename, "could not close compression stream: %s\n", zp->msg);
! 	}
! #endif
  
  	/* Send the end marker */
  	WriteInt(AH, 0);
--- 842,924 ----
  }
  
  /*
!  * If a compression algorithm is available, then start it up. This is called
!  * from StartData & StartBlob. The buffers are set up in the Init routine.
   */
  static void
  _StartDataCompressor(ArchiveHandle *AH, TocEntry *te)
  {
  	lclContext *ctx = (lclContext *) AH->formatData;
! 	CompressorState *cs = ctx->cs;
  
! 	InitCompressorState(AH, cs, COMPRESSOR_DEFLATE);
! }
  
! static size_t
! _CustomWriteFunc(ArchiveHandle *AH, const void *buf, size_t len)
! {
! 	Assert(len != 0);
  
! 	/* never write 0-byte blocks (this should not happen) */
! 	if (len == 0)
! 		return 0;
  
! 	WriteInt(AH, len);
! 	return _WriteBuf(AH, buf, len);
  }
  
! static size_t
! _CustomReadFunction(ArchiveHandle *AH, void **buf, size_t sizeHint)
  {
! 	lclContext *ctx = (lclContext *) AH->formatData;
! 	CompressorState *cs = ctx->cs;
! 	size_t		blkLen;
! 	size_t		cnt;
  
! 	/*
! 	 * We deliberately ignore the sizeHint parameter because we know
! 	 * the exact size of the next compressed block (=blkLen).
! 	 */
  
! 	blkLen = ReadInt(AH);
! 
! 	if (blkLen == 0)
! 		return 0;
! 
! 	if (blkLen + 1 > cs->comprInSize)
  	{
! 		free(cs->comprIn);
! 		cs->comprIn = NULL;
! 		cs->comprIn = (char *) malloc(blkLen + 1);
! 		if (!cs->comprIn)
! 			die_horribly(AH, modulename, "out of memory\n");
  
! 		cs->comprInSize = blkLen + 1;
  	}
! 	cnt = _ReadBuf(AH, cs->comprIn, blkLen);
! 	if (cnt != blkLen)
  	{
! 		if (feof(AH->FH))
! 			die_horribly(AH, modulename,
! 						 "could not read from input file: end of file\n");
  		else
! 			die_horribly(AH, modulename,
! 				"could not read from input file: %s\n", strerror(errno));
  	}
! 	*buf = cs->comprIn;
! 	return cnt;
  }
  
  /*
!  * Terminate the compressor state and flush its buffers.
   */
  static void
  _EndDataCompressor(ArchiveHandle *AH, TocEntry *te)
  {
+ 	lclContext		   *ctx = (lclContext *) AH->formatData;
+ 	CompressorState	   *cs = ctx->cs;
  
! 	FlushCompressorState(AH, cs, _CustomWriteFunc);
  
  	/* Send the end marker */
  	WriteInt(AH, 0);
*************** _Clone(ArchiveHandle *AH)
*** 1114,1125 ****
  	memcpy(AH->formatData, ctx, sizeof(lclContext));
  	ctx = (lclContext *) AH->formatData;
  
! 	ctx->zp = (z_streamp) malloc(sizeof(z_stream));
! 	ctx->zlibOut = (char *) malloc(zlibOutSize + 1);
! 	ctx->zlibIn = (char *) malloc(ctx->inSize);
! 
! 	if (ctx->zp == NULL || ctx->zlibOut == NULL || ctx->zlibIn == NULL)
! 		die_horribly(AH, modulename, "out of memory\n");
  
  	/*
  	 * Note: we do not make a local lo_buf because we expect at most one BLOBS
--- 939,945 ----
  	memcpy(AH->formatData, ctx, sizeof(lclContext));
  	ctx = (lclContext *) AH->formatData;
  
! 	ctx->cs = AllocateCompressorState(AH);
  
  	/*
  	 * Note: we do not make a local lo_buf because we expect at most one BLOBS
*************** static void
*** 1133,1141 ****
  _DeClone(ArchiveHandle *AH)
  {
  	lclContext *ctx = (lclContext *) AH->formatData;
  
- 	free(ctx->zlibOut);
- 	free(ctx->zlibIn);
- 	free(ctx->zp);
  	free(ctx);
  }
--- 953,962 ----
  _DeClone(ArchiveHandle *AH)
  {
  	lclContext *ctx = (lclContext *) AH->formatData;
+ 	CompressorState	   *cs = ctx->cs;
+ 
+ 	FreeCompressorState(cs);
  
  	free(ctx);
  }
+ 
diff --git a/src/bin/pg_dump/pg_backup_directory.c b/src/bin/pg_dump/pg_backup_directory.c
index ...1da57b3 .
*** a/src/bin/pg_dump/pg_backup_directory.c
--- b/src/bin/pg_dump/pg_backup_directory.c
***************
*** 0 ****
--- 1,1496 ----
+ /*-------------------------------------------------------------------------
+  *
+  * pg_backup_directory.c
+  *
+  *	This file is copied from the 'files' format file and dumps data into
+  *	separate files in a directory.
+  *
+  *	See the headers to pg_backup_files & pg_restore for more details.
+  *
+  * Copyright (c) 2000, Philip Warner
+  *		Rights are granted to use this software in any way so long
+  *		as this notice is not removed.
+  *
+  *	The author is not responsible for loss or damages that may
+  *	result from it's use.
+  *
+  *
+  * IDENTIFICATION
+  * 	XXX
+  *
+  *-------------------------------------------------------------------------
+  */
+ 
+ #include <dirent.h>
+ #include <sys/stat.h>
+ 
+ #include "compress_io.h"
+ #include "pg_backup_archiver.h"
+ #include "libpq/md5.h"
+ #include "utils/pg_crc.h"
+ 
+ #ifdef USE_SSL
+ /* for RAND_bytes() */
+ #include <openssl/rand.h>
+ #endif
+ 
+ #define TOC_FH_ACTIVE (ctx->dataFH == NULL && ctx->blobsTocFH == NULL && AH->FH != NULL)
+ #define BLOBS_TOC_FH_ACTIVE (ctx->dataFH == NULL && ctx->blobsTocFH != NULL)
+ #define DATA_FH_ACTIVE (ctx->dataFH != NULL)
+ 
+ struct _lclFileHeader;
+ struct _lclContext;
+ 
+ static void _ArchiveEntry(ArchiveHandle *AH, TocEntry *te);
+ static void _StartData(ArchiveHandle *AH, TocEntry *te);
+ static void _EndData(ArchiveHandle *AH, TocEntry *te);
+ static size_t _WriteData(ArchiveHandle *AH, const void *data, size_t dLen);
+ static int	_WriteByte(ArchiveHandle *AH, const int i);
+ static int	_ReadByte(ArchiveHandle *);
+ static size_t _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len);
+ static size_t _ReadBuf(ArchiveHandle *AH, void *buf, size_t len);
+ static void _CloseArchive(ArchiveHandle *AH);
+ static void _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt);
+ 
+ static void _WriteExtraToc(ArchiveHandle *AH, TocEntry *te);
+ static void _ReadExtraToc(ArchiveHandle *AH, TocEntry *te);
+ static void _PrintExtraToc(ArchiveHandle *AH, TocEntry *te);
+ static void _PrintExtraTocSummary(ArchiveHandle *AH);
+ 
+ static void _WriteExtraHead(ArchiveHandle *AH);
+ static void _ReadExtraHead(ArchiveHandle *AH);
+ 
+ static void WriteFileHeader(ArchiveHandle *AH, int type);
+ static int ReadFileHeader(ArchiveHandle *AH, struct _lclFileHeader *fileHeader);
+ 
+ static void _StartBlobs(ArchiveHandle *AH, TocEntry *te);
+ static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
+ static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
+ static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
+ static void _LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt);
+ 
+ static size_t _DirectoryReadFunction(ArchiveHandle *AH, void **buf, size_t sizeHint);
+ 
+ static bool _StartCheckArchive(ArchiveHandle *AH);
+ static bool _CheckTocEntry(ArchiveHandle *AH, TocEntry *te, teReqs reqs);
+ static bool _CheckFileContents(ArchiveHandle *AH, const char *fname, const char* idStr, bool terminateOnError);
+ static bool _CheckFileSize(ArchiveHandle *AH, const char *fname, pgoff_t pgSize, bool terminateOnError);
+ static bool _CheckBlob(ArchiveHandle *AH, Oid oid, pgoff_t size);
+ static bool _CheckBlobs(ArchiveHandle *AH, TocEntry *te, teReqs reqs);
+ static bool _EndCheckArchive(ArchiveHandle *AH);
+ 
+ static char *prependDirectory(ArchiveHandle *AH, const char *relativeFilename);
+ static char *prependBlobsDirectory(ArchiveHandle *AH, Oid oid);
+ static void createDirectory(const char *dir, const char *subdir);
+ 
+ static char *getRandomData(char *s, int len);
+ 
+ static void _StartDataCompressor(ArchiveHandle *AH, TocEntry *te);
+ static void _EndDataCompressor(ArchiveHandle *AH, TocEntry *te);
+ 
+ static bool isDirectory(const char *fname);
+ static bool isRegularFile(const char *fname);
+ 
+ #define K_STD_BUF_SIZE	1024
+ #define FILE_SUFFIX		".dat"
+ 
+ typedef struct _lclContext
+ {
+ 	/*
+ 	 * Our archive location. This is basically what the user specified as his
+ 	 * backup file but of course here it is a directory.
+ 	 */
+ 	char			   *directory;
+ 
+ 	/*
+ 	 * As a directory archive consists of several files we want to make sure
+ 	 * that we do not interchange files of different backups. That's why we
+ 	 * assign a (hopefully) unique ID to every set. This ID is written to the
+ 	 * TOC and to every data file.
+ 	 */
+ 	char				idStr[33];
+ 
+ 	/*
+ 	 * In the directory archive format we have three file handles:
+ 	 *
+ 	 * AH->FH           points to the TOC
+ 	 * ctx->blobsTocFH  points to the TOC for the BLOBs
+ 	 * ctx->dataFH      points to data files (both BLOBs and regular)
+ 	 *
+ 	 * Instead of specifying where each I/O operation should go (which would
+ 	 * require own prototypes anyway and wouldn't be that straightforward
+ 	 * either), we rely on a hierarchy among the file descriptors.
+ 	 *
+ 	 * As a matter of fact we never access any of the TOCs when we are writing
+ 	 * to a data file, only before or after that. Similarly we never access the
+ 	 * general TOC when we have opened the TOC for BLOBs. Given these facts we
+ 	 * can just write our I/O routines such that they access:
+ 	 *
+ 	 * if      defined(ctx->dataFH)      => access ctx->dataFH
+ 	 * else if defined(ctx->blobsTocFH)  => access ctx->blobsTocFH
+ 	 * else                              => access AH->FH
+ 	 *
+ 	 * To make it more transparent what is going on, we use assertions like
+ 	 *
+ 	 *      Assert(DATA_FH_ACTIVE); ...
+ 	 *
+ 	 */
+ 	FILE			   *dataFH;
+ 	pgoff_t				dataFilePos;
+ 	FILE			   *blobsTocFH;
+ 	pgoff_t				blobsTocFilePos;
+ 	pgoff_t				tocFilePos; /* this counts the file position for AH->FH */
+ 
+ 	/* these are used for checking a directory archive */
+ 	DumpId			   *chkList;
+ 	int					chkListSize;
+ 
+ 	CompressorState	   *cs;
+ } lclContext;
+ 
+ typedef struct
+ {
+ 	char	   *filename;		/* filename excluding the directory (basename) */
+ 	pgoff_t		fileSize;
+ } lclTocEntry;
+ 
+ typedef struct _lclFileHeader
+ {
+ 	int			version;
+ 	int			type;			/* BLK_DATA or BLK_BLOB */
+ 	char	   *idStr;
+ } lclFileHeader;
+ 
+ static const char *modulename = gettext_noop("directory archiver");
+ 
+ /*
+  *	Init routine required by ALL formats. This is a global routine
+  *	and should be declared in pg_backup_archiver.h
+  *
+  *	Its task is to create any extra archive context (using AH->formatData),
+  *	and to initialize the supported function pointers.
+  *
+  *	It should also prepare whatever its input source is for reading/writing,
+  *	and in the case of a read mode connection, it should load the Header & TOC.
+  */
+ void
+ InitArchiveFmt_Directory(ArchiveHandle *AH)
+ {
+ 	lclContext *ctx;
+ 
+ 	/* Assuming static functions, this can be copied for each format. */
+ 	AH->ArchiveEntryPtr = _ArchiveEntry;
+ 	AH->StartDataPtr = _StartData;
+ 	AH->WriteDataPtr = _WriteData;
+ 	AH->EndDataPtr = _EndData;
+ 	AH->WriteBytePtr = _WriteByte;
+ 	AH->ReadBytePtr = _ReadByte;
+ 	AH->WriteBufPtr = _WriteBuf;
+ 	AH->ReadBufPtr = _ReadBuf;
+ 	AH->ClosePtr = _CloseArchive;
+ 	AH->ReopenPtr = NULL;
+ 	AH->PrintTocDataPtr = _PrintTocData;
+ 	AH->ReadExtraTocPtr = _ReadExtraToc;
+ 	AH->WriteExtraTocPtr = _WriteExtraToc;
+ 	AH->PrintExtraTocPtr = _PrintExtraToc;
+ 	AH->PrintExtraTocSummaryPtr = _PrintExtraTocSummary;
+ 
+ 	AH->StartBlobsPtr = _StartBlobs;
+ 	AH->StartBlobPtr = _StartBlob;
+ 	AH->EndBlobPtr = _EndBlob;
+ 	AH->EndBlobsPtr = _EndBlobs;
+ 
+ 	AH->ClonePtr = NULL;
+ 	AH->DeClonePtr = NULL;
+ 
+ 	AH->StartCheckArchivePtr = _StartCheckArchive;
+ 	AH->CheckTocEntryPtr = _CheckTocEntry;
+ 	AH->EndCheckArchivePtr = _EndCheckArchive;
+ 
+ 	/*
+ 	 * Set up some special context used in compressing data.
+ 	 */
+ 	ctx = (lclContext *) calloc(1, sizeof(lclContext));
+ 	if (ctx == NULL)
+ 		die_horribly(AH, modulename, "out of memory\n");
+ 	AH->formatData = (void *) ctx;
+ 
+ 	ctx->dataFH = NULL;
+ 	ctx->blobsTocFH = NULL;
+ 	ctx->cs = NULL;
+ 
+ 	/* Initialize LO buffering */
+ 	AH->lo_buf_size = LOBBUFSIZE;
+ 	AH->lo_buf = (void *) malloc(LOBBUFSIZE);
+ 	if (AH->lo_buf == NULL)
+ 		die_horribly(AH, modulename, "out of memory\n");
+ 
+ 	/*
+ 	 * Now open the TOC file
+ 	 */
+ 
+ 	if (!AH->fSpec || strcmp(AH->fSpec, "") == 0)
+ 		die_horribly(AH, modulename, "no directory specified\n");
+ 
+ 	ctx->directory = AH->fSpec;
+ 
+ 	if (AH->mode == archModeWrite)
+ 	{
+ 		char   *fname = prependDirectory(AH, "TOC");
+ 		char   buf[256];
+ 
+ 		/*
+ 		 * Create the ID string, basically a large random number that prevents
+ 		 * us from mixing files from different backups.
+ 		 */
+ 		getRandomData(buf, sizeof(buf));
+ 		if (!pg_md5_hash(buf, strlen(buf), ctx->idStr))
+ 			die_horribly(AH, modulename, "Error computing checksum");
+ 
+ 		/* Create the directory, errors are caught there */
+ 		createDirectory(ctx->directory, NULL);
+ 
+ 		ctx->cs = AllocateCompressorState(AH);
+ 
+ 		AH->FH = fopen(fname, PG_BINARY_W);
+ 		if (AH->FH == NULL)
+ 			die_horribly(AH, modulename, "could not open output file \"%s\": %s\n",
+ 						 fname, strerror(errno));
+ 	}
+ 	else
+ 	{							/* Read Mode */
+ 		char	   *fname;
+ 
+ 		fname = prependDirectory(AH, "TOC");
+ 
+ 		AH->FH = fopen(fname, PG_BINARY_R);
+ 		if (AH->FH == NULL)
+ 			die_horribly(AH, modulename,
+ 						 "could not open input file \"%s\": %s\n",
+ 						 fname, strerror(errno));
+ 
+ 		Assert(TOC_FH_ACTIVE);
+ 
+ 		ReadHead(AH);
+ 		_ReadExtraHead(AH);
+ 		ReadToc(AH);
+ 
+ 		/*
+ 		 * We get the compression information from the TOC, hence no need to
+ 		 * initialize the compressor earlier.  Also, remember that the TOC file is
+ 		 * always uncompressed. Compression is only used for the data files.
+ 		 */
+ 		ctx->cs = AllocateCompressorState(AH);
+ 
+ 		/* Nothing else in the file, so close it again... */
+ 
+ 		if (fclose(AH->FH) != 0)
+ 			die_horribly(AH, modulename, "could not close TOC file: %s\n", strerror(errno));
+ 	}
+ }
+ 
+ /*
+  * Called by the Archiver when the dumper creates a new TOC entry.
+  *
+  * Optional.
+  *
+  * Set up extra format-related TOC data: the data file name for dumpable
+  * objects, the BLOB TOC name for the BLOBS entry, NULL otherwise.
+ */
+ static void
+ _ArchiveEntry(ArchiveHandle *AH, TocEntry *te)
+ {
+ 	lclTocEntry	   *tctx;
+ 	char			fn[MAXPGPATH];
+ 
+ 	tctx = (lclTocEntry *) calloc(1, sizeof(lclTocEntry));
+ 	if (tctx == NULL)
+ 		die_horribly(AH, modulename, "out of memory\n");
+ 
+ 	if (te->dataDumper)
+ 	{
+ 		/* Data files are named after the dumpId, e.g. "1234.dat" */
+ 		snprintf(fn, MAXPGPATH, "%d"FILE_SUFFIX, te->dumpId);
+ 		tctx->filename = strdup(fn);
+ 	}
+ 	else if (strcmp(te->desc, "BLOBS") == 0)
+ 	{
+ 		tctx->filename = strdup("BLOBS.TOC");
+ 	}
+ 	else
+ 		tctx->filename = NULL;
+ 
+ 	tctx->fileSize = 0;
+ 	te->formatData = (void *) tctx;
+ }
+ 
+ /*
+  * Called by the Archiver to save any extra format-related TOC entry
+  * data.
+  *
+  * Optional.
+  *
+  * Use the Archiver routines to write data - they are non-endian, and
+  * maintain other important file information.
+  */
+ static void
+ _WriteExtraToc(ArchiveHandle *AH, TocEntry *te)
+ {
+ 	lclTocEntry *tctx = (lclTocEntry *) te->formatData;
+ 
+ 	/*
+ 	 * Only dumpable objects have a filename set by _ArchiveEntry; for all
+ 	 * other objects we record an empty string and nothing else.
+ 	 */
+ 	if (tctx->filename == NULL)
+ 	{
+ 		WriteStr(AH, "");
+ 		return;
+ 	}
+ 
+ 	WriteStr(AH, tctx->filename);
+ 	WriteOffset(AH, tctx->fileSize, K_OFFSET_POS_SET);
+ }
+ 
+ /*
+  * Called by the Archiver to read any extra format-related TOC data.
+  *
+  * Optional.
+  *
+  * Needs to match the order defined in _WriteExtraToc, and should also
+  * use the Archiver input routines.
+  */
+ static void
+ _ReadExtraToc(ArchiveHandle *AH, TocEntry *te)
+ {
+ 	lclTocEntry *tctx = (lclTocEntry *) te->formatData;
+ 
+ 	if (tctx == NULL)
+ 	{
+ 		tctx = (lclTocEntry *) calloc(1, sizeof(lclTocEntry));
+ 		if (tctx == NULL)
+ 			die_horribly(AH, modulename, "out of memory\n");
+ 		te->formatData = (void *) tctx;
+ 	}
+ 
+ 	tctx->filename = ReadStr(AH);
+ 	/* an empty string marks an entry without a data file */
+ 	if (strlen(tctx->filename) == 0)
+ 	{
+ 		free(tctx->filename);
+ 		tctx->filename = NULL;
+ 	}
+ 	else
+ 		ReadOffset(AH, &(tctx->fileSize));
+ }
+ 
+ /*
+  * Called by the Archiver when restoring an archive to output a comment
+  * that includes useful information about the TOC entry.
+  *
+  * Optional.
+  *
+  */
+ static void
+ _PrintExtraToc(ArchiveHandle *AH, TocEntry *te)
+ {
+ 	lclTocEntry *entry = (lclTocEntry *) te->formatData;
+ 
+ 	/* emit the source file name, but only in verbose mode */
+ 	if (!AH->public.verbose)
+ 		return;
+ 	if (entry->filename != NULL)
+ 		ahprintf(AH, "-- File: %s\n", entry->filename);
+ }
+ 
+ /*
+  * Called by the Archiver when listing the contents of an archive to output a
+  * comment that includes useful information about the archive.
+  *
+  * Optional.
+  *
+  */
+ static void
+ _PrintExtraTocSummary(ArchiveHandle *AH)
+ {
+ 	lclContext *context = (lclContext *) AH->formatData;
+ 
+ 	/* print the backup-set ID so files can be matched to this archive */
+ 	ahprintf(AH, ";     ID: %s\n", context->idStr);
+ }
+ 
+ 
+ /*
+  * Called by the archiver when saving TABLE DATA (not schema). This routine
+  * should save whatever format-specific information is needed to read
+  * the archive back.
+  *
+  * It is called just prior to the dumper's 'DataDumper' routine being called.
+  *
+  * Optional, but strongly recommended.
+  *
+  */
+ static void
+ _StartData(ArchiveHandle *AH, TocEntry *te)
+ {
+ 	lclTocEntry	   *tctx = (lclTocEntry *) te->formatData;
+ 	lclContext	   *ctx = (lclContext *) AH->formatData;
+ 	char		   *fname;
+ 
+ 	fname = prependDirectory(AH, tctx->filename);
+ 
+ 	/* no cast needed; fopen() already returns FILE * */
+ 	ctx->dataFH = fopen(fname, PG_BINARY_W);
+ 	if (ctx->dataFH == NULL)
+ 		die_horribly(AH, modulename, "could not open output file \"%s\": %s\n",
+ 					 fname, strerror(errno));
+ 
+ 	Assert(DATA_FH_ACTIVE);
+ 
+ 	/* track the file size ourselves; see _WriteBuf()/_WriteByte() */
+ 	ctx->dataFilePos = 0;
+ 
+ 	WriteFileHeader(AH, BLK_DATA);
+ 
+ 	_StartDataCompressor(AH, te);
+ }
+ 
+ /*
+  * Write the per-file header: the "PGDMP" magic code, the archiver version,
+  * the block type (BLK_DATA or BLK_BLOBS) and the backup-set ID string.
+  * ReadFileHeader() must read these fields back in exactly this order.
+  */
+ static void
+ WriteFileHeader(ArchiveHandle *AH, int type)
+ {
+ 	lclContext *ctx = (lclContext *) AH->formatData;
+ 	int compression = AH->compression;
+ 
+ 	/*
+ 	 * We always write the header uncompressed. If any compression is active,
+ 	 * switch it off for a moment and restore it after writing the header.
+ 	 */
+ 	AH->compression = 0;
+ 	(*AH->WriteBufPtr) (AH, "PGDMP", 5);		/* Magic code */
+ 	(*AH->WriteBytePtr) (AH, AH->vmaj);
+ 	(*AH->WriteBytePtr) (AH, AH->vmin);
+ 	(*AH->WriteBytePtr) (AH, AH->vrev);
+ 
+ 	_WriteByte(AH, type);
+ 	WriteStr(AH, ctx->idStr);
+ 
+ 	/* restore the caller's compression setting */
+ 	AH->compression = compression;
+ }
+ 
+ /*
+  * Read and validate the per-file header written by WriteFileHeader().
+  *
+  * Returns 0 on success, -1 if the header is not valid (bad magic, bad block
+  * type, or an ID string not matching this backup set).
+  */
+ static int
+ ReadFileHeader(ArchiveHandle *AH, lclFileHeader *fileHeader)
+ {
+ 	char		tmpMag[7];
+ 	int			vmaj, vmin, vrev;
+ 	lclContext *ctx = (lclContext *) AH->formatData;
+ 	int			compression = AH->compression;
+ 	bool		err = false;
+ 
+ 	Assert(ftell(ctx->dataFH ? ctx->dataFH : ctx->blobsTocFH ? ctx->blobsTocFH : AH->FH) == 0);
+ 
+ 	/* Read with compression switched off. See WriteFileHeader() */
+ 	AH->compression = 0;
+ 	if ((*AH->ReadBufPtr) (AH, tmpMag, 5) != 5)
+ 		die_horribly(AH, modulename, "unexpected end of file\n");
+ 
+ 	/* validate the magic code; previously it was read but never checked */
+ 	if (strncmp(tmpMag, "PGDMP", 5) != 0)
+ 		err = true;
+ 
+ 	vmaj = (*AH->ReadBytePtr) (AH);
+ 	vmin = (*AH->ReadBytePtr) (AH);
+ 	vrev = (*AH->ReadBytePtr) (AH);
+ 
+ 	/* Make a convenient integer <maj><min><rev>00 */
+ 	fileHeader->version = ((vmaj * 256 + vmin) * 256 + vrev) * 256 + 0;
+ 	fileHeader->type = _ReadByte(AH);
+ 	if (fileHeader->type != BLK_BLOBS && fileHeader->type != BLK_DATA)
+ 		err = true;
+ 	if (!err)
+ 	{
+ 		fileHeader->idStr = ReadStr(AH);
+ 		if (fileHeader->idStr == NULL)
+ 			err = true;
+ 	}
+ 	if (!err)
+ 	{
+ 		/* the file must belong to this very backup set */
+ 		if (strcmp(fileHeader->idStr, ctx->idStr) != 0)
+ 			err = true;
+ 	}
+ 	AH->compression = compression;
+ 
+ 	return err ? -1 : 0;
+ }
+ 
+ /*
+  * Called by archiver when dumper calls WriteData. This routine is
+  * called for both BLOB and TABLE data; it is the responsibility of
+  * the format to manage each kind of data using StartBlob/StartData.
+  *
+  * It should only be called from within a DataDumper routine.
+  *
+  * Mandatory.
+  */
+ static size_t
+ _WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
+ {
+ 	lclContext *ctx = (lclContext *) AH->formatData;
+ 
+ 	/* hand everything to the shared compressor, which writes via _WriteBuf */
+ 	return WriteDataToArchive(AH, ctx->cs, _WriteBuf, data, dLen);
+ }
+ 
+ /*
+  * Called by the archiver when a dumper's 'DataDumper' routine has
+  * finished.
+  *
+  * Optional.
+  *
+  */
+ static void
+ _EndData(ArchiveHandle *AH, TocEntry *te)
+ {
+ 	lclTocEntry	   *tctx = (lclTocEntry *) te->formatData;
+ 	lclContext	   *ctx = (lclContext *) AH->formatData;
+ 
+ 	_EndDataCompressor(AH, te);
+ 
+ 	Assert(DATA_FH_ACTIVE);
+ 
+ 	/* Close the file; report failure instead of ignoring it */
+ 	if (fclose(ctx->dataFH) != 0)
+ 		die_horribly(AH, modulename, "could not close data file: %s\n",
+ 					 strerror(errno));
+ 
+ 	/* the file won't grow anymore. Record the size. */
+ 	tctx->fileSize = ctx->dataFilePos;
+ 
+ 	ctx->dataFH = NULL;
+ }
+ 
+ /*
+  * Print data for a given file (can be a BLOB as well)
+  */
+ static void
+ _PrintFileData(ArchiveHandle *AH, char *filename, pgoff_t expectedSize, RestoreOptions *ropt)
+ {
+ 	lclContext		   *ctx = (lclContext *) AH->formatData;
+ 	CompressorState	   *cs = ctx->cs;
+ 	lclFileHeader		fileHeader;
+ 
+ 	InitCompressorState(AH, cs, COMPRESSOR_INFLATE);
+ 
+ 	if (!filename)
+ 		return;
+ 
+ 	_CheckFileSize(AH, filename, expectedSize, true);
+ 	_CheckFileContents(AH, filename, ctx->idStr, true);
+ 
+ 	ctx->dataFH = fopen(filename, PG_BINARY_R);
+ 	if (!ctx->dataFH)
+ 		die_horribly(AH, modulename, "could not open input file \"%s\": %s\n",
+ 					 filename, strerror(errno));
+ 
+ 	if (ReadFileHeader(AH, &fileHeader) != 0)
+ 		die_horribly(AH, modulename, "could not read valid file header from file \"%s\"\n",
+ 					 filename);
+ 
+ 	Assert(DATA_FH_ACTIVE);
+ 
+ 	ReadDataFromArchive(AH, cs, _DirectoryReadFunction);
+ 
+ 	/* close the data file; previously the FILE handle was leaked here */
+ 	if (fclose(ctx->dataFH) != 0)
+ 		die_horribly(AH, modulename, "could not close data file: %s\n",
+ 					 strerror(errno));
+ 
+ 	ctx->dataFH = NULL;
+ }
+ 
+ 
+ /*
+  * Print data for a given TOC entry
+ */
+ static void
+ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
+ {
+ 	lclTocEntry *tctx = (lclTocEntry *) te->formatData;
+ 
+ 	/* entries without a data file have nothing to print */
+ 	if (tctx->filename == NULL)
+ 		return;
+ 
+ 	if (strcmp(te->desc, "BLOBS") == 0)
+ 		_LoadBlobs(AH, ropt);
+ 	else
+ 		_PrintFileData(AH, prependDirectory(AH, tctx->filename),
+ 					   tctx->fileSize, ropt);
+ }
+ 
+ /*
+  * Restore all BLOBs listed in BLOBS.TOC: for each OID/size pair read from
+  * the BLOB TOC, locate the corresponding data file and restore it.
+  */
+ static void
+ _LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt)
+ {
+ 	Oid				oid;
+ 	lclContext	   *ctx = (lclContext *) AH->formatData;
+ 	lclFileHeader	fileHeader;
+ 	char		   *fname;
+ 
+ 	StartRestoreBlobs(AH);
+ 
+ 	fname = prependDirectory(AH, "BLOBS.TOC");
+ 
+ 	/* use PG_BINARY_R like everywhere else in this file */
+ 	ctx->blobsTocFH = fopen(fname, PG_BINARY_R);
+ 
+ 	if (ctx->blobsTocFH == NULL)
+ 		die_horribly(AH, modulename, "could not open large object TOC file \"%s\" for input: %s\n",
+ 					 fname, strerror(errno));
+ 
+ 	/* previously the header result was ignored; a bad header must be fatal */
+ 	if (ReadFileHeader(AH, &fileHeader) != 0)
+ 		die_horribly(AH, modulename, "could not read valid file header from file \"%s\"\n",
+ 					 fname);
+ 
+ 	/* we cannot test for feof() since EOF only shows up in the low
+ 	 * level read functions. But they would die_horribly() anyway. */
+ 	while (1)
+ 	{
+ 		char			   *blobFname;
+ 		pgoff_t				blobSize;
+ 
+ 		oid = ReadInt(AH);
+ 		/* oid == 0 is our end marker */
+ 		if (oid == 0)
+ 			break;
+ 		ReadOffset(AH, &blobSize);
+ 
+ 		StartRestoreBlob(AH, oid, ropt->dropSchema);
+ 		blobFname = prependBlobsDirectory(AH, oid);
+ 		_PrintFileData(AH, blobFname, blobSize, ropt);
+ 		EndRestoreBlob(AH, oid);
+ 	}
+ 
+ 	if (fclose(ctx->blobsTocFH) != 0)
+ 		die_horribly(AH, modulename, "could not close large object TOC file \"%s\": %s\n",
+ 					 fname, strerror(errno));
+ 
+ 	ctx->blobsTocFH = NULL;
+ 
+ 	EndRestoreBlobs(AH);
+ }
+ 
+ 
+ /*
+  * Write a byte of data to the archive.
+  *
+  * Mandatory.
+  *
+  * Called by the archiver to do integer & byte output to the archive.
+  * These routines are only used to read & write headers & TOC.
+  *
+  */
+ static int
+ _WriteByte(ArchiveHandle *AH, const int i)
+ {
+ 	lclContext *ctx = (lclContext *) AH->formatData;
+ 	FILE	   *out;
+ 	pgoff_t	   *pos;
+ 
+ 	/* route to whichever file is currently open: data, blob TOC, or TOC */
+ 	if (ctx->dataFH != NULL)
+ 	{
+ 		out = ctx->dataFH;
+ 		pos = &ctx->dataFilePos;
+ 	}
+ 	else if (ctx->blobsTocFH != NULL)
+ 	{
+ 		out = ctx->blobsTocFH;
+ 		pos = &ctx->blobsTocFilePos;
+ 	}
+ 	else
+ 	{
+ 		out = AH->FH;
+ 		pos = &ctx->tocFilePos;
+ 	}
+ 
+ 	if (fputc(i, out) == EOF)
+ 		die_horribly(AH, modulename, "could not write byte\n");
+ 
+ 	(*pos)++;
+ 
+ 	return 1;
+ }
+ 
+ /*
+  * Read a byte of data from the archive.
+  *
+  * Mandatory
+  *
+  * Called by the archiver to read bytes & integers from the archive.
+  * These routines are only used to read & write headers & TOC.
+  * EOF should be treated as a fatal error.
+  */
+ static int
+ _ReadByte(ArchiveHandle *AH)
+ {
+ 	lclContext *ctx = (lclContext *) AH->formatData;
+ 	FILE	   *in;
+ 	pgoff_t	   *pos;
+ 	int			ch;
+ 
+ 	/* route to whichever file is currently open: data, blob TOC, or TOC */
+ 	if (ctx->dataFH != NULL)
+ 	{
+ 		in = ctx->dataFH;
+ 		pos = &ctx->dataFilePos;
+ 	}
+ 	else if (ctx->blobsTocFH != NULL)
+ 	{
+ 		in = ctx->blobsTocFH;
+ 		pos = &ctx->blobsTocFilePos;
+ 	}
+ 	else
+ 	{
+ 		in = AH->FH;
+ 		pos = &ctx->tocFilePos;
+ 	}
+ 
+ 	ch = getc(in);
+ 	if (ch == EOF)
+ 		die_horribly(AH, modulename, "unexpected end of file\n");
+ 
+ 	(*pos)++;
+ 
+ 	return ch;
+ }
+ 
+ /*
+  * Write a buffer of data to the archive.
+  *
+  * Mandatory.
+  *
+  * Called by the archiver to write a block of bytes to the TOC and by the
+  * compressor to write compressed data to the data files.
+  *
+  */
+ static size_t
+ _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len)
+ {
+ 	lclContext *ctx = (lclContext *) AH->formatData;
+ 	FILE	   *out;
+ 	pgoff_t	   *pos;
+ 	size_t		written;
+ 
+ 	/* route to whichever file is currently open: data, blob TOC, or TOC */
+ 	if (ctx->dataFH != NULL)
+ 	{
+ 		out = ctx->dataFH;
+ 		pos = &ctx->dataFilePos;
+ 	}
+ 	else if (ctx->blobsTocFH != NULL)
+ 	{
+ 		out = ctx->blobsTocFH;
+ 		pos = &ctx->blobsTocFilePos;
+ 	}
+ 	else
+ 	{
+ 		out = AH->FH;
+ 		pos = &ctx->tocFilePos;
+ 	}
+ 
+ 	written = fwrite(buf, 1, len, out);
+ 	if (written != len)
+ 		die_horribly(AH, modulename, "could not write to output file: %s\n", strerror(errno));
+ 
+ 	*pos += written;
+ 
+ 	return written;
+ }
+ 
+ /*
+  * Read a block of bytes from the archive.
+  *
+  * Mandatory.
+  *
+  * Called by the archiver to read a block of bytes from the archive
+  *
+  */
+ static size_t
+ _ReadBuf(ArchiveHandle *AH, void *buf, size_t len)
+ {
+ 	lclContext *ctx = (lclContext *) AH->formatData;
+ 	FILE	   *in;
+ 	pgoff_t	   *pos;
+ 	size_t		nread;
+ 
+ 	/* route to whichever file is currently open: data, blob TOC, or TOC */
+ 	if (ctx->dataFH != NULL)
+ 	{
+ 		in = ctx->dataFH;
+ 		pos = &ctx->dataFilePos;
+ 	}
+ 	else if (ctx->blobsTocFH != NULL)
+ 	{
+ 		in = ctx->blobsTocFH;
+ 		pos = &ctx->blobsTocFilePos;
+ 	}
+ 	else
+ 	{
+ 		in = AH->FH;
+ 		pos = &ctx->tocFilePos;
+ 	}
+ 
+ 	/* a short read is reported to the caller, not treated as fatal here */
+ 	nread = fread(buf, 1, len, in);
+ 
+ 	*pos += nread;
+ 
+ 	return nread;
+ }
+ 
+ /*
+  * Close the archive.
+  *
+  * Mandatory.
+  *
+  * When writing the archive, this is the routine that actually starts
+  * the process of saving it to files. No data should be written prior
+  * to this point, since the user could sort the TOC after creating it.
+  *
+  * If an archive is to be written, this routine must call:
+  *		WriteHead			to save the archive header
+  *		WriteToc			to save the TOC entries
+  *		WriteDataChunks		to save all DATA & BLOBs.
+  *
+  */
+ static void
+ _CloseArchive(ArchiveHandle *AH)
+ {
+ 	if (AH->mode == archModeWrite)
+ 	{
+ #ifdef USE_ASSERT_CHECKING
+ 		/* ctx is referenced only by the TOC_FH_ACTIVE assertion below */
+ 		lclContext	   *ctx = (lclContext *) AH->formatData;
+ #endif
+ 
+ 		/* write all data files first; they close dataFH before we use AH->FH */
+ 		WriteDataChunks(AH);
+ 
+ 		Assert(TOC_FH_ACTIVE);
+ 
+ 		/* header, extra header (backup-set ID) and TOC go into the TOC file */
+ 		WriteHead(AH);
+ 		_WriteExtraHead(AH);
+ 		WriteToc(AH);
+ 
+ 		if (fclose(AH->FH) != 0)
+ 			die_horribly(AH, modulename, "could not close TOC file: %s\n", strerror(errno));
+ 	}
+ 	AH->FH = NULL;
+ }
+ 
+ 
+ 
+ /*
+  * BLOB support
+  */
+ 
+ /*
+  * Called by the archiver when starting to save all BLOB DATA (not schema).
+  * This routine should save whatever format-specific information is needed
+  * to read the BLOBs back into memory.
+  *
+  * It is called just prior to the dumper's DataDumper routine.
+  *
+  * Optional, but strongly recommended.
+  */
+ static void
+ _StartBlobs(ArchiveHandle *AH, TocEntry *te)
+ {
+ 	lclContext	   *ctx = (lclContext *) AH->formatData;
+ 	char		   *fname;
+ 
+ 	fname = prependDirectory(AH, "BLOBS.TOC");
+ 	/* the "blobs" subdirectory holds one data file per large object */
+ 	createDirectory(ctx->directory, "blobs");
+ 
+ 	/*
+ 	 * NOTE(review): opened in append mode ("ab") while the data files use
+ 	 * write mode; since the directory was just created the file cannot
+ 	 * pre-exist, so this behaves like "wb" — confirm the mode is intended.
+ 	 */
+ 	ctx->blobsTocFH = fopen(fname, "ab");
+ 	if (ctx->blobsTocFH == NULL)
+ 		die_horribly(AH, modulename, "could not open output file \"%s\": %s\n",
+ 					 fname, strerror(errno));
+ 
+ 	Assert(BLOBS_TOC_FH_ACTIVE);
+ 
+ 	/* track the BLOB TOC size ourselves; see _WriteBuf()/_WriteByte() */
+ 	ctx->blobsTocFilePos = 0;
+ 
+ 	WriteFileHeader(AH, BLK_BLOBS);
+ }
+ 
+ /*
+  * Called by the archiver when the dumper calls StartBlob.
+  *
+  * Mandatory.
+  *
+  * Must save the passed OID for retrieval at restore-time.
+  */
+ static void
+ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+ {
+ 	lclContext	   *ctx = (lclContext *) AH->formatData;
+ 	char		   *fname;
+ 
+ 	fname = prependBlobsDirectory(AH, oid);
+ 
+ 	/* no cast needed; fopen() already returns FILE * */
+ 	ctx->dataFH = fopen(fname, PG_BINARY_W);
+ 
+ 	if (ctx->dataFH == NULL)
+ 		die_horribly(AH, modulename, "could not open output file \"%s\": %s\n",
+ 					 fname, strerror(errno));
+ 
+ 	Assert(DATA_FH_ACTIVE);
+ 
+ 	/* track the file size ourselves; see _WriteBuf()/_WriteByte() */
+ 	ctx->dataFilePos = 0;
+ 
+ 	WriteFileHeader(AH, BLK_BLOBS);
+ 
+ 	_StartDataCompressor(AH, te);
+ }
+ 
+ /*
+  * Called by the archiver when the dumper calls EndBlob.
+  *
+  * Optional.
+  */
+ static void
+ _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+ {
+ 	lclContext	   *ctx = (lclContext *) AH->formatData;
+ 	pgoff_t			save_filePos;
+ 
+ 	_EndDataCompressor(AH, te);
+ 
+ 	Assert(DATA_FH_ACTIVE);
+ 
+ 	save_filePos = ctx->dataFilePos;
+ 
+ 	/* Close the BLOB data file itself; report failure instead of ignoring it */
+ 	if (fclose(ctx->dataFH) != 0)
+ 		die_horribly(AH, modulename, "could not close data file: %s\n",
+ 					 strerror(errno));
+ 	ctx->dataFH = NULL;
+ 
+ 	Assert(BLOBS_TOC_FH_ACTIVE);
+ 
+ 	/* register the BLOB data file to BLOBS.TOC */
+ 	WriteInt(AH, oid);
+ 	WriteOffset(AH, save_filePos, K_OFFSET_POS_NOT_SET);
+ }
+ 
+ /*
+  * Called by the archiver when finishing saving all BLOB DATA.
+  *
+  * Optional.
+  */
+ static void
+ _EndBlobs(ArchiveHandle *AH, TocEntry *te)
+ {
+ 	lclContext *ctx = (lclContext *) AH->formatData;
+ 	lclTocEntry *tctx = (lclTocEntry *) te->formatData;
+ 
+ 	Assert(BLOBS_TOC_FH_ACTIVE);
+ 
+ 	/* write the end marker (oid 0) to BLOBS.TOC */
+ 	WriteInt(AH, 0);
+ 
+ 	/* report close failure instead of ignoring it */
+ 	if (fclose(ctx->blobsTocFH) != 0)
+ 		die_horribly(AH, modulename, "could not close large object TOC file: %s\n",
+ 					 strerror(errno));
+ 	ctx->blobsTocFH = NULL;
+ 
+ 	/* the file won't grow anymore. Record the size. */
+ 	tctx->fileSize = ctx->blobsTocFilePos;
+ }
+ 
+ /*
+  * The idea for the directory check is as follows: First we do a list of every
+  * file that we find in the directory. We reject filenames that don't fit our
+  * pattern outright. So at this stage we only accept all kinds of TOC data
+  * and our data files.
+  *
+  * If a filename looks good (like nnnnn.dat), we save its dumpId to ctx->chkList.
+  *
+  * Other checks then walk through the TOC and for every file they make sure
+  * that the file is what it is pretending to be. Once it passes the checks we
+  * take out its entry in chkList, i.e. replace its dumpId by InvalidDumpId.
+  *
+  * At the end what is left in chkList must be files that are not referenced
+  * from the TOC.
+  */
+ static bool
+ _StartCheckArchive(ArchiveHandle *AH)
+ {
+ 	bool			checkOK = true;
+ 	lclContext	   *ctx = (lclContext *) AH->formatData;
+ 	DIR			   *dir;
+ 	char		   *dname = ctx->directory;
+ 	struct dirent  *entry;
+ 	int				idx = 0;
+ 	char		   *suffix;
+ 	bool			tocSeen = false;
+ 
+ 	dir = opendir(dname);
+ 	if (!dir)
+ 	{
+ 		printf("Could not open directory \"%s\": %s\n", dname, strerror(errno));
+ 		return false;
+ 	}
+ 
+ 	/*
+ 	 * Actually we are just avoiding a linked list here by getting an upper
+ 	 * limit of the number of elements in the directory.
+ 	 */
+ 	while ((entry = readdir(dir)))
+ 		idx++;
+ 
+ 	ctx->chkListSize = idx;
+ 	ctx->chkList = (DumpId *) malloc(ctx->chkListSize * sizeof(DumpId));
+ 	if (ctx->chkList == NULL && ctx->chkListSize > 0)
+ 		die_horribly(AH, modulename, "out of memory\n");
+ 
+ 	/* seems that Windows doesn't have a rewinddir() equivalent */
+ 	closedir(dir);
+ 	dir = opendir(dname);
+ 	if (!dir)
+ 	{
+ 		printf("Could not open directory \"%s\": %s\n", dname, strerror(errno));
+ 		return false;
+ 	}
+ 
+ 
+ 	idx = 0;
+ 
+ 	for (;;)
+ 	{
+ 		errno = 0;
+ 		entry = readdir(dir);
+ 		if (!entry && errno == 0)
+ 			/* end of directory entries reached */
+ 			break;
+ 		if (!entry && errno)
+ 		{
+ 			/* entry is NULL here; print the directory name, not entry->d_name */
+ 			printf("Error reading directory %s: %s\n",
+ 					 dname, strerror(errno));
+ 			checkOK = false;
+ 			break;
+ 		}
+ 
+ 		if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0)
+ 			continue;
+ 		if (strcmp(entry->d_name, "blobs") == 0 &&
+ 						isDirectory(prependDirectory(AH, entry->d_name)))
+ 			continue;
+ 		if (strcmp(entry->d_name, "BLOBS.TOC") == 0 &&
+ 						isRegularFile(prependDirectory(AH, entry->d_name)))
+ 			continue;
+ 		if (strcmp(entry->d_name, "TOC") == 0 &&
+ 						isRegularFile(prependDirectory(AH, entry->d_name)))
+ 		{
+ 			tocSeen = true;
+ 			continue;
+ 		}
+ 		/* besides the above we only expect nnnn.dat, with nnnn being our numerical dumpID */
+ 		if ((suffix = strstr(entry->d_name, FILE_SUFFIX)) == NULL)
+ 		{
+ 			printf("Unexpected file \"%s\" in directory \"%s\"\n", entry->d_name, dname);
+ 			checkOK = false;
+ 			continue;
+ 		}
+ 		else
+ 		{
+ 			/* suffix now points into entry->d_name */
+ 			int dumpId;
+ 			int scBytes, scItems;
+ 
+ 			/* check if FILE_SUFFIX is really a suffix instead of just a
+ 			 * substring. */
+ 			if (strlen(suffix) != strlen(FILE_SUFFIX))
+ 			{
+ 				printf("Unexpected file \"%s\" in directory \"%s\"\n",
+ 					   entry->d_name, dname);
+ 				checkOK = false;
+ 				continue;
+ 			}
+ 
+ 			/* cut off the suffix, now entry->d_name contains the null terminated dumpId,
+ 			 * and we parse it back. */
+ 			*suffix = '\0';
+ 			scItems = sscanf(entry->d_name, "%d%n", &dumpId, &scBytes);
+ 			if (scItems != 1 || scBytes != strlen(entry->d_name))
+ 			{
+ 				printf("Unexpected file \"%s\" in directory \"%s\"\n",
+ 					   entry->d_name, dname);
+ 				checkOK = false;
+ 				continue;
+ 			}
+ 
+ 			/* Still here so this entry is good. Add the dumpId to our list. */
+ 			ctx->chkList[idx++] = (DumpId) dumpId;
+ 		}
+ 	}
+ 	closedir(dir);
+ 
+ 	/* we probably counted a few entries too much, just ignore them. */
+ 	while (idx < ctx->chkListSize)
+ 		ctx->chkList[idx++] = InvalidDumpId;
+ 
+ 	/* also return false if we haven't seen the TOC file */
+ 	return checkOK && tocSeen;
+ }
+ 
+ /*
+  * Verify that the file exists and has exactly the size recorded in the TOC.
+  * On mismatch: report and return false, or terminate if terminateOnError.
+  */
+ static bool
+ _CheckFileSize(ArchiveHandle *AH, const char *fname, pgoff_t pgSize, bool terminateOnError)
+ {
+ 	bool			checkOK = true;
+ 	FILE		   *f;
+ 	unsigned long	size = (unsigned long) pgSize;
+ 	struct stat		st;
+ 
+ 	/*
+ 	 * If terminateOnError is true, then we don't expect this to fail and if it
+ 	 * does, we need to terminate. On the other hand, if it is false we are
+ 	 * checking, go on then and present a report of all findings at the end.
+ 	 * Accordingly write to either stderr or stdout.
+ 	 */
+ 	if (terminateOnError)
+ 		f = stderr;
+ 	else
+ 		f = stdout;
+ 
+ 	if (!fname || fname[0] == '\0')
+ 	{
+ 		fprintf(f, "Invalid (empty) filename\n");
+ 		checkOK = false;
+ 	}
+ 	else if (stat(fname, &st) != 0)
+ 	{
+ 		fprintf(f, "File not found: \"%s\"\n", fname);
+ 		checkOK = false;
+ 	}
+ 	else if (st.st_size != (off_t) pgSize)
+ 	{
+ 		/* stray getuid()/getgid() debugging calls removed here */
+ 		fprintf(f, "Size mismatch for file \"%s\" (expected: %lu bytes, actual %lu bytes)\n",
+ 				fname, size, (unsigned long) st.st_size);
+ 		checkOK = false;
+ 	}
+ 
+ 	if (!checkOK && terminateOnError)
+ 	{
+ 		if (AH->connection)
+ 			PQfinish(AH->connection);
+ 
+ 		exit(1);
+ 	}
+ 
+ 	return checkOK;
+ }
+ 
+ /*
+  * Verify that the file carries a valid header belonging to this backup set
+  * (matching idStr).  Reports to stdout when merely checking, to stderr (and
+  * terminates) when terminateOnError is set.
+  */
+ static bool
+ _CheckFileContents(ArchiveHandle *AH, const char *fname, const char* idStr, bool terminateOnError)
+ {
+ 	bool			checkOK = true;
+ 	lclContext	   *ctx = (lclContext *) AH->formatData;
+ 	FILE		   *file;
+ 	FILE		   *f;
+ 	lclFileHeader	fileHeader;
+ 
+ 	Assert(ctx->dataFH == NULL);
+ 
+ 	if (terminateOnError)
+ 		f = stderr;
+ 	else
+ 		f = stdout;
+ 
+ 	if (!fname || fname[0] == '\0')
+ 	{
+ 		fprintf(f, "Invalid (empty) filename\n");
+ 		return false;
+ 	}
+ 
+ 	if (!(file = fopen(fname, PG_BINARY_R)))
+ 	{
+ 		fprintf(f, "Could not open file \"%s\": %s\n", fname, strerror(errno));
+ 		return false;
+ 	}
+ 
+ 	/* temporarily route the low-level read functions to this file */
+ 	ctx->dataFH = file;
+ 	if (ReadFileHeader(AH, &fileHeader) != 0)
+ 	{
+ 		fprintf(f, "Could not read valid file header from file \"%s\"\n", fname);
+ 		checkOK = false;
+ 	}
+ 	else if (strcmp(fileHeader.idStr, idStr) != 0)
+ 	{
+ 		fprintf(f, "File \"%s\" belongs to different backup (expected id: %s, actual id: %s)\n",
+ 				fname, idStr, fileHeader.idStr);
+ 		checkOK = false;
+ 	}
+ 
+ 	/* file is known non-NULL here, the earlier guard was redundant */
+ 	fclose(file);
+ 
+ 	ctx->dataFH = NULL;
+ 
+ 	if (!checkOK && terminateOnError)
+ 	{
+ 		if (AH->connection)
+ 			PQfinish(AH->connection);
+ 		exit(1);
+ 	}
+ 
+ 	return checkOK;
+ }
+ 
+ /*
+  * Check a single BLOB data file: size first, then the file header and
+  * backup-set ID.  The content check is skipped when the size already fails.
+  */
+ static bool
+ _CheckBlob(ArchiveHandle *AH, Oid oid, pgoff_t size)
+ {
+ 	lclContext	   *ctx = (lclContext *) AH->formatData;
+ 	char		   *path = prependBlobsDirectory(AH, oid);
+ 
+ 	return _CheckFileSize(AH, path, size, false)
+ 		&& _CheckFileContents(AH, path, ctx->idStr, false);
+ }
+ 
+ /*
+  * Check BLOBS.TOC itself, then every BLOB data file it references.
+  */
+ static bool
+ _CheckBlobs(ArchiveHandle *AH, TocEntry *te, teReqs reqs)
+ {
+ 	lclTocEntry	   *tctx = (lclTocEntry *) te->formatData;
+ 	lclContext	   *ctx = (lclContext *) AH->formatData;
+ 	char		   *fname;
+ 	bool			checkOK = true;
+ 	lclFileHeader	fileHeader;
+ 	pgoff_t			size;
+ 	Oid				oid;
+ 
+ 	/* check the BLOBS.TOC first */
+ 	fname = prependDirectory(AH, "BLOBS.TOC");
+ 
+ 	if (!fname)
+ 	{
+ 		printf("Could not find BLOBS.TOC. Check the archive!\n");
+ 		return false;
+ 	}
+ 
+ 	if (!_CheckFileSize(AH, fname, tctx->fileSize, false))
+ 		checkOK = false;
+ 	else if (!_CheckFileContents(AH, fname, ctx->idStr, false))
+ 		checkOK = false;
+ 
+ 	/* now check every single BLOB object */
+ 	ctx->blobsTocFH = fopen(fname, PG_BINARY_R);
+ 	if (ctx->blobsTocFH == NULL)
+ 	{
+ 		printf("could not open large object TOC for input: %s\n",
+ 			   strerror(errno));
+ 		return false;
+ 	}
+ 
+ 	/* previously the header result was ignored here */
+ 	if (ReadFileHeader(AH, &fileHeader) != 0)
+ 	{
+ 		printf("Could not read valid file header from file \"%s\"\n", fname);
+ 		fclose(ctx->blobsTocFH);
+ 		ctx->blobsTocFH = NULL;
+ 		return false;
+ 	}
+ 
+ 	/* we cannot test for feof() since EOF only shows up in the low
+ 	 * level read functions. But they would die_horribly() anyway. */
+ 	while ((oid = ReadInt(AH)))
+ 	{
+ 		Assert(BLOBS_TOC_FH_ACTIVE);
+ 
+ 		ReadOffset(AH, &size);
+ 
+ 		if (!_CheckBlob(AH, oid, size))
+ 			checkOK = false;
+ 
+ 		Assert(BLOBS_TOC_FH_ACTIVE);
+ 	}
+ 
+ 	Assert(BLOBS_TOC_FH_ACTIVE);
+ 
+ 	if (fclose(ctx->blobsTocFH) != 0)
+ 	{
+ 		printf("could not close large object TOC file: %s\n",
+ 			   strerror(errno));
+ 		checkOK = false;
+ 	}
+ 
+ 	/* don't leave a dangling pointer to the closed FILE behind */
+ 	ctx->blobsTocFH = NULL;
+ 
+ 	return checkOK;
+ }
+ 
+ 
+ /*
+  * Check one TOC entry: mark its data file in chkList as accounted for,
+  * then verify the file's size and contents if the entry carries data.
+  */
+ static bool
+ _CheckTocEntry(ArchiveHandle *AH, TocEntry *te, teReqs reqs)
+ {
+ 	lclContext	   *ctx = (lclContext *) AH->formatData;
+ 	lclTocEntry	   *tctx = (lclTocEntry *) te->formatData;
+ 	char		   *fname;
+ 	int				i;
+ 
+ 	/* take out files from chkList as we see them */
+ 	for (i = 0; i < ctx->chkListSize; i++)
+ 	{
+ 		if (ctx->chkList[i] == te->dumpId && te->section == SECTION_DATA)
+ 		{
+ 			ctx->chkList[i] = InvalidDumpId;
+ 			break;
+ 		}
+ 	}
+ 
+ 	if (!(reqs & REQ_DATA))
+ 		return true;
+ 
+ 	if (strcmp(te->desc, "BLOBS") == 0)
+ 		return _CheckBlobs(AH, te, reqs);
+ 
+ 	/* see comment in _tocEntryRequired() for the special case of SEQUENCE SET */
+ 	if (strcmp(te->desc, "SEQUENCE SET") == 0 ||
+ 		strcmp(te->desc, "BLOB") == 0 ||
+ 		strcmp(te->desc, "COMMENT") == 0)
+ 		return true;
+ 
+ 	fname = prependDirectory(AH, tctx->filename);
+ 	if (!fname)
+ 	{
+ 		printf("Could not find file %s\n", tctx->filename);
+ 		return false;
+ 	}
+ 	if (!_CheckFileSize(AH, fname, tctx->fileSize, false))
+ 		return false;
+ 	if (!_CheckFileContents(AH, fname, ctx->idStr, false))
+ 		return false;
+ 
+ 	return true;
+ }
+ 
+ /*
+  * Final step of the archive check: anything still left in chkList was
+  * found in the directory but never claimed by a TOC entry.
+  */
+ static bool
+ _EndCheckArchive(ArchiveHandle *AH)
+ {
+ 	lclContext	   *ctx = (lclContext *) AH->formatData;
+ 	bool			ok = true;
+ 	int				i;
+ 
+ 	for (i = 0; i < ctx->chkListSize; i++)
+ 	{
+ 		if (ctx->chkList[i] == InvalidDumpId)
+ 			continue;
+ 
+ 		printf("Unexpected file: %d"FILE_SUFFIX"\n", ctx->chkList[i]);
+ 		ok = false;
+ 	}
+ 
+ 	return ok;
+ }
+ 
+ 
+ /*
+  * Create dir (or dir/subdir if subdir is given), dying if it already exists
+  * or cannot be created.
+  */
+ static void
+ createDirectory(const char *dir, const char *subdir)
+ {
+ 	struct stat		st;
+ 	char			dirname[MAXPGPATH];
+ 
+ 	/*
+ 	 * Check the combined length before copying into the fixed-size buffer.
+ 	 * Previously the check was only applied when subdir was given, so a
+ 	 * plain directory name longer than MAXPGPATH could overflow dirname.
+ 	 */
+ 	if (strlen(dir) + (subdir ? strlen(subdir) + 1 : 0) + 1 > MAXPGPATH)
+ 		die_horribly(NULL, modulename, "directory name %s too long\n", dir);
+ 
+ 	strcpy(dirname, dir);
+ 
+ 	if (subdir)
+ 	{
+ 		strcat(dirname, "/");
+ 		strcat(dirname, subdir);
+ 	}
+ 
+ 	/* the directory must not yet exist, first check if it is existing */
+ 	if (stat(dirname, &st) == 0)
+ 	{
+ 		if (S_ISDIR(st.st_mode))
+ 			die_horribly(NULL, modulename,
+ 						 "Cannot create directory %s, it exists already\n", dirname);
+ 		else
+ 			die_horribly(NULL, modulename,
+ 						 "Cannot create directory %s, a file with this name exists already\n", dirname);
+ 	}
+ 
+ 	/*
+ 	 * Now we create the directory. Note that for some race condition we
+ 	 * could also run into the situation that the directory has been created
+ 	 * just between our two calls.
+ 	 */
+ 	if (mkdir(dirname, 0700) < 0)
+ 		die_horribly(NULL, modulename, "Could not create directory %s: %s\n",
+ 					 dirname, strerror(errno));
+ }
+ 
+ 
+ /*
+  * Build "<directory>/<relativeFilename>".
+  *
+  * NB: returns a pointer to a static buffer that is overwritten by the
+  * next call to this function.
+  */
+ static char *
+ prependDirectory(ArchiveHandle *AH, const char *relativeFilename)
+ {
+ 	lclContext	   *ctx = (lclContext *) AH->formatData;
+ 	static char		buf[MAXPGPATH];
+ 	int				r;
+ 
+ 	/* snprintf bounds the copy and reports truncation, unlike strcpy/strcat */
+ 	r = snprintf(buf, MAXPGPATH, "%s/%s", ctx->directory, relativeFilename);
+ 	if (r < 0 || r >= MAXPGPATH)
+ 		die_horribly(AH, modulename, "path name too long: %s\n", ctx->directory);
+ 
+ 	return buf;
+ }
+ 
+ /*
+  * Build "<directory>/blobs/<oid>.dat" for a large object's data file.
+  *
+  * NB: returns a pointer to a static buffer that is overwritten by the
+  * next call to this function.
+  */
+ static char *
+ prependBlobsDirectory(ArchiveHandle *AH, Oid oid)
+ {
+ 	static char		buf[MAXPGPATH];
+ 	char		   *dname;
+ 	lclContext	   *ctx = (lclContext *) AH->formatData;
+ 	int				r;
+ 
+ 	dname = ctx->directory;
+ 
+ 	/* Oid is unsigned; %u, not %d, so large OIDs aren't printed negative */
+ 	r = snprintf(buf, MAXPGPATH, "%s/blobs/%u%s",
+ 				 dname, oid, FILE_SUFFIX);
+ 
+ 	if (r < 0 || r >= MAXPGPATH)
+ 		die_horribly(AH, modulename, "path name too long: %s\n", dname);
+ 
+ 	return buf;
+ }
+ 
+ /*
+  * Switch the shared compressor state into write (deflate) mode before a
+  * data or BLOB file is written.
+  */
+ static void
+ _StartDataCompressor(ArchiveHandle *AH, TocEntry *te)
+ {
+ 	lclContext *ctx = (lclContext *) AH->formatData;
+ 
+ 	InitCompressorState(AH, ctx->cs, COMPRESSOR_DEFLATE);
+ }
+ 
+ 
+ /*
+  * Flush any buffered compressed output to the current file via _WriteBuf
+  * when a data or BLOB file is finished.
+  */
+ static void
+ _EndDataCompressor(ArchiveHandle *AH, TocEntry *te)
+ {
+ 	lclContext *ctx = (lclContext *) AH->formatData;
+ 
+ 	FlushCompressorState(AH, ctx->cs, _WriteBuf);
+ }
+ 
+ /*
+  * Read callback handed to ReadDataFromArchive(): fill the compressor's
+  * input buffer from the currently open file and point *buf at it.
+  * Returns the number of bytes actually read (0 at EOF).
+  *
+  * NOTE(review): assumes cs->comprIn stays valid and owned by the
+  * compressor between calls — confirm against compress_io.c.
+  */
+ static size_t
+ _DirectoryReadFunction(ArchiveHandle *AH, void **buf, size_t sizeHint)
+ {
+ 	lclContext		   *ctx = (lclContext *) AH->formatData;
+ 	CompressorState	   *cs = ctx->cs;
+ 
+ 	Assert(cs->comprInSize >= comprInInitSize);
+ 
+ 	/* a zero hint means "use the default chunk size" */
+ 	if (sizeHint == 0)
+ 		sizeHint = comprInInitSize;
+ 
+ 	*buf = cs->comprIn;
+ 	return _ReadBuf(AH, cs->comprIn, sizeHint);
+ }
+ 
+ /*
+  * Append the backup-set ID string to the archive header; read back by
+  * _ReadExtraHead().
+  */
+ static void
+ _WriteExtraHead(ArchiveHandle *AH)
+ {
+ 	lclContext *ctx = (lclContext *) AH->formatData;
+ 
+ 	WriteStr(AH, ctx->idStr);
+ }
+ 
+ /*
+  * Read the backup-set ID string written by _WriteExtraHead() and store it
+  * in the format context.
+  */
+ static void
+ _ReadExtraHead(ArchiveHandle *AH)
+ {
+ 	lclContext *ctx = (lclContext *) AH->formatData;
+ 	char	   *str = ReadStr(AH);
+ 
+ 	/* the ID is an MD5 hex digest, exactly 32 characters */
+ 	if (!str || strlen(str) != 32)
+ 		die_horribly(AH, modulename, "Invalid ID of the backup set (corrupted TOC file?)\n");
+ 
+ 	strcpy(ctx->idStr, str);
+ 	free(str);					/* ReadStr() result was leaked before */
+ }
+ 
+ /*
+  * Fill s with len bytes of random data and return s.
+  *
+  * The result is guaranteed to be a NUL-terminated string without embedded
+  * NUL bytes, so callers may safely apply strlen() to it.  Previously the
+  * buffer was neither terminated nor free of NUL bytes, making the caller's
+  * strlen() undefined behavior.
+  */
+ static char *
+ getRandomData(char *s, int len)
+ {
+ 	int i;
+ 
+ #ifdef USE_SSL
+ 	if (RAND_bytes((unsigned char *)s, len) != 1)
+ #endif
+ 		for (i = 0; i < len; i++)
+ 			/* Use a lower strength random number if OpenSSL is not available */
+ 			s[i] = random() % 255;
+ 
+ 	/* map embedded NUL bytes to a nonzero value and terminate the string */
+ 	for (i = 0; i < len - 1; i++)
+ 		if (s[i] == '\0')
+ 			s[i] = 1;
+ 	s[len - 1] = '\0';
+ 
+ 	return s;
+ }
+ 
+ /*
+  * Return true iff fname exists and is a directory; stat() failure
+  * (e.g. nonexistent path) counts as "not a directory".
+  */
+ static bool
+ isDirectory(const char *fname)
+ {
+ 	struct stat statbuf;
+ 
+ 	return stat(fname, &statbuf) == 0 && S_ISDIR(statbuf.st_mode);
+ }
+ 
+ /*
+  * Return true iff fname exists and is a regular file; stat() failure
+  * (e.g. nonexistent path) counts as "not a regular file".
+  */
+ static bool
+ isRegularFile(const char *fname)
+ {
+ 	struct stat statbuf;
+ 
+ 	return stat(fname, &statbuf) == 0 && S_ISREG(statbuf.st_mode);
+ }
+ 
diff --git a/src/bin/pg_dump/pg_backup_files.c b/src/bin/pg_dump/pg_backup_files.c
index abc93b1..825c473 100644
*** a/src/bin/pg_dump/pg_backup_files.c
--- b/src/bin/pg_dump/pg_backup_files.c
*************** InitArchiveFmt_Files(ArchiveHandle *AH)
*** 92,97 ****
--- 92,98 ----
  	AH->ReadExtraTocPtr = _ReadExtraToc;
  	AH->WriteExtraTocPtr = _WriteExtraToc;
  	AH->PrintExtraTocPtr = _PrintExtraToc;
+ 	AH->PrintExtraTocSummaryPtr = NULL;
  
  	AH->StartBlobsPtr = _StartBlobs;
  	AH->StartBlobPtr = _StartBlob;
*************** InitArchiveFmt_Files(ArchiveHandle *AH)
*** 100,105 ****
--- 101,110 ----
  	AH->ClonePtr = NULL;
  	AH->DeClonePtr = NULL;
  
+ 	AH->StartCheckArchivePtr = NULL;
+ 	AH->CheckTocEntryPtr = NULL;
+ 	AH->EndCheckArchivePtr = NULL;
+ 
  	/*
  	 * Set up some special context used in compressing data.
  	 */
diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c
index 006f7da..dcc13ee 100644
*** a/src/bin/pg_dump/pg_backup_tar.c
--- b/src/bin/pg_dump/pg_backup_tar.c
*************** InitArchiveFmt_Tar(ArchiveHandle *AH)
*** 144,149 ****
--- 144,150 ----
  	AH->ReadExtraTocPtr = _ReadExtraToc;
  	AH->WriteExtraTocPtr = _WriteExtraToc;
  	AH->PrintExtraTocPtr = _PrintExtraToc;
+ 	AH->PrintExtraTocSummaryPtr = NULL;
  
  	AH->StartBlobsPtr = _StartBlobs;
  	AH->StartBlobPtr = _StartBlob;
*************** InitArchiveFmt_Tar(ArchiveHandle *AH)
*** 152,157 ****
--- 153,162 ----
  	AH->ClonePtr = NULL;
  	AH->DeClonePtr = NULL;
  
+ 	AH->StartCheckArchivePtr = NULL;
+ 	AH->CheckTocEntryPtr = NULL;
+ 	AH->EndCheckArchivePtr = NULL;
+ 
  	/*
  	 * Set up some special context used in compressing data.
  	 */
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 55ea684..39e68d9 100644
*** a/src/bin/pg_dump/pg_dump.c
--- b/src/bin/pg_dump/pg_dump.c
***************
*** 56,61 ****
--- 56,62 ----
  
  #include "pg_backup_archiver.h"
  #include "dumputils.h"
+ #include "compress_io.h"
  
  extern char *optarg;
  extern int	optind,
*************** static int	no_security_label = 0;
*** 137,142 ****
--- 138,144 ----
  
  
  static void help(const char *progname);
+ static ArchiveFormat parseArchiveFormat(const char *format);
  static void expand_schema_name_patterns(SimpleStringList *patterns,
  							SimpleOidList *oids);
  static void expand_table_name_patterns(SimpleStringList *patterns,
*************** main(int argc, char **argv)
*** 255,261 ****
  	int			numObjs;
  	int			i;
  	enum trivalue prompt_password = TRI_DEFAULT;
! 	int			compressLevel = -1;
  	int			plainText = 0;
  	int			outputClean = 0;
  	int			outputCreateDB = 0;
--- 257,263 ----
  	int			numObjs;
  	int			i;
  	enum trivalue prompt_password = TRI_DEFAULT;
! 	int			compressLevel = COMPRESSION_UNKNOWN;
  	int			plainText = 0;
  	int			outputClean = 0;
  	int			outputCreateDB = 0;
*************** main(int argc, char **argv)
*** 266,275 ****
--- 268,279 ----
  	int			my_version;
  	int			optindex;
  	RestoreOptions *ropt;
+ 	ArchiveFormat   archiveFormat = archUnknown;
  
  	static int	disable_triggers = 0;
  	static int	outputNoTablespaces = 0;
  	static int	use_setsessauth = 0;
+ 	static int	compressLZF = 0;
  
  	static struct option long_options[] = {
  		{"data-only", no_argument, NULL, 'a'},
*************** main(int argc, char **argv)
*** 311,316 ****
--- 315,321 ----
  		{"disable-triggers", no_argument, &disable_triggers, 1},
  		{"inserts", no_argument, &dump_inserts, 1},
  		{"lock-wait-timeout", required_argument, NULL, 2},
+ 		{"compress-lzf", no_argument, &compressLZF, 1},
  		{"no-tablespaces", no_argument, &outputNoTablespaces, 1},
  		{"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
  		{"role", required_argument, NULL, 3},
*************** main(int argc, char **argv)
*** 535,568 ****
  		exit(1);
  	}
  
! 	/* open the output file */
! 	if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
! 	{
! 		/* This is used by pg_dumpall, and is not documented */
  		plainText = 1;
! 		g_fout = CreateArchive(filename, archNull, 0, archModeAppend);
! 	}
! 	else if (pg_strcasecmp(format, "c") == 0 || pg_strcasecmp(format, "custom") == 0)
! 		g_fout = CreateArchive(filename, archCustom, compressLevel, archModeWrite);
! 	else if (pg_strcasecmp(format, "f") == 0 || pg_strcasecmp(format, "file") == 0)
  	{
! 		/*
! 		 * Dump files into the current directory; for demonstration only, not
! 		 * documented.
! 		 */
! 		g_fout = CreateArchive(filename, archFiles, compressLevel, archModeWrite);
  	}
! 	else if (pg_strcasecmp(format, "p") == 0 || pg_strcasecmp(format, "plain") == 0)
  	{
! 		plainText = 1;
! 		g_fout = CreateArchive(filename, archNull, 0, archModeWrite);
  	}
! 	else if (pg_strcasecmp(format, "t") == 0 || pg_strcasecmp(format, "tar") == 0)
! 		g_fout = CreateArchive(filename, archTar, compressLevel, archModeWrite);
! 	else
  	{
! 		write_msg(NULL, "invalid output format \"%s\" specified\n", format);
! 		exit(1);
  	}
  
  	if (g_fout == NULL)
--- 540,615 ----
  		exit(1);
  	}
  
! 	archiveFormat = parseArchiveFormat(format);
! 
! 	/* archiveFormat specific setup */
! 	if (archiveFormat == archNull || archiveFormat == archNullAppend)
  		plainText = 1;
! 
! 	if (compressLZF)
  	{
! 		if (archiveFormat != archCustom && archiveFormat != archDirectory)
! 		{
! 			write_msg(NULL, "LZF compression is currently only supported for the custom "
! 							"or directory format\n");
! 			exit(1);
! 		}
! 		else
! 			compressLevel = COMPR_LZF_CODE;
  	}
! 
! 	/*
! 	 * If AH->compression == UNKNOWN_COMPRESSION then it has not been set to some
! 	 * value explicitly.
! 	 *
! 	 * Fall back to default:
! 	 *
! 	 * zlib with Z_DEFAULT_COMPRESSION for those formats that support it.
! 	 * If either one is not available: use no compression at all.
! 	 */
! 
! 	if (compressLevel == COMPRESSION_UNKNOWN)
  	{
! #ifdef HAVE_LIBZ
! 		if (archiveFormat == archCustom || archiveFormat == archDirectory)
! 			compressLevel = Z_DEFAULT_COMPRESSION;
! 		else
! 			compressLevel = 0;
! #else
! 		compressLevel = 0;
! #endif
  	}
! 
! 	/* open the output file */
! 	switch(archiveFormat)
  	{
! 		case archCustom:
! 			g_fout = CreateArchive(filename, archCustom, compressLevel,
! 								   archModeWrite);
! 			break;
! 		case archDirectory:
! 			g_fout = CreateArchive(filename, archDirectory, compressLevel,
! 								   archModeWrite);
! 			break;
! 		case archFiles:
! 			g_fout = CreateArchive(filename, archFiles, compressLevel,
! 								   archModeWrite);
! 			break;
! 		case archNull:
! 			g_fout = CreateArchive(filename, archNull, 0, archModeWrite);
! 			break;
! 		case archNullAppend:
! 			g_fout = CreateArchive(filename, archNull, 0, archModeAppend);
! 			break;
! 		case archTar:
! 			g_fout = CreateArchive(filename, archTar, compressLevel,
! 								   archModeWrite);
! 			break;
! 
! 		default:
! 			/* We never reach here, because parseArchiveFormat() has
! 			 * already rejected any unrecognized format. */
! 			break;
  	}
  
  	if (g_fout == NULL)
*************** main(int argc, char **argv)
*** 671,677 ****
  	 */
  	do_sql_command(g_conn, "BEGIN");
  
! 	do_sql_command(g_conn, "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE");
  
  	/* Select the appropriate subquery to convert user IDs to names */
  	if (g_fout->remoteVersion >= 80100)
--- 718,724 ----
  	 */
  	do_sql_command(g_conn, "BEGIN");
  
! 	do_sql_command(g_conn, "SET TRANSACTION READ ONLY ISOLATION LEVEL SERIALIZABLE");
  
  	/* Select the appropriate subquery to convert user IDs to names */
  	if (g_fout->remoteVersion >= 80100)
*************** help(const char *progname)
*** 832,840 ****
  
  	printf(_("\nGeneral options:\n"));
  	printf(_("  -f, --file=FILENAME         output file name\n"));
! 	printf(_("  -F, --format=c|t|p          output file format (custom, tar, plain text)\n"));
  	printf(_("  -v, --verbose               verbose mode\n"));
! 	printf(_("  -Z, --compress=0-9          compression level for compressed formats\n"));
  	printf(_("  --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
  	printf(_("  --help                      show this help, then exit\n"));
  	printf(_("  --version                   output version information, then exit\n"));
--- 879,888 ----
  
  	printf(_("\nGeneral options:\n"));
  	printf(_("  -f, --file=FILENAME         output file name\n"));
! 	printf(_("  -F, --format=c|d|t|p        output file format (custom, directory, tar, plain text)\n"));
  	printf(_("  -v, --verbose               verbose mode\n"));
! 	printf(_("  -Z, --compress=0-9          compression level of libz for compressed formats\n"));
! 	printf(_("  --compress-lzf              use liblzf compression instead of zlib\n"));
  	printf(_("  --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
  	printf(_("  --help                      show this help, then exit\n"));
  	printf(_("  --version                   output version information, then exit\n"));
*************** exit_nicely(void)
*** 889,894 ****
--- 937,980 ----
  	exit(1);
  }
  
+ static ArchiveFormat
+ parseArchiveFormat(const char *format)	/* map a -F option string to an ArchiveFormat; exits on unknown input */
+ {
+ 	ArchiveFormat archiveFormat;
+ 
+ 	if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
+ 		/* This is used by pg_dumpall, and is not documented */
+ 		archiveFormat = archNullAppend;
+ 	else if (pg_strcasecmp(format, "c") == 0)
+ 		archiveFormat = archCustom;
+ 	else if (pg_strcasecmp(format, "custom") == 0)
+ 		archiveFormat = archCustom;
+ 	else if (pg_strcasecmp(format, "d") == 0)
+ 		archiveFormat = archDirectory;
+ 	else if (pg_strcasecmp(format, "directory") == 0)
+ 		archiveFormat = archDirectory;
+ 	else if (pg_strcasecmp(format, "f") == 0 || pg_strcasecmp(format, "file") == 0)
+ 		/*
+ 		 * Dump files into the current directory; for demonstration only, not
+ 		 * documented.
+ 		 */
+ 		archiveFormat = archFiles;
+ 	else if (pg_strcasecmp(format, "p") == 0)
+ 		archiveFormat = archNull;
+ 	else if (pg_strcasecmp(format, "plain") == 0)
+ 		archiveFormat = archNull;
+ 	else if (pg_strcasecmp(format, "t") == 0)
+ 		archiveFormat = archTar;
+ 	else if (pg_strcasecmp(format, "tar") == 0)
+ 		archiveFormat = archTar;
+ 	else
+ 	{
+ 		write_msg(NULL, "invalid output format \"%s\" specified\n", format);
+ 		exit(1);	/* unknown format is fatal: no archive type to fall back to */
+ 	}
+ 	return archiveFormat;
+ }
+ 
  /*
   * Find the OIDs of all schemas matching the given list of patterns,
   * and append them to the given OID list.
*************** dumpBlobs(Archive *AH, void *arg)
*** 2174,2180 ****
  					exit_nicely();
  				}
  
! 				WriteData(AH, buf, cnt);
  			} while (cnt > 0);
  
  			lo_close(g_conn, loFd);
--- 2260,2267 ----
  					exit_nicely();
  				}
  
! 				if (cnt > 0)
! 					WriteData(AH, buf, cnt);
  			} while (cnt > 0);
  
  			lo_close(g_conn, loFd);
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index 7885535..0f643b9 100644
*** a/src/bin/pg_dump/pg_dump.h
--- b/src/bin/pg_dump/pg_dump.h
*************** typedef struct
*** 39,44 ****
--- 39,45 ----
  } CatalogId;
  
  typedef int DumpId;
+ #define InvalidDumpId		(-1)
  
  /*
   * Data structures for simple lists of OIDs and strings.  The support for
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c
index 1ddba72..3fbe264 100644
*** a/src/bin/pg_dump/pg_restore.c
--- b/src/bin/pg_dump/pg_restore.c
*************** main(int argc, char **argv)
*** 79,84 ****
--- 79,85 ----
  	static int	skip_seclabel = 0;
  
  	struct option cmdopts[] = {
+ 		{"check", 0, NULL, 'k'},
  		{"clean", 0, NULL, 'c'},
  		{"create", 0, NULL, 'C'},
  		{"data-only", 0, NULL, 'a'},
*************** main(int argc, char **argv)
*** 144,150 ****
  		}
  	}
  
! 	while ((c = getopt_long(argc, argv, "acCd:ef:F:h:iI:j:lL:n:Op:P:RsS:t:T:U:vwWxX:1",
  							cmdopts, NULL)) != -1)
  	{
  		switch (c)
--- 145,151 ----
  		}
  	}
  
! 	while ((c = getopt_long(argc, argv, "acCd:ef:F:h:iI:j:klL:n:Op:P:RsS:t:T:U:vwWxX:1",
  							cmdopts, NULL)) != -1)
  	{
  		switch (c)
*************** main(int argc, char **argv)
*** 182,188 ****
  			case 'j':			/* number of restore jobs */
  				opts->number_of_jobs = atoi(optarg);
  				break;
! 
  			case 'l':			/* Dump the TOC summary */
  				opts->tocSummary = 1;
  				break;
--- 183,191 ----
  			case 'j':			/* number of restore jobs */
  				opts->number_of_jobs = atoi(optarg);
  				break;
! 			case 'k':			/* check the archive */
! 				opts->checkArchive = 1;
! 				break;
  			case 'l':			/* Dump the TOC summary */
  				opts->tocSummary = 1;
  				break;
*************** main(int argc, char **argv)
*** 352,357 ****
--- 355,365 ----
  				opts->format = archCustom;
  				break;
  
+ 			case 'd':
+ 			case 'D':
+ 				opts->format = archDirectory;
+ 				break;
+ 
  			case 'f':
  			case 'F':
  				opts->format = archFiles;
*************** main(int argc, char **argv)
*** 363,369 ****
  				break;
  
  			default:
! 				write_msg(NULL, "unrecognized archive format \"%s\"; please specify \"c\" or \"t\"\n",
  						  opts->formatName);
  				exit(1);
  		}
--- 371,377 ----
  				break;
  
  			default:
! 				write_msg(NULL, "unrecognized archive format \"%s\"; please specify \"c\", \"d\" or \"t\"\n",
  						  opts->formatName);
  				exit(1);
  		}
*************** main(int argc, char **argv)
*** 392,397 ****
--- 400,413 ----
  
  	if (opts->tocSummary)
  		PrintTOCSummary(AH, opts);
+ 	else if (opts->checkArchive)
+ 	{
+ 		bool    checkOK;
+ 		checkOK = CheckArchive(AH, opts);
+ 		CloseArchive(AH);
+ 		if (!checkOK)
+ 			exit(1);
+ 	}
  	else
  		RestoreArchive(AH, opts);
  
*************** usage(const char *progname)
*** 418,425 ****
  	printf(_("\nGeneral options:\n"));
  	printf(_("  -d, --dbname=NAME        connect to database name\n"));
  	printf(_("  -f, --file=FILENAME      output file name\n"));
! 	printf(_("  -F, --format=c|t         backup file format (should be automatic)\n"));
  	printf(_("  -l, --list               print summarized TOC of the archive\n"));
  	printf(_("  -v, --verbose            verbose mode\n"));
  	printf(_("  --help                   show this help, then exit\n"));
  	printf(_("  --version                output version information, then exit\n"));
--- 434,442 ----
  	printf(_("\nGeneral options:\n"));
  	printf(_("  -d, --dbname=NAME        connect to database name\n"));
  	printf(_("  -f, --file=FILENAME      output file name\n"));
! 	printf(_("  -F, --format=c|d|t       backup file format (should be automatic)\n"));
  	printf(_("  -l, --list               print summarized TOC of the archive\n"));
+ 	printf(_("  -k                       check the directory archive\n"));
  	printf(_("  -v, --verbose            verbose mode\n"));
  	printf(_("  --help                   show this help, then exit\n"));
  	printf(_("  --version                output version information, then exit\n"));
diff --git a/src/bin/pg_dump/test.sh b/src/bin/pg_dump/test.sh
index ...23547fa .
*** a/src/bin/pg_dump/test.sh
--- b/src/bin/pg_dump/test.sh
***************
*** 0 ****
--- 1,68 ----
+ #!/bin/sh -x
+ 
+ 
+ # lzf compression
+ rm -rf out.dir
+ dropdb foodb
+ createdb --template=template0 foodb --lc-ctype=C
+ psql foodb -c "alter database foodb set lc_monetary to 'C'"
+ #./pg_dump --column-inserts --compress-lzf -Fd -f out.dir regression || exit 1
+ ./pg_dump --compress-lzf -Fd -f out.dir regression || exit 1
+ ./pg_restore out.dir -d foodb && ./pg_restore -k out.dir || exit 1
+ 
+ # zlib compression
+ rm -rf out.dir
+ dropdb foodb
+ createdb --template=template0 foodb --lc-ctype=C
+ psql foodb -c "alter database foodb set lc_monetary to 'C'"
+ ./pg_dump --compress=4 -Fd -f out.dir regression || exit 1
+ ./pg_restore out.dir -d foodb || exit 1
+ ./pg_restore -k out.dir || exit 1
+ 
+ rm -f out.custom	# -f: the file does not exist on the first run
+ dropdb foodb
+ createdb --template=template0 foodb --lc-ctype=C
+ psql foodb -c "alter database foodb set lc_monetary to 'C'"
+ #./pg_dump --inserts --compress=8 -Fc -f out.custom regression || exit 1
+ ./pg_dump --compress=8 -Fc -f out.custom regression || exit 1
+ ./pg_restore out.custom -d foodb || exit 1
+ 
+ # no compression
+ rm -rf out.dir
+ dropdb foodb
+ createdb --template=template0 foodb --lc-ctype=C
+ psql foodb -c "alter database foodb set lc_monetary to 'C'"
+ ./pg_dump --disable-dollar-quoting --compress=0 -Fd -f out.dir regression || exit 1
+ ./pg_restore out.dir -d foodb || exit 1
+ ./pg_restore -k out.dir || exit 1
+ 
+ rm -f out.custom	# -f: don't fail if a previous run already removed it
+ dropdb foodb
+ createdb --template=template0 foodb --lc-ctype=C
+ psql foodb -c "alter database foodb set lc_monetary to 'C'"
+ ./pg_dump --quote-all-identifiers --compress=0 -Fc -f out.custom regression || exit 1
+ ./pg_restore out.custom -d foodb || exit 1
+ 
+ dropdb foodb
+ createdb --template=template0 foodb --lc-ctype=C
+ psql foodb -c "alter database foodb set lc_monetary to 'C'"
+ pg_dump -Ft regression  | pg_restore -d foodb || exit 1
+ 
+ dropdb foodb
+ createdb --template=template0 foodb --lc-ctype=C
+ psql foodb -c "alter database foodb set lc_monetary to 'C'"
+ pg_dump regression  | psql foodb || exit 1
+ 
+ # restore 9.0 archives
+ dropdb foodb
+ createdb --template=template0 foodb --lc-ctype=C
+ psql foodb -c "alter database foodb set lc_monetary to 'C'"
+ ./pg_restore out.cust.none.90 -d foodb || exit 1
+ 
+ dropdb foodb
+ createdb --template=template0 foodb --lc-ctype=C
+ psql foodb -c "alter database foodb set lc_monetary to 'C'"
+ ./pg_restore out.cust.z.90 -d foodb || exit 1
+ 
+ 
+ echo Success
diff --git a/src/include/pg_config.h.in b/src/include/pg_config.h.in
index fd169b6..b79d765 100644
*** a/src/include/pg_config.h.in
--- b/src/include/pg_config.h.in
***************
*** 323,328 ****
--- 323,331 ----
  /* Define to 1 if you have the `z' library (-lz). */
  #undef HAVE_LIBZ
  
+ /* Define to 1 if you have the `lzf' library (-llzf). */
+ #undef HAVE_LIBLZF
+ 
  /* Define to 1 if constants of type 'long long int' should have the suffix LL.
     */
  #undef HAVE_LL_CONSTANTS
