diff --git a/contrib/pageinspect/Makefile b/contrib/pageinspect/Makefile
index f10229d..45b5b6c 100644
--- a/contrib/pageinspect/Makefile
+++ b/contrib/pageinspect/Makefile
@@ -1,7 +1,7 @@
 # contrib/pageinspect/Makefile
 
 MODULE_big	= pageinspect
-OBJS		= rawpage.o heapfuncs.o btreefuncs.o fsmfuncs.o $(WIN32RES)
+OBJS		= rawpage.o heapfuncs.o btreefuncs.o fsmfuncs.o mmfuncs.o $(WIN32RES)
 
 EXTENSION = pageinspect
 DATA = pageinspect--1.2.sql pageinspect--1.0--1.1.sql \
diff --git a/contrib/pageinspect/mmfuncs.c b/contrib/pageinspect/mmfuncs.c
new file mode 100644
index 0000000..b2584d3
--- /dev/null
+++ b/contrib/pageinspect/mmfuncs.c
@@ -0,0 +1,421 @@
+/*
+ * mmfuncs.c
+ *		Functions to investigate MinMax indexes
+ *
+ * Copyright (c) 2014, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ *		contrib/pageinspect/mmfuncs.c
+ */
+#include "postgres.h"
+
+#include "access/htup_details.h"
+#include "access/minmax.h"
+#include "access/minmax_internal.h"
+#include "access/minmax_page.h"
+#include "access/minmax_revmap.h"
+#include "access/minmax_tuple.h"
+#include "catalog/index.h"
+#include "catalog/pg_type.h"
+#include "funcapi.h"
+#include "utils/array.h"
+#include "utils/builtins.h"
+#include "utils/lsyscache.h"
+#include "utils/rel.h"
+#include "miscadmin.h"
+
+Datum minmax_page_type(PG_FUNCTION_ARGS);
+Datum minmax_page_items(PG_FUNCTION_ARGS);
+Datum minmax_metapage_info(PG_FUNCTION_ARGS);
+Datum minmax_revmap_array_data(PG_FUNCTION_ARGS);
+Datum minmax_revmap_data(PG_FUNCTION_ARGS);
+
+PG_FUNCTION_INFO_V1(minmax_page_type);
+PG_FUNCTION_INFO_V1(minmax_page_items);
+PG_FUNCTION_INFO_V1(minmax_metapage_info);
+PG_FUNCTION_INFO_V1(minmax_revmap_array_data);
+PG_FUNCTION_INFO_V1(minmax_revmap_data);
+
+typedef struct mm_page_state
+{
+	TupleDesc	tupdesc;
+	Page		page;
+	OffsetNumber offset;
+	bool		unusedItem;
+	bool		done;
+	AttrNumber	attno;
+	DeformedMMTuple *dtup;
+	FmgrInfo	outputfn[FLEXIBLE_ARRAY_MEMBER];
+} mm_page_state;
+
+
+static Page verify_minmax_page(bytea *raw_page, uint16 type,
+				 const char *strtype);
+
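+/*
+ * Return the type of a minmax index page, as text
+ *
+ * Usage: SELECT minmax_page_type(get_raw_page('idx', 0));
+ */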
+Datum
+minmax_page_type(PG_FUNCTION_ARGS)
+{
+	bytea	   *raw_page = PG_GETARG_BYTEA_P(0);
+	Page		page = VARDATA(raw_page);
+	MinmaxSpecialSpace *special;
+	char *type;
+
+	special = (MinmaxSpecialSpace *) PageGetSpecialPointer(page);
+
+	switch (special->type)
+	{
+		case MINMAX_PAGETYPE_META:
+			type = "meta";
+			break;
+		case MINMAX_PAGETYPE_REVMAP_ARRAY:
+			type = "revmap array";
+			break;
+		case MINMAX_PAGETYPE_REVMAP:
+			type = "revmap";
+			break;
+		case MINMAX_PAGETYPE_REGULAR:
+			type = "regular";
+			break;
+		default:
+			type = psprintf("unknown (%02x)", special->type);
+			break;
+	}
+
+	PG_RETURN_TEXT_P(cstring_to_text(type));
+}
+
+/*
+ * Verify that the given bytea contains a minmax page of the indicated page
+ * type, or die in the attempt.  A pointer to the page is returned.
+ */
+static Page
+verify_minmax_page(bytea *raw_page, uint16 type, const char *strtype)
+{
+	Page	page;
+	int		raw_page_size;
+	MinmaxSpecialSpace *special;
+
+	raw_page_size = VARSIZE(raw_page) - VARHDRSZ;
+
+	if (raw_page_size < SizeOfPageHeaderData)
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+				 errmsg("input page too small"),
+				 errdetail("Expected size %d, got %d", raw_page_size, BLCKSZ)));
+
+	page = VARDATA(raw_page);
+
+	/* verify the special space says this page is what we want */
+	special = (MinmaxSpecialSpace *) PageGetSpecialPointer(page);
+	if (special->type != type)
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+				 errmsg("page is not a Minmax page of type \"%s\"", strtype),
+				 errdetail("Expected special type %08x, got %08x.",
+						   type, special->type)));
+
+	return page;
+}
+
+
+#ifdef NOT_YET
+/*
+ * Extract all item values from a minmax index page
+ *
+ * Usage: SELECT * FROM minmax_page_items(get_raw_page('idx', 1), 'idx'::regclass);
+ */
+Datum
+minmax_page_items(PG_FUNCTION_ARGS)
+{
+	mm_page_state *state;
+	FuncCallContext *fctx;
+
+	if (!superuser())
+		ereport(ERROR,
+				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+				 (errmsg("must be superuser to use raw page functions"))));
+
+	if (SRF_IS_FIRSTCALL())
+	{
+		bytea	   *raw_page = PG_GETARG_BYTEA_P(0);
+		Oid			indexRelid = PG_GETARG_OID(1);
+		Page		page;
+		TupleDesc	tupdesc;
+		MemoryContext mctx;
+		Relation	indexRel;
+		AttrNumber	attno;
+
+		/* minimally verify the page we got */
+		page = verify_minmax_page(raw_page, MINMAX_PAGETYPE_REGULAR, "regular");
+
+		/* create a function context for cross-call persistence */
+		fctx = SRF_FIRSTCALL_INIT();
+
+		/* switch to memory context appropriate for multiple function calls */
+		mctx = MemoryContextSwitchTo(fctx->multi_call_memory_ctx);
+
+		/* Build a tuple descriptor for our result type */
+		if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+			elog(ERROR, "return type must be a row type");
+
+		indexRel = index_open(indexRelid, AccessShareLock);
+
+		state = palloc(offsetof(mm_page_state, outputfn) +
+					   sizeof(FmgrInfo) * RelationGetDescr(indexRel)->natts);
+
+		state->tupdesc = CreateTupleDescCopy(RelationGetDescr(indexRel));
+		state->page = page;
+		state->offset = FirstOffsetNumber;
+		state->unusedItem = false;
+		state->done = false;
+		state->dtup = NULL;
+
+		index_close(indexRel, AccessShareLock);
+
+		for (attno = 1; attno <= state->tupdesc->natts; attno++)
+		{
+			Oid		output;
+			bool	isVarlena;
+
+			getTypeOutputInfo(state->tupdesc->attrs[attno - 1]->atttypid,
+							  &output, &isVarlena);
+			fmgr_info(output, &state->outputfn[attno - 1]);
+		}
+
+		fctx->user_fctx = state;
+		fctx->tuple_desc = BlessTupleDesc(tupdesc);
+
+		MemoryContextSwitchTo(mctx);
+	}
+
+	fctx = SRF_PERCALL_SETUP();
+	state = fctx->user_fctx;
+
+	if (!state->done)
+	{
+		HeapTuple	result;
+		Datum		values[6];
+		bool		nulls[6];
+
+		/*
+		 * This loop is called once for every attribute of every tuple in the
+		 * page.  At the start of a tuple, we get a NULL dtup; that's our
+		 * signal for obtaining and decoding the next one.  If that's not the
+		 * case, we output the next attribute.
+		 */
+		if (state->dtup == NULL)
+		{
+			MMTuple	   *tup;
+			MemoryContext mctx;
+			ItemId		itemId;
+
+			/* deformed tuple must live across calls */
+			mctx = MemoryContextSwitchTo(fctx->multi_call_memory_ctx);
+
+			/* verify item status: if there's no data, we can't decode */
+			itemId = PageGetItemId(state->page, state->offset);
+			if (ItemIdIsUsed(itemId))
+			{
+				tup = (MMTuple *) PageGetItem(state->page,
+											  PageGetItemId(state->page,
+															state->offset));
+				state->dtup = minmax_deform_tuple(state->tupdesc, tup);
+				state->attno = 1;
+				state->unusedItem = false;
+			}
+			else
+				state->unusedItem = true;
+
+			MemoryContextSwitchTo(mctx);
+		}
+		else
+			state->attno++;
+
+		MemSet(nulls, 0, sizeof(nulls));
+
+		if (state->unusedItem)
+		{
+			values[0] = UInt16GetDatum(state->offset);
+			nulls[1] = true;
+			nulls[2] = true;
+			nulls[3] = true;
+			nulls[4] = true;
+			nulls[5] = true;
+		}
+		else
+		{
+			int		att = state->attno - 1;
+
+			values[0] = UInt16GetDatum(state->offset);
+			values[1] = UInt16GetDatum(state->attno);
+			values[2] = BoolGetDatum(state->dtup->values[att].allnulls);
+			values[3] = BoolGetDatum(state->dtup->values[att].hasnulls);
+			if (!state->dtup->values[att].allnulls)
+			{
+				FmgrInfo   *outputfn = &state->outputfn[att];
+				MMValues   *mmvalues = &state->dtup->values[att];
+
+				values[4] = CStringGetTextDatum(OutputFunctionCall(outputfn,
+																   mmvalues->min));
+				values[5] = CStringGetTextDatum(OutputFunctionCall(outputfn,
+																   mmvalues->max));
+			}
+			else
+			{
+				nulls[4] = true;
+				nulls[5] = true;
+			}
+		}
+
+		result = heap_form_tuple(fctx->tuple_desc, values, nulls);
+
+		/*
+		 * If the item was unused, jump straight to the next one; otherwise,
+		 * the only cleanup needed here is to set our signal to go to the next
+		 * tuple in the following iteration, by freeing the current one.
+		 */
+		if (state->unusedItem)
+			state->offset = OffsetNumberNext(state->offset);
+		else if (state->attno >= state->tupdesc->natts)
+		{
+			pfree(state->dtup);
+			state->dtup = NULL;
+			state->offset = OffsetNumberNext(state->offset);
+		}
+
+		/*
+		 * If we're beyond the end of the page, set flag to end the function in
+		 * the following iteration.
+		 */
+		if (state->offset > PageGetMaxOffsetNumber(state->page))
+			state->done = true;
+
+		SRF_RETURN_NEXT(fctx, HeapTupleGetDatum(result));
+	}
+
+	SRF_RETURN_DONE(fctx);
+}
+#endif
+
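+/*
+ * Return general information stored in a minmax index metapage
+ *
+ * Usage: SELECT * FROM minmax_metapage_info(get_raw_page('idx', 0));
+ */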
+Datum
+minmax_metapage_info(PG_FUNCTION_ARGS)
+{
+	bytea	   *raw_page = PG_GETARG_BYTEA_P(0);
+	Page		page;
+	MinmaxMetaPageData *meta;
+	TupleDesc	tupdesc;
+	Datum		values[3];
+	bool		nulls[3];
+	ArrayBuildState *astate = NULL;
+	HeapTuple	htup;
+	int			i;
+
+	page = verify_minmax_page(raw_page, MINMAX_PAGETYPE_META, "metapage");
+
+	/* Build a tuple descriptor for our result type */
+	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+		elog(ERROR, "return type must be a row type");
+	tupdesc = BlessTupleDesc(tupdesc);
+
+	/* Extract values from the metapage */
+	meta = (MinmaxMetaPageData *) PageGetContents(page);
+	MemSet(nulls, 0, sizeof(nulls));
+	values[0] = CStringGetTextDatum(psprintf("0x%08X", meta->minmaxMagic));
+	values[1] = Int32GetDatum(meta->minmaxVersion);
+
+	/* Extract (possibly empty) list of revmap array page numbers. */
+	for (i = 0; i < MAX_REVMAP_ARRAYPAGES; i++)
+	{
+		BlockNumber	blkno;
+
+		blkno = meta->revmapArrayPages[i];
+		if (blkno == InvalidBlockNumber)
+			break;	/* XXX or continue? */
+		astate = accumArrayResult(astate, Int64GetDatum((int64) blkno),
+								  false, INT8OID, CurrentMemoryContext);
+	}
+	if (astate == NULL)
+		nulls[2] = true;
+	else
+		values[2] = makeArrayResult(astate, CurrentMemoryContext);
+
+	htup = heap_form_tuple(tupdesc, values, nulls);
+
+	PG_RETURN_DATUM(HeapTupleGetDatum(htup));
+}
+
+/*
+ * Return the BlockNumber array stored in a revmap array page
+ */
+Datum
+minmax_revmap_array_data(PG_FUNCTION_ARGS)
+{
+	bytea	   *raw_page = PG_GETARG_BYTEA_P(0);
+	Page		page;
+	ArrayBuildState *astate = NULL;
+	RevmapArrayContents *contents;
+	Datum		blkarr;
+	int			i;
+
+	page = verify_minmax_page(raw_page, MINMAX_PAGETYPE_REVMAP_ARRAY,
+							  "revmap array");
+
+	contents = (RevmapArrayContents *) PageGetContents(page);
+
+	for (i = 0; i < contents->rma_nblocks; i++)
+		astate = accumArrayResult(astate,
+								  Int64GetDatum((int64) contents->rma_blocks[i]),
+								  false, INT8OID, CurrentMemoryContext);
+	Assert(astate != NULL);
+
+	blkarr = makeArrayResult(astate, CurrentMemoryContext);
+	PG_RETURN_DATUM(blkarr);
+}
+
+/*
+ * Return the TID array stored in a minmax revmap page
+ */
+Datum
+minmax_revmap_data(PG_FUNCTION_ARGS)
+{
+	bytea	   *raw_page = PG_GETARG_BYTEA_P(0);
+	Page		page;
+	RevmapContents *contents;
+	TupleDesc	tupdesc;
+	Datum		values[2];
+	bool		nulls[2];
+	HeapTuple	htup;
+	ArrayBuildState *astate = NULL;
+	int			i;
+
+	page = verify_minmax_page(raw_page, MINMAX_PAGETYPE_REVMAP, "revmap");
+
+	/* Build a tuple descriptor for our result type */
+	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+		elog(ERROR, "return type must be a row type");
+	tupdesc = BlessTupleDesc(tupdesc);
+
+	/* Extract values from the revmap page */
+	contents = (RevmapContents *) PageGetContents(page);
+	MemSet(nulls, 0, sizeof(nulls));
+	values[0] = Int64GetDatum((int64) contents->rmr_logblk);
+
+	/* Extract (possibly empty) list of TIDs in this page. */
+	for (i = 0; i < REGULAR_REVMAP_PAGE_MAXITEMS; i++)
+	{
+		ItemPointer	tid;
+
+		tid = &contents->rmr_tids[i];
+		astate = accumArrayResult(astate,
+								  PointerGetDatum(tid),
+								  false, TIDOID, CurrentMemoryContext);
+	}
+	if (astate == NULL)
+		nulls[1] = true;
+	else
+		values[1] = makeArrayResult(astate, CurrentMemoryContext);
+
+	htup = heap_form_tuple(tupdesc, values, nulls);
+
+	PG_RETURN_DATUM(HeapTupleGetDatum(htup));
+}
diff --git a/contrib/pageinspect/pageinspect--1.2.sql b/contrib/pageinspect/pageinspect--1.2.sql
index 15e8e1e..b6410aa 100644
--- a/contrib/pageinspect/pageinspect--1.2.sql
+++ b/contrib/pageinspect/pageinspect--1.2.sql
@@ -99,6 +99,52 @@ AS 'MODULE_PATHNAME', 'bt_page_items'
 LANGUAGE C STRICT;
 
 --
+-- minmax_page_type()
+--
+CREATE FUNCTION minmax_page_type(IN page bytea)
+RETURNS text
+AS 'MODULE_PATHNAME', 'minmax_page_type'
+LANGUAGE C STRICT;
+
+--
+-- minmax_metapage_info()
+--
+CREATE FUNCTION minmax_metapage_info(IN page bytea, OUT magic text,
+	OUT version integer, OUT revmap_array_pages BIGINT[])
+AS 'MODULE_PATHNAME', 'minmax_metapage_info'
+LANGUAGE C STRICT;
+
+--
+-- minmax_page_items()
+--
+/* needs more work
+CREATE FUNCTION minmax_page_items(IN page bytea, IN index_oid oid,
+	OUT itemoffset int,
+	OUT attnum int,
+	OUT allnulls bool,
+	OUT hasnulls bool,
+	OUT min text,
+	OUT max text)
+RETURNS SETOF record
+AS 'MODULE_PATHNAME', 'minmax_page_items'
+LANGUAGE C STRICT;
+*/
+
+--
+-- minmax_revmap_array_data()
+--
+CREATE FUNCTION minmax_revmap_array_data(IN page bytea,
+	OUT revmap_pages BIGINT[])
+AS 'MODULE_PATHNAME', 'minmax_revmap_array_data'
+LANGUAGE C STRICT;
+
+--
+-- minmax_revmap_data()
+--
+CREATE FUNCTION minmax_revmap_data(IN page bytea,
+	OUT logblk BIGINT, OUT pages tid[])
+AS 'MODULE_PATHNAME', 'minmax_revmap_data'
+LANGUAGE C STRICT;
+
+--
 -- fsm_page_contents()
 --
 CREATE FUNCTION fsm_page_contents(IN page bytea)
diff --git a/contrib/pg_xlogdump/rmgrdesc.c b/contrib/pg_xlogdump/rmgrdesc.c
index cbcaaa6..8ffff06 100644
--- a/contrib/pg_xlogdump/rmgrdesc.c
+++ b/contrib/pg_xlogdump/rmgrdesc.c
@@ -13,6 +13,7 @@
 #include "access/gist_private.h"
 #include "access/hash.h"
 #include "access/heapam_xlog.h"
+#include "access/minmax_xlog.h"
 #include "access/multixact.h"
 #include "access/nbtree.h"
 #include "access/rmgr.h"
diff --git a/minmax-proposal b/minmax-proposal
new file mode 100644
index 0000000..ededbcd
--- /dev/null
+++ b/minmax-proposal
@@ -0,0 +1,306 @@
+Minmax Range Indexes
+====================
+
+Minmax indexes are a new access method intended to enable very fast scanning of
+extremely large tables.
+
+The essential idea of a minmax index is to keep track of summarizing values in
+consecutive groups of heap pages (page ranges); for example, the minimum and
+maximum values for datatypes with a btree opclass, or the bounding box for
+geometric types.  These values can be used by constraint exclusion to avoid
+scanning such pages, depending on query quals.
+
+The main drawback of this is having to update the stored summary values of each
+page range as tuples are inserted into them.
+
+Other database systems already have similar features. Some examples:
+
+* Oracle Exadata calls this "storage indexes"
+  http://richardfoote.wordpress.com/category/storage-indexes/
+
+* Netezza has "zone maps"
+  http://nztips.com/2010/11/netezza-integer-join-keys/
+
+* Infobright has this automatically within their "data packs" according to a
+  May 3rd, 2009 blog post
+  http://www.infobright.org/index.php/organizing_data_and_more_about_rough_data_contest/
+
+* MonetDB also uses this technique, according to a published paper
+  http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.108.2662
+  "Cooperative Scans: Dynamic Bandwidth Sharing in a DBMS"
+
+Index creation
+--------------
+
+To create a minmax index, we use the standard syntax:
+
+  CREATE INDEX foo_minmax_idx ON foo USING MINMAX (a, b, e);
+
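+The number of heap pages covered by each range is controlled by the
+pages_per_range storage parameter (default 128, per the reloptions entry added
+elsewhere in this patch); for example:
+
+  CREATE INDEX foo_minmax_idx ON foo USING MINMAX (a, b, e)
+    WITH (pages_per_range = 64);
+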
+Partial indexes are not currently supported; since a minmax index is concerned
+with summary values of the involved columns across all the pages in the table,
+it normally doesn't make sense to exclude some tuples.  Partial indexes might
+be useful if the index predicates are also used in queries, but we exclude
+them for now for conceptual simplicity.
+
+Expressional indexes can probably be supported in the future, but we disallow
+them initially for conceptual simplicity.
+
+Having multiple minmax indexes in the same table is acceptable, though most of
+the time it would make more sense to have a single index covering all the
+interesting columns.  Multiple indexes might be useful for columns added later.
+
+Access Method Design
+--------------------
+
+Since item pointers are not stored inside indexes of this type, it is not
+possible to support the amgettuple interface.  Instead, we only provide
+amgetbitmap support; scanning a relation using this index requires a recheck
+node on top.  The amgetbitmap routine returns a TIDBitmap comprising all pages
+in those page groups that match the query qualifications.  The recheck node
+prunes tuples that are not visible according to the query qualifications.
+
+For each supported datatype, we need an operator class with the following
+catalog entries:
+
+- support operators (pg_amop): same as btree (<, <=, =, >=, >)
+- support procedures (pg_amproc):
+  * "opcinfo" (procno 1) initializes a structure for index creation or scanning
+  * "addValue" (procno 2) takes an index tuple and a heap item, and possibly
+    changes the index tuple so that it includes the heap item values
+  * "consistent" (procno 3) takes an index tuple and query quals, and returns
+    whether the index tuple values match the query quals.
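+
+As an illustration, a "consistent" procedure for a btree-like opclass might be
+structured as below.  This is only a sketch: the argument list mirrors the
+FunctionCall5Coll() invocation in mmgetbitmap(), the MMValues field names
+follow mmfuncs.c, and mm_compare() is a hypothetical helper that applies the
+operator corresponding to the given strategy number.
+
+  Datum
+  mm_sortable_consistent(PG_FUNCTION_ARGS)
+  {
+      MinmaxDesc *mmdesc = (MinmaxDesc *) PG_GETARG_POINTER(0);
+      DeformedMMTuple *dtup = (DeformedMMTuple *) PG_GETARG_POINTER(1);
+      AttrNumber  attno = PG_GETARG_INT16(2);
+      uint16      strategy = PG_GETARG_UINT16(3);
+      Datum       query = PG_GETARG_DATUM(4);
+      MMValues   *val = &dtup->values[attno - 1];
+
+      /* a range containing only nulls cannot satisfy an operator qual */
+      if (val->allnulls)
+          PG_RETURN_BOOL(false);
+
+      switch (strategy)
+      {
+          case BTLessStrategyNumber:
+              /* some value can be < query only if the range minimum is */
+              PG_RETURN_BOOL(mm_compare(mmdesc, attno, val->min,
+                                        strategy, query));
+          case BTGreaterStrategyNumber:
+              /* some value can be > query only if the range maximum is */
+              PG_RETURN_BOOL(mm_compare(mmdesc, attno, val->max,
+                                        strategy, query));
+          /* remaining strategies test min, max, or both, analogously */
+          default:
+              PG_RETURN_BOOL(true);
+      }
+  }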
+
+These are used pervasively:
+
+- The optimizer requires them to evaluate queries, so that the index is chosen
+  when queries on the indexed table are planned.
+- During index construction (ambuild), they are used to determine the boundary
+  values for each page range.
+- During index updates (aminsert), they are used to determine whether the new
+  heap tuple matches the existing index tuple; and if not, they are used to
+  construct the new index tuple.
+
+In each index tuple (corresponding to one page range), we store:
+- for each indexed column of a datatype with a btree-opclass:
+  * minimum value across all tuples in the range
+  * maximum value across all tuples in the range
+  * are there nulls present in any tuple?
+  * are all the values in all tuples in the range null?
+
+Different datatypes may store other values instead of min/max; for example,
+geometric types might store a bounding box.  The NULL bits are always present.
+
+These null bits are stored in a single null bitmask whose length is twice the
+number of columns.
+
+With the default INDEX_MAX_KEYS of 32, and considering columns of 8-byte length
+types such as timestamptz or bigint, each tuple would be 522 bytes in length,
+which seems reasonable.  There are 6 extra bytes for padding between the null
+bitmask and the first data item, assuming 64-bit alignment; so the total size
+of such an index tuple would actually be 528 bytes.
+
+This maximum index tuple size is calculated as: mt_info (2 bytes) + null bitmap
+(8 bytes) + data values (8 bytes each * 32 columns * 2 values) = 522 bytes.
+
+(Of course, larger columns are possible, such as varchar, but creating minmax
+indexes on such columns seems of little practical usefulness.  Also, the
+usefulness of an index containing so many columns is dubious.)
+
+There can be gaps where some pages have no covering index entry.
+
+The Range Reverse Map
+---------------------
+
+To find out the index tuple for a particular page range, we have an internal
+structure we call the range reverse map.  This stores one TID per page range,
+which is the address of the index tuple summarizing that range.  Since these
+map entries are fixed size, it is possible to compute the address of the range
+map entry for any given heap page by simple arithmetic.
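+
+For illustration, the lookup arithmetic can be sketched as follows (using
+REGULAR_REVMAP_PAGE_MAXITEMS, the number of TIDs that fit in one revmap page):
+
+  rangeno      = heapBlk / pagesPerRange;       /* which page range */
+  revmapLogBlk = rangeno / REGULAR_REVMAP_PAGE_MAXITEMS;
+  revmapIndex  = rangeno % REGULAR_REVMAP_PAGE_MAXITEMS;
+  /* the range's TID is rmr_tids[revmapIndex] on logical page revmapLogBlk */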
+
+When a new heap tuple is inserted in a summarized page range, we compare the
+existing index tuple with the new heap tuple.  If the heap tuple is outside the
+summarization data given by the index tuple for any indexed column (or if the
+new heap tuple contains null values but the index tuple indicates there are no
+nulls), it is necessary to create a new index tuple with the new values.  To do
+this, a new index tuple is inserted, and the reverse range map is updated to
+point to it.  The old index tuple is left in place, for later garbage
+collection.  As an optimization, we sometimes overwrite the old index tuple in
+place with the new data, which avoids the need for later garbage collection.
+
+If the reverse range map points to an invalid TID, the corresponding page range
+is considered to be not summarized.
+
+To scan a table following a minmax index, we scan the reverse range map
+sequentially.  This yields index tuples in ascending page range order.  Query
+quals are matched to each index tuple; if they match, all pages within the page
+range are returned as part of the output TID bitmap.  If there's no match, the
+range is skipped.  Reverse range map entries returning invalid index TIDs, that
+is, unsummarized page ranges, are also returned in the TID bitmap.
+
+To store the range reverse map, we map its logical page numbers to physical
+pages.  We use a large two-level BlockNumber array for this: The metapage
+contains an array of BlockNumbers; each of these points to a "revmap array
+page".  Each revmap array page contains BlockNumbers, which in turn point to
+"revmap regular pages", which are the ones that contain the revmap data itself.
+Therefore, to find a given index tuple, we need to examine the metapage and
+obtain the revmap array page number; then read the array page.  From there we
+obtain the revmap regular page number, and that one contains the TID we're
+interested in.  As an optimization, regular revmap page number 0 is stored in
+physical page number 1, that is, the page just after the metapage.  This means
+that scanning a table of about 1300 page ranges (the number of TIDs that fit in
+a single 8kB page) does not require accessing the metapage at all.
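+
+In code form, the translation might be sketched like this (where
+REVMAP_ARRAY_PAGE_MAXITEMS is a hypothetical name for the number of
+BlockNumbers that fit in one revmap array page):
+
+  if (revmapLogBlk == 0)
+      physBlk = 1;    /* optimization: stored right after the metapage */
+  else
+  {
+      /* from the metapage, find the revmap array page ... */
+      arrayBlk = metadata->revmapArrayPages[revmapLogBlk / REVMAP_ARRAY_PAGE_MAXITEMS];
+      /* ... and from that array page's contents, the regular revmap page */
+      physBlk = contents->rma_blocks[revmapLogBlk % REVMAP_ARRAY_PAGE_MAXITEMS];
+  }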
+
+When tuples are added to unsummarized pages, nothing needs to happen.
+
+Heap tuples can be removed from anywhere without restriction.  It might be
+useful to mark the corresponding index tuple somehow, if the heap tuple is one
+of the constraining values of the summary data (i.e. either min or max in the
+case of a btree-opclass-bearing datatype), so that in the future we are aware
+of the need to re-execute summarization on that range, leading to a possible
+tightening of the summary values.
+
+Index entries that are not referenced from the revmap can be removed from the
+main fork.  This currently happens at amvacuumcleanup, though it could be
+carried out separately; no heap scan is necessary to determine which tuples
+are unreachable.
+
+Summarization
+-------------
+
+At index creation time, the whole table is scanned; for each page range the
+summarizing values of each indexed column and nulls bitmap are collected and
+stored in the index.
+
+Once in a while, it is necessary to summarize a bunch of unsummarized pages
+(because the table has grown since the index was created), or re-summarize a
+range that has been marked invalid.  This is simple: scan the page range
+calculating the summary values for each indexed column, then insert the new
+index entry at the end of the index.
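+
+In terms of this patch's API, summarizing one range can be sketched as below;
+error handling, snapshot details and the index insertion itself are omitted,
+and heap_setscanlimits() is the helper added to heapam.c by this patch:
+
+  scan = heap_beginscan(heapRel, snapshot, 0, NULL);
+  heap_setscanlimits(scan, rangeStart, pagesPerRange);
+  while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
+  {
+      /* accumulate the tuple's values via the addValue support procedure */
+  }
+  heap_endscan(scan);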
+
+The easiest way to handle this seems to be to have vacuum do it.  That way we
+can simply do re-summarization in the amvacuumcleanup routine.  Other answers
+would require a separate AM routine, which appears unwarranted at this stage.
+
+Vacuuming
+---------
+
+Vacuuming a table that has a minmax index does not represent a significant
+challenge.  Since no heap TIDs are stored, it's not necessary to scan the index
+when heap tuples are removed.  It might be that some min() value can be
+incremented, or some max() value can be decremented; but this would represent
+an optimization opportunity only, not a correctness issue.  Perhaps it's
+simpler to represent this as the need to re-run summarization on the affected
+page range.
+
+Note that if there are no indexes on the table other than the minmax index,
+usage of maintenance_work_mem by vacuum can be decreased significantly, because
+no detailed index scan needs to take place (and thus it's not necessary for
+vacuum to save TIDs to remove).  This optimization opportunity is best left for
+future improvement.
+
+Locking considerations
+----------------------
+
+To read the TID during an index scan, we follow this protocol:
+
+* read revmap page
+* obtain share lock on the revmap buffer
+* read the TID
+* obtain share lock on buffer of main fork
+* LockTuple the TID (using the index as relation).  A shared lock is
+  sufficient.  We need the LockTuple to prevent VACUUM from recycling
+  the index tuple; see below.
+* release revmap buffer lock
+* read the index tuple
+* release the tuple lock
+* release main fork buffer lock
+
+
+To update the summary tuple for a page range, we use this protocol:
+
+* insert a new index tuple somewhere in the main fork; note its TID
+* read revmap page
+* obtain exclusive lock on revmap buffer
+* write the TID
+* release lock
+
+This ensures no concurrent reader can obtain a partially-written TID.
+Note we don't need a tuple lock here.  Concurrent scans don't have to
+worry about whether they got the old or new index tuple: if they get the
+old one, the tighter values are okay from a correctness standpoint because
+due to MVCC they can't possibly see the just-inserted heap tuples anyway.
+
+
+For vacuuming, we need to figure out which index tuples are no longer
+referenced from the reverse range map.  This requires some brute force,
+but is simple:
+
+1) scan the complete index, storing each existing TID in a dynahash.
+   The hash key is the TID; the hash value is a boolean, initially set to false.
+2) scan the complete revmap sequentially, read the TIDs on each page.  Share
+   lock on each page is sufficient.  For each TID so obtained, grab the
+   element from the hash and update the boolean to true.
+3) Scan the index again; for each tuple found, search the hash table.
+   If the tuple is not present in the hash, it must have been added after our
+   initial scan; ignore it.  If the tuple is present in the hash and its flag
+   is true, then the tuple is referenced from the revmap; ignore it.  If the
+   flag is false, then the index tuple is no longer referenced by the revmap,
+   but it could be about to be accessed by a concurrent scan.  Do
+   ConditionalLockTuple; if this fails, ignore the tuple (it's in use), and it
+   will be deleted by a future vacuum.  If the lock is acquired, then we can
+   safely remove the index tuple.
+4) Index pages with free space can be detected by this second scan.  Register
+   those with the FSM.
+
+Note this doesn't require scanning the heap at all, or being involved in
+the heap's cleanup procedure.  Also, there is no need to LockBufferForCleanup,
+which is a nice property because index scans keep pages pinned for long
+periods.
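+
+A sketch of the dynahash bookkeeping for steps 1 and 2 (the entry layout and
+names are illustrative; hash_create/hash_search are the standard dynahash
+calls):
+
+  typedef struct mm_vacuum_entry
+  {
+      ItemPointerData tid;        /* key: TID of an index tuple */
+      bool            referenced; /* set once the revmap points at it */
+  } mm_vacuum_entry;
+
+  HASHCTL     ctl;
+  HTAB       *tidhash;
+  mm_vacuum_entry *entry;
+  bool        found;
+
+  memset(&ctl, 0, sizeof(ctl));
+  ctl.keysize = sizeof(ItemPointerData);
+  ctl.entrysize = sizeof(mm_vacuum_entry);
+  ctl.hash = tag_hash;
+  tidhash = hash_create("minmax vacuum tids", 1024, &ctl,
+                        HASH_ELEM | HASH_FUNCTION);
+
+  /* step 1: each index tuple's TID enters the hash, initially unreferenced */
+  entry = hash_search(tidhash, &itup_tid, HASH_ENTER, &found);
+  entry->referenced = false;
+
+  /* step 2: each TID read from the revmap marks its entry as referenced */
+  entry = hash_search(tidhash, &revmap_tid, HASH_FIND, &found);
+  if (found)
+      entry->referenced = true;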
+
+
+
+Optimizer
+---------
+
+In order to make this all work, the only thing we need to do is provide a good
+enough opclass and amcostestimate function.  With those, the optimizer is able
+to pick up the index on its own.
+
+
+Open questions
+--------------
+
+* Same-size page ranges?
+  Current related literature seems to consider that each "index entry" in a
+  minmax index must cover the same number of pages.  There doesn't seem to be a
+  hard reason for this to be so; it might make sense to allow the index to
+  self-tune so that some index entries cover smaller page ranges, if this allows
+  the summary values to be more compact.  This would incur larger minmax
+  overhead for the index itself, but might allow better pruning of page ranges
+  during scan.  In the limit of one index tuple per page, the index itself would
+  occupy too much space, even though we would be able to skip reading the most
+  heap pages, because the summary values are tight; in the opposite limit of
+  a single tuple that summarizes the whole table, we wouldn't be able to prune
+  anything even though the index is very small.  This can probably be made to work
+  by using the reverse range map as an index in itself.
+
+* More compact representation for TIDBitmap?
+  TIDBitmap is the structure used to represent bitmap scans.  The
+  representation of lossy page ranges is not optimal for our purposes, because
+  it uses a Bitmapset to represent pages in the range; since we're going to return
+  all pages in a large range, it might be more convenient to allow for a
+  struct that uses start and end page numbers to represent the range, instead.
+
+
+
+References:
+
+Email thread on pgsql-hackers
+  http://www.postgresql.org/message-id/1199296574.7260.149.camel@ebony.site
+  From: Simon Riggs
+  To: pgsql-hackers
+  Subject: Dynamic Partitioning using Segment Visibility Map
+
+http://wiki.postgresql.org/wiki/Segment_Exclusion
+http://wiki.postgresql.org/wiki/Segment_Visibility_Map
+
diff --git a/src/backend/access/Makefile b/src/backend/access/Makefile
index c32088f..db46539 100644
--- a/src/backend/access/Makefile
+++ b/src/backend/access/Makefile
@@ -8,6 +8,6 @@ subdir = src/backend/access
 top_builddir = ../../..
 include $(top_builddir)/src/Makefile.global
 
-SUBDIRS	    = common gin gist hash heap index nbtree rmgrdesc spgist transam
+SUBDIRS	    = common gin gist hash heap index minmax nbtree rmgrdesc spgist transam
 
 include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index c7ad6f9..1bef404 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -209,6 +209,13 @@ static relopt_int intRelOpts[] =
 			RELOPT_KIND_HEAP | RELOPT_KIND_TOAST
 		}, -1, 0, 2000000000
 	},
+	{
+		{
+			"pages_per_range",
+			"Number of pages that each page range covers in a Minmax index",
+			RELOPT_KIND_MINMAX
+		}, 128, 1, 131072
+	},
 
 	/* list terminator */
 	{{NULL}}
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index d731f98..78f35b9 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -271,6 +271,8 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
 		scan->rs_startblock = 0;
 	}
 
+	scan->rs_initblock = 0;
+	scan->rs_numblocks = InvalidBlockNumber;
 	scan->rs_inited = false;
 	scan->rs_ctup.t_data = NULL;
 	ItemPointerSetInvalid(&scan->rs_ctup.t_self);
@@ -296,6 +298,14 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
 		pgstat_count_heap_scan(scan->rs_rd);
 }
 
+/*
+ * heap_setscanlimits - restrict range of a heapscan
+ *
+ * startBlk is the page to start at; numBlks is the number of pages to scan
+ * from there (InvalidBlockNumber means scan the rest of the relation).
+ */
+void
+heap_setscanlimits(HeapScanDesc scan, BlockNumber startBlk, BlockNumber numBlks)
+{
+	scan->rs_startblock = startBlk;
+	scan->rs_initblock = startBlk;
+	scan->rs_numblocks = numBlks;
+}
+
 /*
  * heapgetpage - subroutine for heapgettup()
  *
@@ -636,7 +646,8 @@ heapgettup(HeapScanDesc scan,
 		 */
 		if (backward)
 		{
-			finished = (page == scan->rs_startblock);
+			finished = (page == scan->rs_startblock) ||
+				(scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
 			if (page == 0)
 				page = scan->rs_nblocks;
 			page--;
@@ -646,7 +657,8 @@ heapgettup(HeapScanDesc scan,
 			page++;
 			if (page >= scan->rs_nblocks)
 				page = 0;
-			finished = (page == scan->rs_startblock);
+			finished = (page == scan->rs_startblock) ||
+				(scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
 
 			/*
 			 * Report our new scan position for synchronization purposes. We
@@ -897,7 +909,8 @@ heapgettup_pagemode(HeapScanDesc scan,
 		 */
 		if (backward)
 		{
-			finished = (page == scan->rs_startblock);
+			finished = (page == scan->rs_startblock) ||
+				(scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
 			if (page == 0)
 				page = scan->rs_nblocks;
 			page--;
@@ -907,7 +920,8 @@ heapgettup_pagemode(HeapScanDesc scan,
 			page++;
 			if (page >= scan->rs_nblocks)
 				page = 0;
-			finished = (page == scan->rs_startblock);
+			finished = (page == scan->rs_startblock) ||
+				(scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
 
 			/*
 			 * Report our new scan position for synchronization purposes. We
diff --git a/src/backend/access/minmax/Makefile b/src/backend/access/minmax/Makefile
new file mode 100644
index 0000000..2c80a20
--- /dev/null
+++ b/src/backend/access/minmax/Makefile
@@ -0,0 +1,17 @@
+#-------------------------------------------------------------------------
+#
+# Makefile--
+#    Makefile for access/minmax
+#
+# IDENTIFICATION
+#    src/backend/access/minmax/Makefile
+#
+#-------------------------------------------------------------------------
+
+subdir = src/backend/access/minmax
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+
+OBJS = minmax.o mmrevmap.o mmtuple.o mmxlog.o mmsortable.o
+
+include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/access/minmax/minmax.c b/src/backend/access/minmax/minmax.c
new file mode 100644
index 0000000..b07a90b
--- /dev/null
+++ b/src/backend/access/minmax/minmax.c
@@ -0,0 +1,1568 @@
+/*
+ * minmax.c
+ *		Implementation of Minmax indexes for Postgres
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *	  src/backend/access/minmax/minmax.c
+ *
+ * TODO
+ *		* support collatable datatypes
+ *		* ScalarArrayOpExpr
+ *		* Make use of the stored NULL bits
+ *		* we can support unlogged indexes now
+ */
+#include "postgres.h"
+
+#include "access/htup_details.h"
+#include "access/minmax.h"
+#include "access/minmax_internal.h"
+#include "access/minmax_page.h"
+#include "access/minmax_revmap.h"
+#include "access/minmax_tuple.h"
+#include "access/minmax_xlog.h"
+#include "access/reloptions.h"
+#include "access/relscan.h"
+#include "access/xlogutils.h"
+#include "catalog/index.h"
+#include "catalog/pg_operator.h"
+#include "commands/vacuum.h"
+#include "miscadmin.h"
+#include "pgstat.h"
+#include "storage/bufmgr.h"
+#include "storage/freespace.h"
+#include "storage/lmgr.h"
+#include "utils/datum.h"
+#include "utils/lsyscache.h"
+#include "utils/memutils.h"
+#include "utils/syscache.h"
+
+
+/*
+ * We use a MMBuildState during initial construction of a Minmax index.
+ * The running state is kept in a DeformedMMTuple.
+ */
+typedef struct MMBuildState
+{
+	Relation	irel;
+	int			numtuples;
+	Buffer		currentInsertBuf;
+	BlockNumber pagesPerRange;
+	BlockNumber currRangeStart;
+	mmRevmapAccess *rmAccess;
+	MinmaxDesc *mmDesc;
+	DeformedMMTuple *dtuple;
+} MMBuildState;
+
+/*
+ * Struct used as "opaque" during index scans
+ */
+typedef struct MinmaxOpaque
+{
+	BlockNumber		pagesPerRange;
+	mmRevmapAccess *rmAccess;
+	MinmaxDesc	   *mmDesc;
+} MinmaxOpaque;
+
+static MinmaxDesc *minmax_build_mmdesc(Relation rel);
+static MMBuildState *initialize_mm_buildstate(Relation idxRel,
+						 mmRevmapAccess *rmAccess, BlockNumber pagesPerRange);
+static void remove_deletable_tuples(Relation idxRel, BlockNumber heapNumBlocks,
+						BufferAccessStrategy strategy,
+						BlockNumber **nonsummed, int *numnonsummed);
+static void rerun_summarization(Relation idxRel, Relation heapRel,
+					mmRevmapAccess *rmAccess, BlockNumber pagesPerRange,
+					BlockNumber *nonsummarized, int numnonsummarized);
+static void mm_doinsert(Relation idxrel, mmRevmapAccess *rmAccess,
+			Buffer *buffer, BlockNumber heapblkno, MMTuple *tup, Size itemsz);
+static bool mm_getinsertbuffer(Relation irel, Buffer *buffer, Size itemsz);
+static void form_and_insert_tuple(MMBuildState *mmstate);
+static int qsortCompareItemPointers(const void *a, const void *b);
+
+
+/*
+ * A tuple in the heap is being inserted.  To keep a minmax index up to date,
+ * we need to obtain the relevant index tuple, compare its min()/max() stored
+ * values with those of the new tuple; if the tuple values are in range,
+ * there's nothing to do; otherwise we need to update the index (either by
+ * a new index tuple and repointing the revmap, or by overwriting the existing
+ * index tuple).
+ *
+ * If the range is not currently summarized (i.e. the revmap returns InvalidTid
+ * for it), there's nothing to do either.
+ */
+Datum
+mminsert(PG_FUNCTION_ARGS)
+{
+	Relation	idxRel = (Relation) PG_GETARG_POINTER(0);
+	Datum	   *values = (Datum *) PG_GETARG_POINTER(1);
+	bool	   *nulls = (bool *) PG_GETARG_POINTER(2);
+	ItemPointer heaptid = (ItemPointer) PG_GETARG_POINTER(3);
+
+	/* we ignore the rest of our arguments */
+	MinmaxDesc *mmdesc;
+	mmRevmapAccess *rmAccess;
+	ItemId		origlp;
+	MMTuple    *mmtup;
+	DeformedMMTuple *dtup;
+	ItemPointerData idxtid;
+	BlockNumber heapBlk;
+	BlockNumber iblk;
+	OffsetNumber ioff;
+	Buffer		buf;
+	IndexInfo  *indexInfo;
+	Page		page;
+	int			keyno;
+	bool		need_insert = false;
+
+	rmAccess = mmRevmapAccessInit(idxRel, NULL);
+
+	heapBlk = ItemPointerGetBlockNumber(heaptid);
+	mmGetHeapBlockItemptr(rmAccess, heapBlk, &idxtid);
+	/* tuple lock on idxtid is grabbed by mmGetHeapBlockItemptr */
+
+	if (!ItemPointerIsValid(&idxtid))
+	{
+		/* nothing to do, range is unsummarized */
+		mmRevmapAccessTerminate(rmAccess);
+		return BoolGetDatum(false);
+	}
+
+	indexInfo = BuildIndexInfo(idxRel);
+	mmdesc = minmax_build_mmdesc(idxRel);
+
+	iblk = ItemPointerGetBlockNumber(&idxtid);
+	ioff = ItemPointerGetOffsetNumber(&idxtid);
+	Assert(iblk != InvalidBlockNumber);
+	buf = ReadBuffer(idxRel, iblk);
+
+	LockBuffer(buf, BUFFER_LOCK_SHARE);
+	UnlockTuple(idxRel, &idxtid, ShareLock);
+	page = BufferGetPage(buf);
+	origlp = PageGetItemId(page, ioff);
+	mmtup = (MMTuple *) PageGetItem(page, origlp);
+
+	dtup = minmax_deform_tuple(mmdesc, mmtup);
+
+	/*
+	 * Compare the key values of the new tuple to the stored index values; our
+	 * deformed tuple will get updated if the new tuple doesn't fit the
+	 * original range (note this means we can't break out of the loop early).
+	 * Make a note of whether this happens, so that we know to insert the
+	 * modified tuple later.
+	 */
+	for (keyno = 0; keyno < indexInfo->ii_NumIndexAttrs; keyno++)
+	{
+		Datum	result;
+		FmgrInfo   *addValue;
+
+		addValue = index_getprocinfo(idxRel, keyno + 1,
+									 MINMAX_PROCNUM_ADDVALUE);
+
+		result = FunctionCall5Coll(addValue,
+								   PG_GET_COLLATION(),
+								   PointerGetDatum(mmdesc),
+								   PointerGetDatum(dtup),
+								   UInt16GetDatum(keyno + 1),
+								   values[keyno],
+								   BoolGetDatum(nulls[keyno]));
+		/* if that returned true, we need to insert the updated tuple */
+		need_insert |= DatumGetBool(result);
+	}
+
+	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+
+	if (need_insert)
+	{
+		Size		tupsz;
+		MMTuple    *tup;
+
+		tup = minmax_form_tuple(mmdesc, dtup, &tupsz);
+
+		/*
+		 * If the size of the original tuple is greater or equal to the new
+		 * index tuple, we can overwrite.  This saves regular page bloat, and
+		 * also saves revmap traffic.  This might leave some unused space
+		 * before the start of the next tuple, but we don't worry about that
+		 * here.
+		 *
+		 * We avoid doing this when the itempointer of the index tuple would
+		 * change, because that would require an update to the revmap while
+		 * holding exclusive lock on this page, which would reduce concurrency.
+		 *
+		 * Note that we continue to access 'origlp' here, even though there
+		 * was an interval during which the page wasn't locked.  Since we hold
+		 * pin on the page, this is okay -- the buffer cannot go away from
+		 * under us, and also tuples cannot be shuffled around.
+		 */
+		if (tupsz <= ItemIdGetLength(origlp))
+		{
+			LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+
+			START_CRIT_SECTION();
+			PageOverwriteItemData(BufferGetPage(buf),
+								  ioff,
+								  (Item) tup, tupsz);
+			MarkBufferDirty(buf);
+
+			/* XLOG stuff */
+			if (RelationNeedsWAL(idxRel))
+			{
+				xl_minmax_insert	xlrec;
+				XLogRecPtr	recptr;
+				XLogRecData	rdata[2];
+				uint8		info = XLOG_MINMAX_INSERT;
+
+				xlrec.target.node = idxRel->rd_node;
+				xlrec.target.tid = idxtid;
+				xlrec.overwrite = true;
+				rdata[0].data = (char *) &xlrec;
+				rdata[0].len = SizeOfMinmaxInsert;
+				rdata[0].buffer = InvalidBuffer;
+				rdata[0].next = &(rdata[1]);
+
+				rdata[1].data = (char *) tup;
+				rdata[1].len = tupsz;
+				rdata[1].buffer = buf;
+				rdata[1].buffer_std = true;
+				rdata[1].next = NULL;
+
+				recptr = XLogInsert(RM_MINMAX_ID, info, rdata);
+
+				PageSetLSN(page, recptr);
+			}
+
+			END_CRIT_SECTION();
+
+			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+		}
+		else
+		{
+			/*
+			 * The new tuple is larger than the original one, so we must insert
+			 * a new one the slow way.
+			 */
+			mm_doinsert(idxRel, rmAccess, &buf, heapBlk, tup, tupsz);
+
+#ifdef NOT_YET
+			/*
+			 * Possible optimization: if we can grab an exclusive lock on the
+			 * buffer containing the old tuple right away, we can also seize
+			 * the opportunity to prune the old tuple and avoid some bloat.
+			 * This is not necessary for correctness.
+			 */
+			if (ConditionalLockBuffer(buf))
+			{
+				/* prune the old tuple */
+
+				LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+			}
+#endif
+		}
+	}
+
+	ReleaseBuffer(buf);
+
+	mmRevmapAccessTerminate(rmAccess);
+
+	return BoolGetDatum(false);
+}
+
+/*
+ * Initialize state for a Minmax index scan.
+ *
+ * We read the metapage here to determine the pages-per-range number that this
+ * index was built with.  Note that since this cannot be changed while we're
+ * holding lock on the index, it's not necessary to recompute it during
+ * mmrescan.
+ */
+Datum
+mmbeginscan(PG_FUNCTION_ARGS)
+{
+	Relation	r = (Relation) PG_GETARG_POINTER(0);
+	int			nkeys = PG_GETARG_INT32(1);
+	int			norderbys = PG_GETARG_INT32(2);
+	IndexScanDesc scan;
+	MinmaxOpaque *opaque;
+
+	scan = RelationGetIndexScan(r, nkeys, norderbys);
+
+	opaque = (MinmaxOpaque *) palloc(sizeof(MinmaxOpaque));
+	opaque->rmAccess = mmRevmapAccessInit(r, &opaque->pagesPerRange);
+	scan->opaque = opaque;
+
+	PG_RETURN_POINTER(scan);
+}
+
+/*
+ * Execute the index scan.
+ *
+ * This works by reading index TIDs from the revmap, and obtaining the index
+ * tuples pointed to by them; the summary values in the index tuples are
+ * compared to the scan keys.  We return into the TID bitmap all the pages in
+ * ranges corresponding to index tuples that match the scan keys.
+ *
+ * If a TID from the revmap is read as InvalidTID, we know that range is
+ * unsummarized.  Pages in those ranges need to be returned regardless of scan
+ * keys.
+ *
+ * XXX see _bt_first for more ideas on processing the scan key.
+ */
+Datum
+mmgetbitmap(PG_FUNCTION_ARGS)
+{
+	IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+	TIDBitmap  *tbm = (TIDBitmap *) PG_GETARG_POINTER(1);
+	Relation	idxRel = scan->indexRelation;
+	Buffer		currIdxBuf = InvalidBuffer;
+	MinmaxDesc *mmdesc = minmax_build_mmdesc(idxRel);
+	Oid			heapOid;
+	Relation	heapRel;
+	MinmaxOpaque *opaque;
+	BlockNumber nblocks;
+	BlockNumber heapBlk;
+	int			totalpages = 0;
+	int			keyno;
+	FmgrInfo   *consistentFn;
+
+	opaque = (MinmaxOpaque *) scan->opaque;
+	pgstat_count_index_scan(idxRel);
+
+	/*
+	 * XXX We need to know the size of the table so that we know how long to
+	 * iterate on the revmap.  There's room for improvement here, in that we
+	 * could have the revmap tell us when to stop iterating.
+	 */
+	heapOid = IndexGetRelation(RelationGetRelid(idxRel), false);
+	heapRel = heap_open(heapOid, AccessShareLock);
+	nblocks = RelationGetNumberOfBlocks(heapRel);
+	heap_close(heapRel, AccessShareLock);
+
+	/*
+	 * Obtain consistent functions for all indexed columns.  Maybe it'd be
+	 * possible to do this lazily only the first time we see a scan key that
+	 * involves each particular attribute.
+	 */
+	consistentFn = palloc(sizeof(FmgrInfo) * mmdesc->md_tupdesc->natts);
+	for (keyno = 0; keyno < mmdesc->md_tupdesc->natts; keyno++)
+	{
+		FmgrInfo   *tmp;
+
+		tmp = index_getprocinfo(idxRel, keyno + 1, MINMAX_PROCNUM_CONSISTENT);
+		fmgr_info_copy(&consistentFn[keyno], tmp, CurrentMemoryContext);
+	}
+
+	/*
+	 * Now scan the revmap.  We start by querying for heap page 0,
+	 * incrementing by the number of pages per range; this gives us a full
+	 * view of the table.
+	 */
+	for (heapBlk = 0; heapBlk < nblocks; heapBlk += opaque->pagesPerRange)
+	{
+		ItemPointerData itupptr;
+		bool		addrange;
+
+		mmGetHeapBlockItemptr(opaque->rmAccess, heapBlk, &itupptr);
+
+		/*
+		 * For revmap items that return InvalidTID, we must return the whole
+		 * range; otherwise, fetch the index item and compare it to the scan
+		 * keys.
+		 */
+		if (!ItemPointerIsValid(&itupptr))
+		{
+			addrange = true;
+		}
+		else
+		{
+			Page		page;
+			OffsetNumber idxoffno;
+			BlockNumber idxblkno;
+			MMTuple    *tup;
+			DeformedMMTuple *dtup;
+			int			keyno;
+
+			/*
+			 * Obtain the buffer that contains the tuple.  We might already
+			 * have it pinned.
+			 */
+			idxoffno = ItemPointerGetOffsetNumber(&itupptr);
+			idxblkno = ItemPointerGetBlockNumber(&itupptr);
+			if (currIdxBuf == InvalidBuffer ||
+				idxblkno != BufferGetBlockNumber(currIdxBuf))
+			{
+				if (currIdxBuf != InvalidBuffer)
+					UnlockReleaseBuffer(currIdxBuf);
+
+				Assert(idxblkno != InvalidBlockNumber);
+				currIdxBuf = ReadBuffer(idxRel, idxblkno);
+				LockBuffer(currIdxBuf, BUFFER_LOCK_SHARE);
+			}
+
+			/*
+			 * We now have the containing buffer locked, so we can release the
+			 * tuple lock.
+			 */
+			UnlockTuple(idxRel, &itupptr, ShareLock);
+
+			page = BufferGetPage(currIdxBuf);
+			tup = (MMTuple *) PageGetItem(page, PageGetItemId(page, idxoffno));
+			dtup = minmax_deform_tuple(mmdesc, tup);
+
+			/*
+			 * Compare scan keys with summary values stored for the range.  If
+			 * scan keys are matched, the page range must be added to the
+			 * bitmap.  We initially assume the range needs to be added; in
+			 * particular this serves the case where there are no keys.
+			 */
+			addrange = true;
+			for (keyno = 0; keyno < scan->numberOfKeys; keyno++)
+			{
+				ScanKey		key = &scan->keyData[keyno];
+				AttrNumber	keyattno = key->sk_attno;
+				Datum		add;
+
+				/*
+				 * The collation of the scan key must match the collation used
+				 * in the index column.  Otherwise we shouldn't be using this
+				 * index ...
+				 */
+				Assert(key->sk_collation ==
+					   mmdesc->md_tupdesc->attrs[keyattno - 1]->attcollation);
+
+				/*
+				 * Check whether the scan key is consistent with the page range
+				 * values; if so, have the pages in the range added to the
+				 * output bitmap.
+				 *
+				 * When there are multiple scan keys, failure to meet the
+				 * criteria for a single one of them is enough to discard the
+				 * range as a whole, so break out of the loop as soon as a
+				 * false return value is obtained.
+				 */
+				add = FunctionCall5Coll(&consistentFn[keyattno - 1],
+										key->sk_collation,
+										PointerGetDatum(mmdesc),
+										PointerGetDatum(dtup),
+										Int16GetDatum(keyattno),
+										UInt16GetDatum(key->sk_strategy),
+										key->sk_argument);
+				addrange = DatumGetBool(add);
+				if (!addrange)
+					break;
+			}
+
+			pfree(dtup);
+		}
+
+		/* add the pages in the range to the output bitmap, if needed */
+		if (addrange)
+		{
+			BlockNumber pageno;
+
+			for (pageno = heapBlk;
+				 pageno <= heapBlk + opaque->pagesPerRange - 1;
+				 pageno++)
+			{
+				tbm_add_page(tbm, pageno);
+				totalpages++;
+			}
+		}
+	}
+
+	if (currIdxBuf != InvalidBuffer)
+		UnlockReleaseBuffer(currIdxBuf);
+
+	/*
+	 * XXX We have an approximation of the number of *pages* that our scan
+	 * returns, but we don't have a precise idea of the number of heap tuples
+	 * involved.
+	 */
+	PG_RETURN_INT64(totalpages * 10);
+}
+
+/*
+ * Re-initialize state for a minmax index scan
+ */
+Datum
+mmrescan(PG_FUNCTION_ARGS)
+{
+	IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+	ScanKey		scankey = (ScanKey) PG_GETARG_POINTER(1);
+	/* other arguments ignored */
+
+	if (scankey && scan->numberOfKeys > 0)
+		memmove(scan->keyData, scankey,
+				scan->numberOfKeys * sizeof(ScanKeyData));
+
+	PG_RETURN_VOID();
+}
+
+/*
+ * Close down a minmax index scan
+ */
+Datum
+mmendscan(PG_FUNCTION_ARGS)
+{
+	IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+	MinmaxOpaque *opaque = (MinmaxOpaque *) scan->opaque;
+
+	mmRevmapAccessTerminate(opaque->rmAccess);
+	pfree(opaque);
+
+	PG_RETURN_VOID();
+}
+
+Datum
+mmmarkpos(PG_FUNCTION_ARGS)
+{
+	elog(ERROR, "MinMax does not support mark/restore");
+	PG_RETURN_VOID();
+}
+
+Datum
+mmrestrpos(PG_FUNCTION_ARGS)
+{
+	elog(ERROR, "MinMax does not support mark/restore");
+	PG_RETURN_VOID();
+}
+
+/*
+ * Per-heap-tuple callback for IndexBuildHeapScan.
+ *
+ * Note we don't worry about the page range at the end of the table here; it is
+ * present in the build state struct after we're called the last time, but not
+ * inserted into the index.  The caller must insert it, if appropriate.
+ */
+static void
+mmbuildCallback(Relation index,
+				HeapTuple htup,
+				Datum *values,
+				bool *isnull,
+				bool tupleIsAlive,
+				void *state)
+{
+	MMBuildState *mmstate = (MMBuildState *) state;
+	BlockNumber thisblock;
+	int			i;
+
+	thisblock = ItemPointerGetBlockNumber(&htup->t_self);
+
+	/*
+	 * If we're in a new block which belongs to the next range, summarize what
+	 * we've got and start afresh.
+	 */
+	if (thisblock > (mmstate->currRangeStart + mmstate->pagesPerRange - 1))
+	{
+		MINMAX_elog(DEBUG2, "mmbuildCallback: completed a range: %u--%u",
+					mmstate->currRangeStart,
+					mmstate->currRangeStart + mmstate->pagesPerRange);
+
+		/* create the index tuple and insert it */
+		form_and_insert_tuple(mmstate);
+
+		/* set state to correspond to the next range */
+		mmstate->currRangeStart += mmstate->pagesPerRange;
+
+		/* re-initialize state for it */
+		minmax_dtuple_initialize(mmstate->dtuple, mmstate->mmDesc);
+	}
+
+	/* Accumulate the current tuple into the running state */
+	mmstate->dtuple->dt_seentup = true;
+	for (i = 0; i < mmstate->mmDesc->md_tupdesc->natts; i++)
+	{
+		FmgrInfo   *addValue;
+
+		/* FIXME must be cached somewhere */
+		addValue = index_getprocinfo(index, i + 1,
+									 MINMAX_PROCNUM_ADDVALUE);
+
+		/*
+		 * Update dtuple state, if and as necessary.
+		 */
+		FunctionCall5Coll(addValue,
+						  mmstate->mmDesc->md_tupdesc->attrs[i]->attcollation,
+						  PointerGetDatum(mmstate->mmDesc),
+						  PointerGetDatum(mmstate->dtuple),
+						  UInt16GetDatum(i + 1), values[i],
+						  BoolGetDatum(isnull[i]));
+	}
+}
+
+/*
+ * mmbuild() -- build a new minmax index.
+ */
+Datum
+mmbuild(PG_FUNCTION_ARGS)
+{
+	Relation	heap = (Relation) PG_GETARG_POINTER(0);
+	Relation	index = (Relation) PG_GETARG_POINTER(1);
+	IndexInfo  *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
+	IndexBuildResult *result;
+	double		reltuples;
+	mmRevmapAccess *rmAccess;
+	MMBuildState *mmstate;
+	Buffer		meta;
+	BlockNumber pagesPerRange;
+
+	/*
+	 * We expect to be called exactly once for any index relation.
+	 */
+	if (RelationGetNumberOfBlocks(index) != 0)
+		elog(ERROR, "index \"%s\" already contains data",
+			 RelationGetRelationName(index));
+
+	/* partial indexes not supported */
+	if (indexInfo->ii_Predicate != NIL)
+		ereport(ERROR,
+				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+				 errmsg("partial indexes not supported")));
+	/* expressions not supported (yet?) */
+	if (indexInfo->ii_Expressions != NIL)
+		ereport(ERROR,
+				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+				 errmsg("expression indexes not supported")));
+
+	meta = mm_getnewbuffer(index);
+	START_CRIT_SECTION();
+	mm_metapage_init(BufferGetPage(meta), MinmaxGetPagesPerRange(index),
+					 MINMAX_CURRENT_VERSION);
+	MarkBufferDirty(meta);
+
+	if (RelationNeedsWAL(index))
+	{
+		xl_minmax_createidx xlrec;
+		XLogRecPtr	recptr;
+		XLogRecData	rdata;
+		Page		page;
+
+		xlrec.node = index->rd_node;
+		xlrec.version = MINMAX_CURRENT_VERSION;
+		xlrec.pagesPerRange = MinmaxGetPagesPerRange(index);
+
+		rdata.buffer = InvalidBuffer;
+		rdata.data = (char *) &xlrec;
+		rdata.len = SizeOfMinmaxCreateIdx;
+		rdata.next = NULL;
+
+		recptr = XLogInsert(RM_MINMAX_ID, XLOG_MINMAX_CREATE_INDEX, &rdata);
+
+		page = BufferGetPage(meta);
+		PageSetLSN(page, recptr);
+	}
+
+	END_CRIT_SECTION();
+
+	UnlockReleaseBuffer(meta);
+
+	/*
+	 * Set up an empty revmap, and get access to it
+	 */
+	mmRevmapCreate(index);
+	rmAccess = mmRevmapAccessInit(index, &pagesPerRange);
+
+	/*
+	 * Initialize our state, including the deformed tuple state.
+	 */
+	mmstate = initialize_mm_buildstate(index, rmAccess, pagesPerRange);
+
+	/*
+	 * Now scan the relation.  No syncscan allowed here because we want the
+	 * heap blocks in physical order.
+	 */
+	reltuples = IndexBuildHeapScan(heap, index, indexInfo, false,
+								   mmbuildCallback, (void *) mmstate);
+
+	/* process the final batch */
+	form_and_insert_tuple(mmstate);
+
+	/* release the last index buffer used */
+	if (!BufferIsInvalid(mmstate->currentInsertBuf))
+	{
+		ReleaseBuffer(mmstate->currentInsertBuf);
+		mmstate->currentInsertBuf = InvalidBuffer;
+	}
+
+	mmRevmapAccessTerminate(mmstate->rmAccess);
+
+	/*
+	 * Return statistics
+	 */
+	result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
+
+	result->heap_tuples = reltuples;
+	result->index_tuples = mmstate->numtuples;
+
+	PG_RETURN_POINTER(result);
+}
+
+Datum
+mmbuildempty(PG_FUNCTION_ARGS)
+{
+	ereport(ERROR,
+			(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+			 errmsg("unlogged MinMax indexes are not supported")));
+
+	PG_RETURN_VOID();
+}
+
+/*
+ * mmbulkdelete
+ *		Since there are no per-heap-tuple index tuples in minmax indexes,
+ *		there's not a lot we can do here.
+ *
+ * XXX we could mark item tuples as "dirty" (when a minimum or maximum heap
+ * tuple is deleted), signalling the need to re-run summarization on the
+ * affected range.  We would need an extra flag in mmtuples for that.
+ */
+Datum
+mmbulkdelete(PG_FUNCTION_ARGS)
+{
+	/* other arguments are not currently used */
+	IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+
+	/* allocate stats if first time through, else re-use existing struct */
+	if (stats == NULL)
+		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
+
+	PG_RETURN_POINTER(stats);
+}
+
+/*
+ * This routine is in charge of "vacuuming" a minmax index: 1) remove index
+ * tuples that are no longer referenced from the revmap.  2) summarize ranges
+ * that are currently unsummarized.
+ */
+Datum
+mmvacuumcleanup(PG_FUNCTION_ARGS)
+{
+	IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
+	IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+	mmRevmapAccess *rmAccess;
+	BlockNumber *nonsummarized = NULL;
+	int			numnonsummarized;
+	Relation	heapRel;
+	BlockNumber	heapNumBlocks;
+	BlockNumber pagesPerRange;
+
+	/* No-op in ANALYZE ONLY mode */
+	if (info->analyze_only)
+		PG_RETURN_POINTER(stats);
+
+	rmAccess = mmRevmapAccessInit(info->index, &pagesPerRange);
+
+	heapRel = heap_open(IndexGetRelation(RelationGetRelid(info->index), false),
+						AccessShareLock);
+
+	/*
+	 * First scan the index, removing index tuples that are no longer
+	 * referenced from the revmap.  While at it, collect the page numbers of
+	 * ranges that are not summarized.
+	 */
+	heapNumBlocks = RelationGetNumberOfBlocks(heapRel);
+	remove_deletable_tuples(info->index, heapNumBlocks, info->strategy,
+							&nonsummarized, &numnonsummarized);
+
+	/* and summarize the ranges collected above */
+	if (nonsummarized)
+	{
+		rerun_summarization(info->index, heapRel, rmAccess, pagesPerRange,
+							nonsummarized, numnonsummarized);
+		pfree(nonsummarized);
+	}
+
+	mmRevmapAccessTerminate(rmAccess);
+	heap_close(heapRel, AccessShareLock);
+
+	PG_RETURN_POINTER(stats);
+}
+
+/*
+ * reloptions processor for minmax indexes
+ */
+Datum
+mmoptions(PG_FUNCTION_ARGS)
+{
+	Datum		reloptions = PG_GETARG_DATUM(0);
+	bool		validate = PG_GETARG_BOOL(1);
+	relopt_value *options;
+	MinmaxOptions *rdopts;
+	int			numoptions;
+	static const relopt_parse_elt tab[] = {
+		{"pages_per_range", RELOPT_TYPE_INT, offsetof(MinmaxOptions, pagesPerRange)}
+	};
+
+	options = parseRelOptions(reloptions, validate, RELOPT_KIND_MINMAX,
+							  &numoptions);
+
+	/* if none set, we're done */
+	if (numoptions == 0)
+		PG_RETURN_NULL();
+
+	rdopts = allocateReloptStruct(sizeof(MinmaxOptions), options, numoptions);
+
+	fillRelOptions((void *) rdopts, sizeof(MinmaxOptions), options, numoptions,
+				   validate, tab, lengthof(tab));
+
+	pfree(options);
+
+	PG_RETURN_BYTEA_P(rdopts);
+}
+
+/*
+ * Return an exclusively-locked buffer resulting from extending the relation.
+ */
+Buffer
+mm_getnewbuffer(Relation irel)
+{
+	Buffer	buffer;
+	bool	needLock = !RELATION_IS_LOCAL(irel);
+
+	/* FIXME need to request a MaxFSMRequestSize page from the FSM here */
+
+	if (needLock)
+		LockRelationForExtension(irel, ExclusiveLock);
+
+	buffer = ReadBuffer(irel, P_NEW);
+	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+
+	MINMAX_elog(DEBUG2, "mm_getnewbuffer: extending to page %u",
+				BufferGetBlockNumber(buffer));
+
+	if (needLock)
+		UnlockRelationForExtension(irel, ExclusiveLock);
+
+	return buffer;
+}
+
+/*
+ * Initialize a page with the given type.
+ *
+ * Caller is responsible for marking it dirty, as appropriate.
+ */
+void
+mm_page_init(Page page, uint16 type)
+{
+	MinmaxSpecialSpace *special;
+
+	PageInit(page, BLCKSZ, sizeof(MinmaxSpecialSpace));
+
+	special = (MinmaxSpecialSpace *) PageGetSpecialPointer(page);
+	special->type = type;
+}
+
+/*
+ * Initialize a new minmax index's metapage.
+ */
+void
+mm_metapage_init(Page page, BlockNumber pagesPerRange, uint16 version)
+{
+	MinmaxMetaPageData	*metadata;
+	int			i;
+
+	mm_page_init(page, MINMAX_PAGETYPE_META);
+
+	metadata = (MinmaxMetaPageData *) PageGetContents(page);
+
+	metadata->minmaxMagic = MINMAX_META_MAGIC;
+	metadata->pagesPerRange = pagesPerRange;
+	metadata->minmaxVersion = version;
+	for (i = 0; i < MAX_REVMAP_ARRAYPAGES; i++)
+		metadata->revmapArrayPages[i] = InvalidBlockNumber;
+}
+
+/*
+ * Build a MinmaxDesc used to create or scan a minmax index
+ */
+static MinmaxDesc *
+minmax_build_mmdesc(Relation rel)
+{
+	MinmaxOpcInfo **opcinfo;
+	MinmaxDesc *mmdesc;
+	TupleDesc	tupdesc;
+	int			totalstored = 0;
+	int			keyno;
+	long		totalsize;
+	Datum		indclassDatum;
+	oidvector  *indclass;
+	bool		isnull;
+
+	tupdesc = RelationGetDescr(rel);
+
+	/*
+	 * Obtain MinmaxOpcInfo for each indexed column.  While at it, accumulate
+	 * the number of columns stored, since the number is opclass-defined.
+	 */
+	indclassDatum = SysCacheGetAttr(INDEXRELID, rel->rd_indextuple,
+									Anum_pg_index_indclass, &isnull);
+	Assert(!isnull);
+	indclass = (oidvector *) DatumGetPointer(indclassDatum);
+	opcinfo = (MinmaxOpcInfo **) palloc(sizeof(MinmaxOpcInfo *) * tupdesc->natts);
+	for (keyno = 0; keyno < tupdesc->natts; keyno++)
+	{
+		Oid		opfam = get_opclass_family(indclass->values[keyno]);
+		Oid		idxtypid = tupdesc->attrs[keyno]->atttypid;
+		FmgrInfo *opcInfoFn;
+
+		opcInfoFn = index_getprocinfo(rel, keyno + 1, MINMAX_PROCNUM_OPCINFO);
+
+		opcinfo[keyno] = (MinmaxOpcInfo *)
+			DatumGetPointer(FunctionCall2(opcInfoFn,
+										  ObjectIdGetDatum(opfam),
+										  ObjectIdGetDatum(idxtypid)));
+		totalstored += opcinfo[keyno]->oi_nstored;
+	}
+
+	/* Allocate our result struct and fill it in */
+	totalsize = offsetof(MinmaxDesc, md_info) +
+		sizeof(MinmaxOpcInfo *) * tupdesc->natts;
+
+	mmdesc = palloc(totalsize);
+	mmdesc->md_index = rel;
+	mmdesc->md_tupdesc = CreateTupleDescCopy(tupdesc);
+	mmdesc->md_disktdesc = NULL;	/* generated lazily */
+	mmdesc->md_totalstored = totalstored;
+
+	for (keyno = 0; keyno < tupdesc->natts; keyno++)
+		mmdesc->md_info[keyno] = opcinfo[keyno];
+
+	return mmdesc;
+}
+
+/*
+ * Initialize a MMBuildState appropriate to create tuples on the given index.
+ */
+static MMBuildState *
+initialize_mm_buildstate(Relation idxRel, mmRevmapAccess *rmAccess,
+						 BlockNumber pagesPerRange)
+{
+	MMBuildState *mmstate;
+
+	mmstate = palloc(sizeof(MMBuildState));
+
+	mmstate->irel = idxRel;
+	mmstate->numtuples = 0;
+	mmstate->currentInsertBuf = InvalidBuffer;
+	mmstate->pagesPerRange = pagesPerRange;
+	mmstate->currRangeStart = 0;
+	mmstate->rmAccess = rmAccess;
+	mmstate->mmDesc = minmax_build_mmdesc(idxRel);
+	mmstate->dtuple = minmax_new_dtuple(mmstate->mmDesc);
+
+	minmax_dtuple_initialize(mmstate->dtuple, mmstate->mmDesc);
+
+	return mmstate;
+}
+
+/*
+ * Remove index tuples that are no longer useful.
+ *
+ * While at it, return in nonsummed the array (and in numnonsummed its size) of
+ * block numbers for which the revmap returns an invalid TID; this is used in
+ * a later stage to execute re-summarization.  (Each block number returned
+ * corresponds to the heap page number with which each unsummarized range
+ * starts.)	Space for the array is palloc'ed, and must be freed by caller.
+ *
+ * idxRel is the index relation; heapNumBlocks is the size of the heap
+ * relation; strategy is appropriate for bulk scanning.
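+ *
+ * In outline: (1) scan the index, entering each live tuple's TID into a hash
+ * table, initially marked as unreferenced; (2) scan the revmap, marking as
+ * referenced the TIDs it still points to; (3) any TID left unreferenced (and
+ * not locked by a concurrent scan) is deletable; (4) sort the deletable TIDs
+ * and remove them page by page, WAL-logging each page's deletions.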
+ */
+static void
+remove_deletable_tuples(Relation idxRel, BlockNumber heapNumBlocks,
+						BufferAccessStrategy strategy,
+						BlockNumber **nonsummed, int *numnonsummed)
+{
+	HASHCTL		hctl;
+	HTAB	   *tuples;
+	HASH_SEQ_STATUS status;
+	BlockNumber nblocks;
+	BlockNumber blk;
+	mmRevmapAccess *rmAccess;
+	BlockNumber heapBlk;
+	BlockNumber	pagesPerRange;
+	int			numitems = 0;
+	int			numdeletable = 0;
+	ItemPointerData *deletable;
+	int			start;
+	int			i;
+	BlockNumber *nonsumm = NULL;
+	int			maxnonsumm = 0;
+	int			numnonsumm = 0;
+
+	typedef struct DeletableTuple
+	{
+		ItemPointerData tid;
+		bool		referenced;
+	} DeletableTuple;
+
+	nblocks = RelationGetNumberOfBlocks(idxRel);
+
+	/* Initialize hash used to track deletable tuples */
+	memset(&hctl, 0, sizeof(hctl));
+	hctl.keysize = sizeof(ItemPointerData);
+	hctl.entrysize = sizeof(DeletableTuple);
+	hctl.hcxt = CurrentMemoryContext;
+	hctl.hash = tag_hash;
+
+	/* assume ten entries per page.  No harm in getting this wrong */
+	tuples = hash_create("mmvacuumcleanup", nblocks * 10, &hctl,
+						 HASH_CONTEXT | HASH_FUNCTION | HASH_ELEM);
+
+	/*
+	 * Scan the index sequentially, entering each item into a hash table.
+	 * Initially, the items are marked as not referenced.
+	 */
+	for (blk = 0; blk < nblocks; blk++)
+	{
+		Buffer		buf;
+		Page		page;
+		OffsetNumber offno;
+		MinmaxSpecialSpace *special;
+
+		vacuum_delay_point();
+
+		buf = ReadBufferExtended(idxRel, MAIN_FORKNUM, blk, RBM_NORMAL,
+								 strategy);
+		page = BufferGetPage(buf);
+
+		/*
+		 * Verify the type of the page we got; if it's not a regular page,
+		 * ignore it.
+		 */
+		special = (MinmaxSpecialSpace *) PageGetSpecialPointer(page);
+		if (special->type != MINMAX_PAGETYPE_REGULAR)
+		{
+			ReleaseBuffer(buf);
+			continue;
+		}
+
+		/*
+		 * Enter each live tuple into the hash table
+		 */
+		LockBuffer(buf, BUFFER_LOCK_SHARE);
+		for (offno = 1; offno <= PageGetMaxOffsetNumber(page); offno++)
+		{
+			ItemPointerData tid;
+			ItemId		itemid;
+			bool		found;
+			DeletableTuple *hitem;
+
+			itemid = PageGetItemId(page, offno);
+			if (!ItemIdHasStorage(itemid))
+				continue;
+
+			ItemPointerSet(&tid, blk, offno);
+			hitem = (DeletableTuple *)
+				hash_search(tuples, &tid, HASH_ENTER, &found);
+			Assert(!found);
+			hitem->referenced = false;
+			numitems++;
+		}
+		UnlockReleaseBuffer(buf);
+	}
+
+	/*
+	 * Now scan the revmap, and determine which of these TIDs are still
+	 * referenced
+	 */
+	rmAccess = mmRevmapAccessInit(idxRel, &pagesPerRange);
+	for (heapBlk = 0; heapBlk < heapNumBlocks; heapBlk += pagesPerRange)
+	{
+		ItemPointerData itupptr;
+		DeletableTuple *hitem;
+		bool		found;
+
+		mmGetHeapBlockItemptr(rmAccess, heapBlk, &itupptr);
+
+		if (!ItemPointerIsValid(&itupptr))
+		{
+			/*
+			 * Ignore revmap entries set to invalid.  Before doing so, if the
+			 * heap page range is complete but not summarized, store its
+			 * initial page number in the unsummarized array, for later
+			 * summarization.
+			 */
+			if (heapBlk + pagesPerRange < heapNumBlocks)
+			{
+				if (maxnonsumm == 0)
+				{
+					Assert(!nonsumm);
+					maxnonsumm = 8;
+					nonsumm = palloc(sizeof(BlockNumber) * maxnonsumm);
+				}
+				else if (numnonsumm >= maxnonsumm)
+				{
+					maxnonsumm *= 2;
+					nonsumm = repalloc(nonsumm, sizeof(BlockNumber) * maxnonsumm);
+				}
+
+				nonsumm[numnonsumm++] = heapBlk;
+			}
+
+			continue;
+		}
+		else
+			UnlockTuple(idxRel, &itupptr, ShareLock);
+
+		hitem = (DeletableTuple *) hash_search(tuples,
+											   &itupptr,
+											   HASH_FIND,
+											   &found);
+		/*
+		 * If the item is not in the hash, it must have been inserted after the
+		 * index was scanned, and therefore we should leave things well alone.
+		 * (There might be a leftover entry, but it's okay because next vacuum
+		 * will remove it.)
+		 */
+		if (!found)
+			continue;
+
+		hitem->referenced = true;
+
+		/* discount items set as referenced */
+		numitems--;
+	}
+	Assert(numitems >= 0);
+
+	mmRevmapAccessTerminate(rmAccess);
+
+	/*
+	 * Now scan the hash, and keep track of the removable (i.e. not referenced,
+	 * not locked) tuples.
+	 */
+	deletable = palloc(sizeof(ItemPointerData) * numitems);
+
+	hash_freeze(tuples);
+	hash_seq_init(&status, tuples);
+	for (;;)
+	{
+		DeletableTuple *hitem;
+
+		hitem = hash_seq_search(&status);
+		if (!hitem)
+			break;
+		if (hitem->referenced)
+			continue;
+		if (!ConditionalLockTuple(idxRel, &hitem->tid, ExclusiveLock))
+			continue;
+
+		/*
+		 * By here, we know this tuple is not referenced from the revmap.
+		 * Also, since we hold the tuple lock, we know that if there is a
+		 * concurrent scan that had obtained the tuple before the reference
+		 * got removed, either that scan is not looking at the tuple (because
+		 * that would have prevented us from getting the tuple lock) or it is
+		 * holding the containing buffer's lock.  If the former, then there's
+		 * no problem with removing the tuple immediately; if the latter, we
+		 * will block below trying to acquire that lock, so by the time we are
+		 * unblocked, the concurrent scan will no longer be interested in the
+		 * tuple contents anymore.	Therefore, this tuple can be removed from
+		 * the block.
+		 */
+		UnlockTuple(idxRel, &hitem->tid, ExclusiveLock);
+
+		deletable[numdeletable++] = hitem->tid;
+	}
+
+	/*
+	 * Now sort the array of deletable index tuples, and walk this array by
+	 * pages doing bulk deletion of items on each page; the free space map is
+	 * updated for pages on which we delete items.
+	 */
+	qsort(deletable, numdeletable, sizeof(ItemPointerData),
+		  qsortCompareItemPointers);
+
+	for (start = 0, i = 0; i < numdeletable; i++)
+	{
+		/*
+		 * Are we at the end of the items that together belong in one
+		 * particular page?  If so, then it's deletion time.
+		 */
+		if (i == numdeletable - 1 ||
+			(ItemPointerGetBlockNumber(&deletable[start]) !=
+			 ItemPointerGetBlockNumber(&deletable[i + 1])))
+		{
+			OffsetNumber *offnos;
+			int			noffs;
+			Buffer		buf;
+			Page		page;
+			int			j;
+			BlockNumber	blk;
+			int			freespace;
+
+			vacuum_delay_point();
+
+			blk = ItemPointerGetBlockNumber(&deletable[start]);
+			buf = ReadBufferExtended(idxRel, MAIN_FORKNUM, blk,
+									 RBM_NORMAL, strategy);
+			LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+			page = BufferGetPage(buf);
+
+			noffs = i + 1 - start;
+			offnos = palloc(sizeof(OffsetNumber) * noffs);
+
+			for (j = 0; j < noffs; j++)
+				offnos[j] = ItemPointerGetOffsetNumber(&deletable[start + j]);
+
+			/*
+			 * Now remove the items from the page.  We use the no-compact
+			 * variant so that surviving items keep their offset numbers,
+			 * which the revmap references.
+			 */
+			START_CRIT_SECTION();
+
+			PageIndexDeleteNoCompact(page, offnos, noffs);
+			MarkBufferDirty(buf);
+
+			/* XLOG stuff */
+			if (RelationNeedsWAL(idxRel))
+			{
+				xl_minmax_bulkremove	xlrec;
+				XLogRecPtr	recptr;
+				XLogRecData	rdata[2];
+
+				xlrec.node = idxRel->rd_node;
+				xlrec.block = blk;
+				rdata[0].data = (char *) &xlrec;
+				rdata[0].len = SizeOfMinmaxBulkRemove;
+				rdata[0].buffer = InvalidBuffer;
+				rdata[0].buffer_std = false;
+				rdata[0].next = &(rdata[1]);
+
+				/*
+				 * The OffsetNumber array is not actually in the buffer, but we
+				 * pretend that it is.  When XLogInsert stores the whole
+				 * buffer, the offset array need not be stored too.
+				 */
+				rdata[1].data = (char *) offnos;
+				rdata[1].len = sizeof(OffsetNumber) * noffs;
+				rdata[1].buffer = buf;
+				rdata[1].buffer_std = true;
+				rdata[1].next = NULL;
+
+				recptr = XLogInsert(RM_MINMAX_ID, XLOG_MINMAX_BULKREMOVE,
+									rdata);
+
+				PageSetLSN(page, recptr);
+			}
+
+			END_CRIT_SECTION();
+
+			/* next iteration starts where this one ended */
+			start = i + 1;
+
+			/* remember free space while we have the buffer locked */
+			freespace = PageGetFreeSpace(page);
+
+			UnlockReleaseBuffer(buf);
+			pfree(offnos);
+
+			RecordPageWithFreeSpace(idxRel, blk, freespace);
+		}
+	}
+
+	pfree(deletable);
+
+	/* Finally, ensure the index's FSM is consistent */
+	FreeSpaceMapVacuum(idxRel);
+
+	*nonsummed = nonsumm;
+	*numnonsummed = numnonsumm;
+
+	hash_destroy(tuples);
+}
+
+/*
+ * Summarize the given page ranges of the given index.
+ */
+static void
+rerun_summarization(Relation idxRel, Relation heapRel,
+					mmRevmapAccess *rmAccess, BlockNumber pagesPerRange,
+					BlockNumber *nonsummarized, int numnonsummarized)
+{
+	int			i;
+	IndexInfo  *indexInfo;
+	MMBuildState *mmstate;
+
+	indexInfo = BuildIndexInfo(idxRel);
+
+	mmstate = initialize_mm_buildstate(idxRel, rmAccess, pagesPerRange);
+
+	for (i = 0; i < numnonsummarized; i++)
+	{
+		BlockNumber blk = nonsummarized[i];
+		ItemPointerData iptr;
+
+		mmstate->currRangeStart = blk;
+
+		mmGetHeapBlockItemptr(rmAccess, blk, &iptr);
+		/* it can't have been re-summarized concurrently */
+		Assert(!ItemPointerIsValid(&iptr));
+
+		/*
+		 * Execute the partial heap scan covering the heap blocks in the
+		 * specified page range, summarizing the heap tuples in it.  This scan
+		 * stops just short of mmbuildCallback creating the new index entry.
+		 */
+		IndexBuildHeapRangeScan(heapRel, idxRel, indexInfo, false,
+								blk, pagesPerRange,
+								mmbuildCallback, (void *) mmstate);
+
+		/*
+		 * Create the index tuple and insert it.  Note mmbuildCallback didn't
+		 * have the chance to actually insert anything into the index, because
+		 * the heapscan should have ended just as it reached the final tuple in
+		 * the range.
+		 */
+		form_and_insert_tuple(mmstate);
+
+		/* and re-initialize state for the next range */
+		minmax_dtuple_initialize(mmstate->dtuple, mmstate->mmDesc);
+	}
+
+	if (!BufferIsInvalid(mmstate->currentInsertBuf))
+	{
+		ReleaseBuffer(mmstate->currentInsertBuf);
+		mmstate->currentInsertBuf = InvalidBuffer;
+	}
+}
+
+/*
+ * Insert an index tuple into the index relation.  The revmap is updated to
+ * mark the range containing the given page as pointing to the inserted entry.
+ * A WAL record is written.
+ *
+ * The buffer, if valid, is checked for free space to insert the new entry;
+ * if there isn't enough, a new buffer is obtained and pinned.
+ *
+ * The buffer is marked dirty.
+ */
+static void
+mm_doinsert(Relation idxrel, mmRevmapAccess *rmAccess, Buffer *buffer,
+			BlockNumber heapblkno, MMTuple *tup, Size itemsz)
+{
+	Page		page;
+	BlockNumber blk;
+	OffsetNumber off;
+	bool		extended;
+
+	itemsz = MAXALIGN(itemsz);
+
+	/*
+	 * Obtain a locked buffer to insert the new tuple.  mm_getinsertbuffer
+	 * guarantees enough space in the returned buffer, throwing a user-facing
+	 * error otherwise; so finding insufficient space here is a program error.
+	 */
+	extended = mm_getinsertbuffer(idxrel, buffer, itemsz);
+	page = BufferGetPage(*buffer);
+	if (PageGetFreeSpace(page) < itemsz)
+		elog(ERROR, "index row size %lu exceeds maximum for index \"%s\"",
+			 itemsz, RelationGetRelationName(idxrel));
+
+	START_CRIT_SECTION();
+	off = PageAddItem(page, (Item) tup, itemsz, InvalidOffsetNumber,
+					  false, false);
+	MarkBufferDirty(*buffer);
+
+	blk = BufferGetBlockNumber(*buffer);
+	MINMAX_elog(DEBUG2, "inserted tuple (%u,%u) for range starting at %u",
+				blk, off, heapblkno);
+
+	/* XLOG stuff */
+	if (RelationNeedsWAL(idxrel))
+	{
+		xl_minmax_insert	xlrec;
+		XLogRecPtr	recptr;
+		XLogRecData	rdata[2];
+		uint8		info = XLOG_MINMAX_INSERT;
+
+		xlrec.target.node = idxrel->rd_node;
+		ItemPointerSet(&xlrec.target.tid, blk, off);
+		xlrec.overwrite = false;
+		rdata[0].data = (char *) &xlrec;
+		rdata[0].len = SizeOfMinmaxInsert;
+		rdata[0].buffer = InvalidBuffer;
+		rdata[0].buffer_std = false;
+		rdata[0].next = &(rdata[1]);
+
+		rdata[1].data = (char *) tup;
+		rdata[1].len = itemsz;
+		rdata[1].buffer = *buffer;
+		rdata[1].buffer_std = true;
+		rdata[1].next = NULL;
+
+		/*
+		 * If this is the first tuple in the page, we can reinit the page
+		 * instead of restoring the whole thing.  Set flag, and hide buffer
+		 * references from XLogInsert.
+		 */
+		if (extended)
+		{
+			info |= XLOG_MINMAX_INIT_PAGE;
+			rdata[1].buffer = InvalidBuffer;
+		}
+
+		recptr = XLogInsert(RM_MINMAX_ID, info, rdata);
+
+		PageSetLSN(page, recptr);
+	}
+
+	END_CRIT_SECTION();
+
+	/*
+	 * Note we need to keep the lock on the buffer until after the revmap
+	 * has been updated.  Otherwise, a concurrent scanner could try to obtain
+	 * the index tuple from the revmap before we're done writing it.
+	 */
+	mmSetHeapBlockItemptr(rmAccess, heapblkno, blk, off);
+
+	LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
+}
+
+/*
+ * Return a pinned and locked buffer which can be used to insert an index item
+ * of size itemsz.
+ *
+ * The passed buffer argument is tested for free space; if it has enough, it is
+ * locked and returned.  Otherwise, that buffer (if valid) is unpinned, a new
+ * buffer is obtained, and returned pinned and locked.
+ *
+ * If there's no existing page with enough free space to accommodate the new
+ * item, the relation is extended.  This function returns true if this
+ * happens, false otherwise.
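+ *
+ * Note that free space is rechecked after acquiring the exclusive lock on
+ * the chosen buffer; if a concurrent inserter consumed the space in the
+ * meantime, we release the buffer and start over.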
+ */
+static bool
+mm_getinsertbuffer(Relation irel, Buffer *buffer, Size itemsz)
+{
+	Buffer		buf;
+	Page		page;
+	bool		extended = false;
+
+gib_restart:
+	buf = *buffer;
+
+	if (BufferIsInvalid(buf) ||
+		(PageGetFreeSpace(BufferGetPage(buf)) < itemsz))
+	{
+		/*
+		 * By the time we break out of this loop, buf is a locked and pinned
+		 * buffer.  It was tested for free space, but in some cases only before
+		 * locking it, so a recheck is necessary because a concurrent inserter
+		 * might have put items in it.
+		 */
+		for (;;)
+		{
+			BlockNumber	blk;
+			int			freespace;
+
+			blk = GetPageWithFreeSpace(irel, itemsz);
+			if (blk == InvalidBlockNumber)
+			{
+				/*
+				 * There's not enough free space in any existing index page,
+				 * according to the FSM: extend the relation to obtain a shiny
+				 * new page.
+				 */
+				buf = mm_getnewbuffer(irel);
+				page = BufferGetPage(buf);
+				mm_page_init(page, MINMAX_PAGETYPE_REGULAR);
+
+				/*
+				 * If an entirely new page does not contain enough free space
+				 * for the new item, then surely that item is oversized.
+				 * Complain loudly; but first make sure we record the page as
+				 * free, for next time.
+				 */
+				freespace = PageGetFreeSpace(page);
+				RecordPageWithFreeSpace(irel, BufferGetBlockNumber(buf),
+										freespace);
+				if (freespace < itemsz)
+					ereport(ERROR,
+							(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+							 errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
+									(unsigned long) itemsz,
+									(unsigned long) freespace,
+									RelationGetRelationName(irel))));
+				extended = true;
+				break;
+			}
+
+			/*
+			 * We have a block number from FSM now.  Check that it has enough
+			 * free space, and break out to return it if it does; otherwise
+			 * start over.  Note that we allow for the FSM to be out of date
+			 * here, and in that case we update it and move on.
+			 */
+			Assert(blk != InvalidBlockNumber);
+			buf = ReadBuffer(irel, blk);
+			page = BufferGetPage(buf);
+			freespace = PageGetFreeSpace(page);
+			if (freespace >= itemsz)
+			{
+				LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+				break;
+			}
+
+			/* Not really enough space: register reality and start over */
+			ReleaseBuffer(buf);
+			RecordPageWithFreeSpace(irel, blk, freespace);
+		}
+
+		if (!BufferIsInvalid(*buffer))
+			ReleaseBuffer(*buffer);
+		*buffer = buf;
+	}
+	else
+		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+
+	/*
+	 * Now recheck free space with exclusive lock held, and start over if it's
+	 * not enough.
+	 */
+	Assert(!BufferIsInvalid(*buffer));
+	page = BufferGetPage(*buffer);
+	if (PageGetFreeSpace(page) < itemsz)
+	{
+		UnlockReleaseBuffer(*buffer);
+		*buffer = InvalidBuffer;
+		goto gib_restart;
+	}
+
+	/*
+	 * XXX we could perhaps avoid this if we used RelationSetTargetBlock ...
+	 */
+	if (extended)
+		FreeSpaceMapVacuum(irel);
+
+	return extended;
+}
+
+/*
+ * Given a deformed tuple in the build state, convert it into the on-disk
+ * format and insert it into the index, making the revmap point to it.
+ */
+static void
+form_and_insert_tuple(MMBuildState *mmstate)
+{
+	MMTuple    *tup;
+	Size		size;
+
+	/* if this dtuple didn't see any heap tuple at all, don't insert it */
+	if (!mmstate->dtuple->dt_seentup)
+		return;
+
+	tup = minmax_form_tuple(mmstate->mmDesc, mmstate->dtuple, &size);
+	mm_doinsert(mmstate->irel, mmstate->rmAccess,
+				&mmstate->currentInsertBuf, mmstate->currRangeStart, tup,
+				size);
+	mmstate->numtuples++;
+	pfree(tup);
+}
+
+/*
+ * qsort comparator for ItemPointerData items
+ */
+static int
+qsortCompareItemPointers(const void *a, const void *b)
+{
+	return ItemPointerCompare((ItemPointer) a, (ItemPointer) b);
+}
diff --git a/src/backend/access/minmax/mmrevmap.c b/src/backend/access/minmax/mmrevmap.c
new file mode 100644
index 0000000..9c269c1
--- /dev/null
+++ b/src/backend/access/minmax/mmrevmap.c
@@ -0,0 +1,683 @@
+/*
+ * mmrevmap.c
+ *		Reverse range map for MinMax indexes
+ *
+ * The reverse range map (revmap) is a translation structure for minmax
+ * indexes: for each page range, there is one most-up-to-date summary tuple,
+ * and its location is tracked by the revmap.  Whenever a new heap tuple with
+ * values outside the previously recorded min/max interval for its range is
+ * inserted, a new summary tuple is inserted into the index and the revmap is
+ * updated to point to it.
+ *
+ * The pages of the revmap are interspersed in the index's main fork.  The
+ * first revmap page is always the index's page number one (that is,
+ * immediately after the metapage).  Subsequent revmap pages are allocated as
+ * they are needed; their locations are tracked by "array pages".  The metapage
+ * contains a large array of BlockNumbers pointing to the array pages.  Thus,
+ * to find the second revmap page, we read the metapage and obtain the block
+ * number of the first array page; we then read that page, and the first
+ * element in it is the revmap page we're looking for.
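+ *
+ * As a worked example: logical revmap page 0 needs no lookup at all, since
+ * it is always physical block 1.  To find logical revmap page 2 (assuming
+ * each array page holds more than two entries), we read the metapage, obtain
+ * the block number of array page 0, read that page, and take its second
+ * element.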
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *	  src/backend/access/minmax/mmrevmap.c
+ */
+#include "postgres.h"
+
+#include "access/heapam_xlog.h"
+#include "access/minmax.h"
+#include "access/minmax_internal.h"
+#include "access/minmax_page.h"
+#include "access/minmax_revmap.h"
+#include "access/minmax_xlog.h"
+#include "access/rmgr.h"
+#include "miscadmin.h"
+#include "storage/bufmgr.h"
+#include "storage/lmgr.h"
+#include "storage/relfilenode.h"
+#include "storage/smgr.h"
+#include "utils/memutils.h"
+
+
+
+/*
+ * In regular revmap pages, each item stores an ItemPointerData.  These defines
+ * let one find the logical revmap page number and index number of the revmap
+ * item for the given heap block number.
+ */
+#define HEAPBLK_TO_REVMAP_BLK(pagesPerRange, heapBlk) \
+	((heapBlk / pagesPerRange) / REGULAR_REVMAP_PAGE_MAXITEMS)
+#define HEAPBLK_TO_REVMAP_INDEX(pagesPerRange, heapBlk) \
+	((heapBlk / pagesPerRange) % REGULAR_REVMAP_PAGE_MAXITEMS)
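+
+/*
+ * Worked example, with illustrative numbers (the real value of
+ * REGULAR_REVMAP_PAGE_MAXITEMS depends on BLCKSZ): with pagesPerRange = 128
+ * and REGULAR_REVMAP_PAGE_MAXITEMS = 1000, heap block 512000 belongs to
+ * range number 512000 / 128 = 4000, which is tracked in logical revmap page
+ * 4000 / 1000 = 4, at index 4000 % 1000 = 0.
+ */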
+
+/*
+ * In array revmap pages, each item stores a BlockNumber.  These defines let
+ * one find the page and index number of a given revmap block number.  Note
+ * that the first revmap page (revmap logical page number 0) is always stored
+ * in physical block number 1, so array pages do not store that one.
+ */
+#define MAPBLK_TO_RMARRAY_BLK(rmBlk)	((rmBlk - 1) / ARRAY_REVMAP_PAGE_MAXITEMS)
+#define MAPBLK_TO_RMARRAY_INDEX(rmBlk)	((rmBlk - 1) % ARRAY_REVMAP_PAGE_MAXITEMS)
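+
+/*
+ * Continuing the example above, with ARRAY_REVMAP_PAGE_MAXITEMS = 2000
+ * (again an illustrative number), revmap logical page 4 is tracked by array
+ * page (4 - 1) / 2000 = 0, at index (4 - 1) % 2000 = 3.
+ */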
+
+
+struct mmRevmapAccess
+{
+	Relation	idxrel;
+	BlockNumber pagesPerRange;
+	Buffer		metaBuf;
+	Buffer		currBuf;
+	Buffer		currArrayBuf;
+	BlockNumber *revmapArrayPages;
+};
+/* typedef appears in minmax_revmap.h */
+
+
+/*
+ * Initialize an access object for a reverse range map, which can be used to
+ * read entries from it.  This must be freed with mmRevmapAccessTerminate when
+ * the caller is done with it.
+ */
+mmRevmapAccess *
+mmRevmapAccessInit(Relation idxrel, BlockNumber *pagesPerRange)
+{
+	mmRevmapAccess *rmAccess;
+	Buffer		meta;
+	MinmaxMetaPageData *metadata;
+
+	meta = ReadBuffer(idxrel, MINMAX_METAPAGE_BLKNO);
+	metadata = (MinmaxMetaPageData *) PageGetContents(BufferGetPage(meta));
+
+	rmAccess = palloc(sizeof(mmRevmapAccess));
+	rmAccess->metaBuf = meta;
+	rmAccess->idxrel = idxrel;
+	rmAccess->pagesPerRange = metadata->pagesPerRange;
+	rmAccess->currBuf = InvalidBuffer;
+	rmAccess->currArrayBuf = InvalidBuffer;
+	rmAccess->revmapArrayPages = NULL;
+
+	if (pagesPerRange)
+		*pagesPerRange = metadata->pagesPerRange;
+
+	return rmAccess;
+}
+
+/*
+ * Release resources associated with a revmap access object.
+ */
+void
+mmRevmapAccessTerminate(mmRevmapAccess *rmAccess)
+{
+	if (rmAccess->revmapArrayPages != NULL)
+		pfree(rmAccess->revmapArrayPages);
+	if (rmAccess->metaBuf != InvalidBuffer)
+		ReleaseBuffer(rmAccess->metaBuf);
+	if (rmAccess->currBuf != InvalidBuffer)
+		ReleaseBuffer(rmAccess->currBuf);
+	if (rmAccess->currArrayBuf != InvalidBuffer)
+		ReleaseBuffer(rmAccess->currArrayBuf);
+	pfree(rmAccess);
+}
+
+/*
+ * In the given revmap page, which is used in a minmax index of pagesPerRange
+ * pages per range, set the element corresponding to heap block number heapBlk
+ * to the value (blkno, offno).
+ *
+ * Caller must have obtained the correct revmap page.
+ *
+ * This is used both in regular operation and during WAL replay.
+ */
+void
+rm_page_set_iptr(Page page, BlockNumber pagesPerRange, BlockNumber heapBlk,
+				 BlockNumber blkno, OffsetNumber offno)
+{
+	RevmapContents *contents;
+	ItemPointerData *iptr;
+
+	contents = (RevmapContents *) PageGetContents(page);
+	iptr = (ItemPointerData *) contents->rmr_tids;
+	iptr += HEAPBLK_TO_REVMAP_INDEX(pagesPerRange, heapBlk);
+
+	ItemPointerSet(iptr, blkno, offno);
+}
+
+/*
+ * Initialize a new regular revmap page, which stores the given revmap logical
+ * page number.  The newly allocated physical block number is returned.
+ *
+ * Used both by regular code path as well as during xlog replay.
+ */
+BlockNumber
+initialize_rmr_page(Buffer newbuf, BlockNumber mapBlk)
+{
+	BlockNumber	blkno;
+	Page		page;
+	RevmapContents *contents;
+
+	page = BufferGetPage(newbuf);
+
+	mm_page_init(page, MINMAX_PAGETYPE_REVMAP);
+	contents = (RevmapContents *) PageGetContents(page);
+	contents->rmr_logblk = mapBlk;
+	/* the rmr_tids array is initialized to all invalid by PageInit */
+
+	blkno = BufferGetBlockNumber(newbuf);
+
+	return blkno;
+}
+
+/*
+ * Lock the metapage in the mode specified by the caller, and update the
+ * given rmAccess with the metapage data.  The metapage buffer is locked when
+ * this function returns; it's the caller's responsibility to unlock it.
+ */
+static void
+rmaccess_get_metapage(mmRevmapAccess *rmAccess, int lockmode)
+{
+	MinmaxMetaPageData *metadata;
+	MinmaxSpecialSpace *special PG_USED_FOR_ASSERTS_ONLY;
+	Page		metapage;
+
+	LockBuffer(rmAccess->metaBuf, lockmode);
+	metapage = BufferGetPage(rmAccess->metaBuf);
+
+#ifdef USE_ASSERT_CHECKING
+	/* ensure we really got the metapage */
+	special = (MinmaxSpecialSpace *) PageGetSpecialPointer(metapage);
+	Assert(special->type == MINMAX_PAGETYPE_META);
+#endif
+
+	/* first time through? allocate the array */
+	if (rmAccess->revmapArrayPages == NULL)
+		rmAccess->revmapArrayPages =
+			palloc(sizeof(BlockNumber) * MAX_REVMAP_ARRAYPAGES);
+
+	metadata = (MinmaxMetaPageData *) PageGetContents(metapage);
+	memcpy(rmAccess->revmapArrayPages, metadata->revmapArrayPages,
+		   sizeof(BlockNumber) * MAX_REVMAP_ARRAYPAGES);
+}
+
+/*
+ * Given a buffer (hopefully containing a blank page), set it up as a revmap
+ * array page.
+ *
+ * Used both by regular code path as well as during xlog replay.
+ */
+void
+initialize_rma_page(Buffer buf)
+{
+	Page	arrayPg;
+	RevmapArrayContents *contents;
+
+	arrayPg = BufferGetPage(buf);
+	mm_page_init(arrayPg, MINMAX_PAGETYPE_REVMAP_ARRAY);
+	contents = (RevmapArrayContents *) PageGetContents(arrayPg);
+	contents->rma_nblocks = 0;
+	/*
+	 * Set the whole array to InvalidBlockNumber; filling with 0xFF bytes
+	 * works because InvalidBlockNumber is 0xFFFFFFFF.
+	 */
+	memset(contents->rma_blocks, 0xFF,
+		   sizeof(BlockNumber) * ARRAY_REVMAP_PAGE_MAXITEMS);
+}
+
+/*
+ * Update the metapage, so that item arrayBlkIdx in the array of revmap array
+ * pages points to block number newPgBlkno.
+ */
+static void
+update_minmax_metapg(Relation idxrel, Buffer meta, uint32 arrayBlkIdx,
+					 BlockNumber newPgBlkno)
+{
+	MinmaxMetaPageData *metadata;
+
+	metadata = (MinmaxMetaPageData *) PageGetContents(BufferGetPage(meta));
+
+	START_CRIT_SECTION();
+	metadata->revmapArrayPages[arrayBlkIdx] = newPgBlkno;
+	MarkBufferDirty(meta);
+	if (RelationNeedsWAL(idxrel))
+	{
+		xl_minmax_metapg_set	xlrec;
+		XLogRecPtr	recptr;
+		XLogRecData	rdata;
+
+		xlrec.node = idxrel->rd_node;
+		xlrec.blkidx = arrayBlkIdx;
+		xlrec.newpg = newPgBlkno;
+
+		rdata.data = (char *) &xlrec;
+		rdata.len = SizeOfMinmaxMetapgSet;
+		rdata.buffer = InvalidBuffer;
+		rdata.buffer_std = false;
+		rdata.next = NULL;
+
+		recptr = XLogInsert(RM_MINMAX_ID, XLOG_MINMAX_METAPG_SET, &rdata);
+		PageSetLSN(BufferGetPage(meta), recptr);
+	}
+	END_CRIT_SECTION();
+}
+
+/*
+ * Given a logical revmap block number, find its physical block number.
+ *
+ * Note this might involve up to two buffer reads, including a possible
+ * update to the metapage.
+ *
+ * If extend is set to true, and the page hasn't been set yet, extend the
+ * array to point to a newly allocated page.
+ */
+static BlockNumber
+rm_get_phys_blkno(mmRevmapAccess *rmAccess, BlockNumber mapBlk, bool extend)
+{
+	int		arrayBlkIdx;
+	BlockNumber arrayBlk;
+	RevmapArrayContents *contents;
+	int		revmapIdx;
+	BlockNumber targetblk;
+
+	/* the first revmap page is always block number 1 */
+	if (mapBlk == 0)
+		return (BlockNumber) 1;
+
+	/*
+	 * For all other cases, take the long route of checking the metapage and
+	 * revmap array pages.
+	 */
+
+	/*
+	 * Copy the revmap array from the metapage into private storage, if not
+	 * done already in this scan.
+	 */
+	if (rmAccess->revmapArrayPages == NULL)
+	{
+		rmaccess_get_metapage(rmAccess, BUFFER_LOCK_SHARE);
+		LockBuffer(rmAccess->metaBuf, BUFFER_LOCK_UNLOCK);
+	}
+
+	/*
+	 * Consult the metapage array; if the array page we need is not set there,
+	 * we need to extend the index to allocate the array page, and update the
+	 * metapage array.
+	 */
+	arrayBlkIdx = MAPBLK_TO_RMARRAY_BLK(mapBlk);
+	if (arrayBlkIdx >= MAX_REVMAP_ARRAYPAGES)
+		elog(ERROR, "nonexistent revmap array page requested");
+
+	arrayBlk = rmAccess->revmapArrayPages[arrayBlkIdx];
+	if (arrayBlk == InvalidBlockNumber)
+	{
+		/* if not asked to extend, there's no further work to do here */
+		if (!extend)
+			return InvalidBlockNumber;
+
+		/*
+		 * If we need to create a new array page, check the metapage again;
+		 * someone might have created it after the last time we read the
+		 * metapage.  This time we acquire an exclusive lock, since we may need
+		 * to extend.  Lock before doing the physical relation extension, to
+		 * avoid leaving an unused page around in case someone does this
+		 * concurrently.  Note that, unfortunately, we will be keeping the lock
+		 * on the metapage alongside the relation extension lock, while doing a
+		 * syscall involving disk I/O.  Extending to add a new revmap array page
+		 * is fairly infrequent, so it shouldn't be too bad.
+		 *
+		 * XXX it is possible to extend the relation unconditionally before
+		 * locking the metapage, and later if we find that someone else had
+		 * already added this page, save the page in FSM as MaxFSMRequestSize.
+		 * That would be better for concurrency.  Explore someday.
+		 */
+		rmaccess_get_metapage(rmAccess, BUFFER_LOCK_EXCLUSIVE);
+
+		if (rmAccess->revmapArrayPages[arrayBlkIdx] == InvalidBlockNumber)
+		{
+			BlockNumber	newPgBlkno;
+
+			/*
+			 * Ok, definitely need to allocate a new revmap array page;
+			 * initialize a new page to the initial (empty) array revmap state
+			 * and register it in metapage.
+			 */
+			rmAccess->currArrayBuf = mm_getnewbuffer(rmAccess->idxrel);
+			START_CRIT_SECTION();
+			initialize_rma_page(rmAccess->currArrayBuf);
+			MarkBufferDirty(rmAccess->currArrayBuf);
+			if (RelationNeedsWAL(rmAccess->idxrel))
+			{
+				xl_minmax_init_rmpg	xlrec;
+				XLogRecPtr		recptr;
+				XLogRecData		rdata;
+
+				xlrec.node = rmAccess->idxrel->rd_node;
+				xlrec.blkno = BufferGetBlockNumber(rmAccess->currArrayBuf);
+				xlrec.array = true;
+				xlrec.logblk = InvalidBlockNumber;
+
+				rdata.data = (char *) &xlrec;
+				rdata.len = SizeOfMinmaxInitRmpg;
+				rdata.buffer = InvalidBuffer;	/* FIXME */
+				rdata.buffer_std = false;
+				rdata.next = NULL;
+
+				recptr = XLogInsert(RM_MINMAX_ID, XLOG_MINMAX_INIT_RMPG, &rdata);
+				PageSetLSN(BufferGetPage(rmAccess->currArrayBuf), recptr);
+			}
+			END_CRIT_SECTION();
+			LockBuffer(rmAccess->currArrayBuf, BUFFER_LOCK_UNLOCK);
+			newPgBlkno = BufferGetBlockNumber(rmAccess->currArrayBuf);
+			rmAccess->revmapArrayPages[arrayBlkIdx] = newPgBlkno;
+
+			MINMAX_elog(DEBUG2, "allocated block for revmap array page: %u",
+				 BufferGetBlockNumber(rmAccess->currArrayBuf));
+
+			/* Update the metapage to point to the new array page. */
+			update_minmax_metapg(rmAccess->idxrel, rmAccess->metaBuf, arrayBlkIdx,
+								 newPgBlkno);
+		}
+
+		LockBuffer(rmAccess->metaBuf, BUFFER_LOCK_UNLOCK);
+		arrayBlk = rmAccess->revmapArrayPages[arrayBlkIdx];
+	}
+
+	/*
+	 * By here, we know the array page is set in the metapage array.  Read that
+	 * page; except that if we just allocated it, or we already hold pin on it,
+	 * we don't need to read it again.  XXX but we didn't hold lock!
+	 */
+	Assert(arrayBlk != InvalidBlockNumber);
+
+	if (rmAccess->currArrayBuf == InvalidBuffer ||
+		BufferGetBlockNumber(rmAccess->currArrayBuf) != arrayBlk)
+	{
+		if (rmAccess->currArrayBuf != InvalidBuffer)
+			ReleaseBuffer(rmAccess->currArrayBuf);
+
+		rmAccess->currArrayBuf =
+			ReadBuffer(rmAccess->idxrel, arrayBlk);
+	}
+
+	LockBuffer(rmAccess->currArrayBuf, BUFFER_LOCK_SHARE);
+
+	/*
+	 * And now we can inspect its contents; if the target page is set, we can
+	 * just return.  Even if not set, we can also return if caller asked us not
+	 * to extend the revmap.
+	 */
+	contents = (RevmapArrayContents *)
+		PageGetContents(BufferGetPage(rmAccess->currArrayBuf));
+	revmapIdx = MAPBLK_TO_RMARRAY_INDEX(mapBlk);
+	if (!extend || revmapIdx <= contents->rma_nblocks - 1)
+	{
+		LockBuffer(rmAccess->currArrayBuf, BUFFER_LOCK_UNLOCK);
+
+		return contents->rma_blocks[revmapIdx];
+	}
+
+	/*
+	 * Trade our shared lock in the array page for exclusive, because we now
+	 * need to allocate one more revmap page and modify the array page.
+	 */
+	LockBuffer(rmAccess->currArrayBuf, BUFFER_LOCK_UNLOCK);
+	LockBuffer(rmAccess->currArrayBuf, BUFFER_LOCK_EXCLUSIVE);
+
+	contents = (RevmapArrayContents *)
+		PageGetContents(BufferGetPage(rmAccess->currArrayBuf));
+
+	/*
+	 * If someone else already set the value while we were waiting for the
+	 * exclusive lock, we're done; otherwise, allocate a new block as the
+	 * new revmap page, and update the array page to point to it.
+	 *
+	 * FIXME -- what if we were asked not to extend?
+	 */
+	if (contents->rma_blocks[revmapIdx] != InvalidBlockNumber)
+	{
+		targetblk = contents->rma_blocks[revmapIdx];
+	}
+	else
+	{
+		Buffer		newbuf;
+
+		newbuf = mm_getnewbuffer(rmAccess->idxrel);
+		START_CRIT_SECTION();
+		targetblk = initialize_rmr_page(newbuf, mapBlk);
+		MarkBufferDirty(newbuf);
+		if (RelationNeedsWAL(rmAccess->idxrel))
+		{
+			xl_minmax_init_rmpg	xlrec;
+			XLogRecPtr	recptr;
+			XLogRecData	rdata;
+
+			xlrec.node = rmAccess->idxrel->rd_node;
+			xlrec.blkno = BufferGetBlockNumber(newbuf);
+			xlrec.array = false;
+			xlrec.logblk = mapBlk;
+
+			rdata.data = (char *) &xlrec;
+			rdata.len = SizeOfMinmaxInitRmpg;
+			rdata.buffer = InvalidBuffer;
+			rdata.buffer_std = false;
+			rdata.next = NULL;
+
+			recptr = XLogInsert(RM_MINMAX_ID, XLOG_MINMAX_INIT_RMPG, &rdata);
+			PageSetLSN(BufferGetPage(newbuf), recptr);
+		}
+		END_CRIT_SECTION();
+
+		UnlockReleaseBuffer(newbuf);
+
+		/*
+		 * Modify the revmap array page to point to the newly allocated revmap
+		 * page.
+		 */
+		START_CRIT_SECTION();
+
+		contents->rma_blocks[revmapIdx] = targetblk;
+		/*
+		 * XXX this rma_nblocks assignment should probably be conditional on the
+		 * current rma_blocks value.
+		 */
+		contents->rma_nblocks = revmapIdx + 1;
+		MarkBufferDirty(rmAccess->currArrayBuf);
+
+		/* XLOG stuff */
+		if (RelationNeedsWAL(rmAccess->idxrel))
+		{
+			xl_minmax_rmarray_set	xlrec;
+			XLogRecPtr		recptr;
+			XLogRecData		rdata[2];
+			uint8			info;
+
+			info = XLOG_MINMAX_RMARRAY_SET;
+
+			xlrec.node = rmAccess->idxrel->rd_node;
+			xlrec.rmarray = BufferGetBlockNumber(rmAccess->currArrayBuf);
+			xlrec.blkidx = revmapIdx;
+			xlrec.newpg = targetblk;
+
+			rdata[0].data = (char *) &xlrec;
+			rdata[0].len = SizeOfMinmaxRmarraySet;
+			rdata[0].buffer = InvalidBuffer;
+			rdata[0].buffer_std = false;
+			rdata[0].next = &rdata[1];
+
+			rdata[1].data = NULL;
+			rdata[1].len = 0;
+			rdata[1].buffer = rmAccess->currArrayBuf;
+			rdata[1].buffer_std = false;
+			rdata[1].next = NULL;
+
+			recptr = XLogInsert(RM_MINMAX_ID, info, rdata);
+			PageSetLSN(BufferGetPage(rmAccess->currArrayBuf), recptr);
+		}
+
+		END_CRIT_SECTION();
+	}
+
+	LockBuffer(rmAccess->currArrayBuf, BUFFER_LOCK_UNLOCK);
+
+	return targetblk;
+}
+
+/*
+ * Set the TID of the index entry corresponding to the range that includes
+ * the given heap page to the given item pointer.
+ *
+ * The map is extended, if necessary.
+ */
+void
+mmSetHeapBlockItemptr(mmRevmapAccess *rmAccess, BlockNumber heapBlk,
+					  BlockNumber blkno, OffsetNumber offno)
+{
+	BlockNumber mapBlk;
+	bool		extend = false;
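+
+	/*
+	 * XXX "extend" is never set to true here, so the XLOG_MINMAX_INIT_PAGE
+	 * branch below is currently unreachable.
+	 */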
+
+	mapBlk = HEAPBLK_TO_REVMAP_BLK(rmAccess->pagesPerRange, heapBlk);
+
+	/* Translate the map block number to physical location */
+	mapBlk = rm_get_phys_blkno(rmAccess, mapBlk, true);
+
+	MINMAX_elog(DEBUG2, "setting %u/%u in logical page %lu (physical %u) for heap %u",
+				blkno, offno,
+				HEAPBLK_TO_REVMAP_BLK(rmAccess->pagesPerRange, heapBlk),
+				mapBlk, heapBlk);
+
+	/*
+	 * Obtain the buffer from which we need to read.  If we already have the
+	 * correct buffer in our access struct, use that; otherwise, release that,
+	 * (if valid) and read the one we need.
+	 */
+	if (rmAccess->currBuf == InvalidBuffer ||
+		mapBlk != BufferGetBlockNumber(rmAccess->currBuf))
+	{
+		if (rmAccess->currBuf != InvalidBuffer)
+			ReleaseBuffer(rmAccess->currBuf);
+
+		Assert(mapBlk != InvalidBlockNumber);
+		rmAccess->currBuf = ReadBuffer(rmAccess->idxrel, mapBlk);
+	}
+
+	LockBuffer(rmAccess->currBuf, BUFFER_LOCK_EXCLUSIVE);
+	START_CRIT_SECTION();
+
+	rm_page_set_iptr(BufferGetPage(rmAccess->currBuf),
+					 rmAccess->pagesPerRange,
+					 heapBlk,
+					 blkno, offno);
+
+	MarkBufferDirty(rmAccess->currBuf);
+
+	/* XLOG stuff */
+	if (RelationNeedsWAL(rmAccess->idxrel))
+	{
+		xl_minmax_rm_set	xlrec;
+		XLogRecPtr	recptr;
+		XLogRecData	rdata[2];
+		uint8		info;
+
+		info = XLOG_MINMAX_REVMAP_SET;
+
+		xlrec.node = rmAccess->idxrel->rd_node;
+		xlrec.mapBlock = mapBlk;
+		xlrec.pagesPerRange = rmAccess->pagesPerRange;
+		xlrec.heapBlock = heapBlk;
+		ItemPointerSet(&(xlrec.newval), blkno, offno);
+
+		rdata[0].data = (char *) &xlrec;
+		rdata[0].len = SizeOfMinmaxRevmapSet;
+		rdata[0].buffer = InvalidBuffer;
+		rdata[0].buffer_std = false;
+		rdata[0].next = &(rdata[1]);
+
+		rdata[1].data = NULL;
+		rdata[1].len = 0;
+		rdata[1].buffer = rmAccess->currBuf;
+		rdata[1].buffer_std = false;
+		rdata[1].next = NULL;
+
+		if (extend)
+		{
+			info |= XLOG_MINMAX_INIT_PAGE;
+			/* If the page is new, there's no need for a full page image */
+			rdata[0].next = NULL;
+		}
+
+		recptr = XLogInsert(RM_MINMAX_ID, info, rdata);
+		PageSetLSN(BufferGetPage(rmAccess->currBuf), recptr);
+	}
+
+	END_CRIT_SECTION();
+
+	LockBuffer(rmAccess->currBuf, BUFFER_LOCK_UNLOCK);
+}
+
+
+/*
+ * Return the TID of the index entry corresponding to the range that includes
+ * the given heap page.  If the TID is valid, the tuple is locked with
+ * LockTuple.  It is the caller's responsibility to release that lock.
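+ *
+ * Locking the tuple while still holding the revmap buffer lock ensures that
+ * the index tuple cannot be removed by a concurrent vacuum between the
+ * moment we read its TID here and the moment the caller inspects the tuple.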
+ */
+void
+mmGetHeapBlockItemptr(mmRevmapAccess *rmAccess, BlockNumber heapBlk,
+					  ItemPointerData *out)
+{
+	BlockNumber mapBlk;
+	RevmapContents *contents;
+	ItemPointerData *iptr;
+
+	mapBlk = HEAPBLK_TO_REVMAP_BLK(rmAccess->pagesPerRange, heapBlk);
+	/* Translate the map block number to physical location */
+	mapBlk = rm_get_phys_blkno(rmAccess, mapBlk, false);
+	if (mapBlk == InvalidBlockNumber)
+	{
+		ItemPointerSetInvalid(out);
+		return;
+	}
+
+	if (rmAccess->currBuf == InvalidBuffer ||
+		BufferGetBlockNumber(rmAccess->currBuf) != mapBlk)
+	{
+		if (rmAccess->currBuf != InvalidBuffer)
+			ReleaseBuffer(rmAccess->currBuf);
+
+		Assert(mapBlk != InvalidBlockNumber);
+		rmAccess->currBuf = ReadBuffer(rmAccess->idxrel, mapBlk);
+	}
+
+	LockBuffer(rmAccess->currBuf, BUFFER_LOCK_SHARE);
+
+	contents = (RevmapContents *)
+		PageGetContents(BufferGetPage(rmAccess->currBuf));
+	iptr = contents->rmr_tids;
+	iptr += HEAPBLK_TO_REVMAP_INDEX(rmAccess->pagesPerRange, heapBlk);
+
+	ItemPointerCopy(iptr, out);
+
+	if (ItemPointerIsValid(iptr))
+		LockTuple(rmAccess->idxrel, iptr, ShareLock);
+
+	LockBuffer(rmAccess->currBuf, BUFFER_LOCK_UNLOCK);
+}
+
+/*
+ * Initialize the revmap of a new minmax index.
+ *
+ * NB -- caller is assumed to WAL-log this operation
+ */
+void
+mmRevmapCreate(Relation idxrel)
+{
+	Buffer		buf;
+
+	/*
+	 * The first page of the revmap is always stored in block number 1 of the
+	 * main fork.  Because of this, the only thing we need to do is request
+	 * a new page; we assume we are called immediately after the metapage has
+	 * been initialized.
+	 */
+	buf = mm_getnewbuffer(idxrel);
+	Assert(BufferGetBlockNumber(buf) == 1);
+
+	mm_page_init(BufferGetPage(buf), MINMAX_PAGETYPE_REVMAP);
+	MarkBufferDirty(buf);
+
+	UnlockReleaseBuffer(buf);
+}
diff --git a/src/backend/access/minmax/mmsortable.c b/src/backend/access/minmax/mmsortable.c
new file mode 100644
index 0000000..17095af
--- /dev/null
+++ b/src/backend/access/minmax/mmsortable.c
@@ -0,0 +1,265 @@
+/*
+ * mmsortable.c
+ *		Implementation of Minmax indexes for sortable datatypes
+ *		(that is, anything with a btree opclass)
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *	  src/backend/access/minmax/mmsortable.c
+ */
+#include "postgres.h"
+
+#include "access/genam.h"
+#include "access/minmax_internal.h"
+#include "access/minmax_tuple.h"
+#include "access/skey.h"
+#include "utils/datum.h"
+#include "utils/lsyscache.h"
+#include "utils/syscache.h"
+
+
+/*
+ * Procedure numbers must not collide with MINMAX_PROCNUM defines in
+ * minmax_internal.h.  Note we only need inequality functions.
+ */
+#define		SORTABLE_NUM_PROCNUMS	4	/* # support procs we need */
+#define		PROCNUM_LESS			4
+#define		PROCNUM_LESSEQUAL		5
+#define		PROCNUM_GREATER			6
+#define		PROCNUM_GREATEREQUAL	7
+
+/* subtract this from procnum to obtain index in SortableOpaque arrays */
+#define		PROCNUM_BASE			4
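+
+/* e.g., PROCNUM_GREATER (6) is cached at array index 6 - 4 = 2 */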
+
+static FmgrInfo *mmsrt_get_procinfo(MinmaxDesc *mmdesc, uint16 attno,
+				   uint16 procnum);
+
+PG_FUNCTION_INFO_V1(mmSortableOpcInfo);
+PG_FUNCTION_INFO_V1(mmSortableAddValue);
+PG_FUNCTION_INFO_V1(mmSortableConsistent);
+
+Datum mmSortableOpcInfo(PG_FUNCTION_ARGS);
+Datum mmSortableAddValue(PG_FUNCTION_ARGS);
+Datum mmSortableConsistent(PG_FUNCTION_ARGS);
+
+typedef struct SortableOpaque
+{
+	FmgrInfo	operators[SORTABLE_NUM_PROCNUMS];
+	bool		inited[SORTABLE_NUM_PROCNUMS];
+} SortableOpaque;
+
+/*
+ * Return a newly palloc'ed MinmaxOpcInfo describing what we need to build a
+ * minmax index: the number of datums stored per column (min and max), plus
+ * an opaque area used to cache the comparison procedures.
+ */
+Datum
+mmSortableOpcInfo(PG_FUNCTION_ARGS)
+{
+	SortableOpaque *opaque;
+	MinmaxOpcInfo *result;
+
+	opaque = palloc0(sizeof(SortableOpaque));
+	/*
+	 * 'operators' is initialized lazily, as indicated by 'inited' which was
+	 * initialized to all false by palloc0.
+	 */
+
+	result = palloc(sizeof(MinmaxOpcInfo));
+	result->oi_nstored = 2;		/* min, max */
+	result->oi_opaque = opaque;
+
+	PG_RETURN_POINTER(result);
+}
+
+/*
+ * Examine the given index tuple (which contains partial status of a certain
+ * page range) by comparing it to the given value that comes from another heap
+ * tuple.  If the new value is outside the interval specified by the existing
+ * tuple values, update the index range and return true.  Otherwise, return
+ * false and do not modify the tuple.  For example, adding the value 5 to a
+ * column whose current range is [10, 100] widens the minimum to 5.
+ */
+Datum
+mmSortableAddValue(PG_FUNCTION_ARGS)
+{
+	MinmaxDesc	   *mmdesc = (MinmaxDesc *) PG_GETARG_POINTER(0);
+	DeformedMMTuple *dtuple = (DeformedMMTuple *) PG_GETARG_POINTER(1);
+	AttrNumber		attno = PG_GETARG_UINT16(2);
+	Datum			newval = PG_GETARG_DATUM(3);
+	bool			isnull = PG_GETARG_BOOL(4);
+	Oid				colloid = PG_GET_COLLATION();
+	FmgrInfo	   *cmpFn;
+	Datum			compar;
+	bool			updated = false;
+
+	/*
+	 * If the new value is null, we record that we saw it if it's the first
+	 * one; otherwise, there's nothing to do.
+	 */
+	if (isnull)
+	{
+		if (dtuple->dt_columns[attno - 1].hasnulls)
+			PG_RETURN_BOOL(false);
+
+		dtuple->dt_columns[attno - 1].hasnulls = true;
+		PG_RETURN_BOOL(true);
+	}
+
+	/*
+	 * If the recorded value is null, store the new value (which we know to be
+	 * not null) as both minimum and maximum, and we're done.
+	 */
+	if (dtuple->dt_columns[attno - 1].allnulls)
+	{
+		dtuple->dt_columns[attno - 1].values[0] =
+			datumCopy(newval, mmdesc->md_tupdesc->attrs[attno - 1]->attbyval,
+					  mmdesc->md_tupdesc->attrs[attno - 1]->attlen);
+		dtuple->dt_columns[attno - 1].values[1] =
+			datumCopy(newval, mmdesc->md_tupdesc->attrs[attno - 1]->attbyval,
+					  mmdesc->md_tupdesc->attrs[attno - 1]->attlen);
+		dtuple->dt_columns[attno - 1].allnulls = false;
+		PG_RETURN_BOOL(true);
+	}
+
+	/*
+	 * Otherwise, need to compare the new value with the existing boundaries
+	 * and update them accordingly.  First check if it's less than the existing
+	 * minimum.
+	 */
+	cmpFn = mmsrt_get_procinfo(mmdesc, attno, PROCNUM_LESS);
+	compar = FunctionCall2Coll(cmpFn, colloid, newval,
+							   dtuple->dt_columns[attno - 1].values[0]);
+	if (DatumGetBool(compar))
+	{
+		dtuple->dt_columns[attno - 1].values[0] =
+			datumCopy(newval, mmdesc->md_tupdesc->attrs[attno - 1]->attbyval,
+					  mmdesc->md_tupdesc->attrs[attno - 1]->attlen);
+		updated = true;
+	}
+
+	/*
+	 * And now compare it to the existing maximum.
+	 */
+	cmpFn = mmsrt_get_procinfo(mmdesc, attno, PROCNUM_GREATER);
+	compar = FunctionCall2Coll(cmpFn, colloid, newval,
+							   dtuple->dt_columns[attno - 1].values[1]);
+	if (DatumGetBool(compar))
+	{
+		dtuple->dt_columns[attno - 1].values[1] =
+			datumCopy(newval, mmdesc->md_tupdesc->attrs[attno - 1]->attbyval,
+					  mmdesc->md_tupdesc->attrs[attno - 1]->attlen);
+		updated = true;
+	}
+
+	PG_RETURN_BOOL(updated);
+}
+
+/*
+ * Given an index tuple corresponding to a certain page range, and a scan key
+ * (represented by its index attribute number, the value and an operator
+ * strategy number), return whether the scan key is consistent with the page
+ * range.  Return true if so, false otherwise.
+ *
+ * XXX what do we need to do with NULL values here?
+ *
+ * XXX would it be better to pass the ScanKey as a whole rather than parts of
+ * it?
+ */
+Datum
+mmSortableConsistent(PG_FUNCTION_ARGS)
+{
+	MinmaxDesc *mmdesc = (MinmaxDesc *) PG_GETARG_POINTER(0);
+	DeformedMMTuple *dtup = (DeformedMMTuple *) PG_GETARG_POINTER(1);
+	AttrNumber	attno = PG_GETARG_INT16(2);
+	StrategyNumber strat = PG_GETARG_UINT16(3);
+	Datum		value = PG_GETARG_DATUM(4);
+	Datum		matches;
+	Oid			colloid = PG_GET_COLLATION();
+
+	switch (strat)
+	{
+		case BTLessStrategyNumber:
+			matches = FunctionCall2Coll(mmsrt_get_procinfo(mmdesc, attno,
+														   PROCNUM_LESS),
+										colloid,
+										dtup->dt_columns[attno - 1].values[0],
+										value);
+			break;
+		case BTLessEqualStrategyNumber:
+			matches = FunctionCall2Coll(mmsrt_get_procinfo(mmdesc, attno,
+														   PROCNUM_LESSEQUAL),
+										colloid,
+										dtup->dt_columns[attno - 1].values[0],
+										value);
+			break;
+		case BTEqualStrategyNumber:
+
+			/*
+			 * In the equality case (WHERE col = someval), we want to return
+			 * the current page range if the minimum value in the range <= scan
+			 * key, and the maximum value >= scan key.
+			 */
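+			/* e.g., min 10 / max 100 is consistent with "col = 42" */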
+			matches = FunctionCall2Coll(mmsrt_get_procinfo(mmdesc, attno,
+														   PROCNUM_LESSEQUAL),
+										colloid,
+										dtup->dt_columns[attno - 1].values[0],
+										value);
+			if (!DatumGetBool(matches))
+				break;
+			/* max() >= scankey */
+			matches = FunctionCall2Coll(mmsrt_get_procinfo(mmdesc, attno,
+														   PROCNUM_GREATEREQUAL),
+										colloid,
+										dtup->dt_columns[attno - 1].values[1],
+										value);
+			break;
+		case BTGreaterEqualStrategyNumber:
+			matches = FunctionCall2Coll(mmsrt_get_procinfo(mmdesc, attno,
+														   PROCNUM_GREATEREQUAL),
+										colloid,
+										dtup->dt_columns[attno - 1].values[1],
+										value);
+			break;
+		case BTGreaterStrategyNumber:
+			matches = FunctionCall2Coll(mmsrt_get_procinfo(mmdesc, attno,
+														   PROCNUM_GREATER),
+										colloid,
+										dtup->dt_columns[attno - 1].values[1],
+										value);
+			break;
+		default:
+			/* shouldn't happen */
+			elog(ERROR, "invalid strategy number %d", strat);
+			matches = 0;
+			break;
+	}
+
+	PG_RETURN_DATUM(matches);
+}
+
+/*
+ * Return the procedure corresponding to the given function support number.
+ */
+static FmgrInfo *
+mmsrt_get_procinfo(MinmaxDesc *mmdesc, uint16 attno, uint16 procnum)
+{
+	SortableOpaque *opaque;
+	uint16	basenum = procnum - PROCNUM_BASE;
+
+	opaque = (SortableOpaque *) mmdesc->md_info[attno - 1]->oi_opaque;
+
+	/*
+	 * We cache these in the opaque struct, to avoid repetitive syscache
+	 * lookups.
+	 */
+	if (!opaque->inited[basenum])
+	{
+		fmgr_info_copy(&opaque->operators[basenum],
+					   index_getprocinfo(mmdesc->md_index, attno, procnum),
+					   CurrentMemoryContext);
+		opaque->inited[basenum] = true;
+	}
+
+	return &opaque->operators[basenum];
+}
diff --git a/src/backend/access/minmax/mmtuple.c b/src/backend/access/minmax/mmtuple.c
new file mode 100644
index 0000000..445f558
--- /dev/null
+++ b/src/backend/access/minmax/mmtuple.c
@@ -0,0 +1,454 @@
+/*
+ * MinMax-specific tuples
+ *		Method implementations for tuples in minmax indexes.
+ *
+ * Intended usage is that code outside this file only deals with
+ * DeformedMMTuples, and convert to and from the on-disk representation through
+ * functions in this file.
+ *
+ * NOTES
+ *
+ * A minmax tuple is similar to a heap tuple, with a few key differences.  The
+ * first interesting difference is that the tuple header is much simpler, only
+ * containing its total length and a small area for flags.	Also, the stored
+ * data does not match the relation tuple descriptor exactly: for each
+ * attribute in the descriptor, the index tuple carries an arbitrary number
+ * of values, depending on the opclass.
+ *
+ * Also, for each column of the index relation there are two null bits: one
+ * (hasnulls) stores whether any tuple within the page range has that column
+ * set to null; the other one (allnulls) stores whether the column values are
+ * all null.  If allnulls is true, then the tuple data area does not contain
+ * values for that column at all, whereas it does if only hasnulls is set.
+ * Note the size of the null bitmask may not be the same as that of the
+ * datum array.
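+ *
+ * As with heap tuples, a set bit in the stored bitmask means the condition
+ * does not hold; that is, a set bit in the first (allnulls) half means the
+ * column does have stored values, and a set bit in the second (hasnulls)
+ * half means no null values were seen (see minmax_form_tuple).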
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *	  src/backend/access/minmax/mmtuple.c
+ */
+#include "postgres.h"
+
+#include "access/htup_details.h"
+#include "access/minmax_tuple.h"
+#include "access/tupdesc.h"
+#include "access/tupmacs.h"
+
+
+static inline void mm_deconstruct_tuple(MinmaxDesc *mmdesc,
+					 char *tp, bits8 *nullbits, bool nulls,
+					 Datum *values, bool *allnulls, bool *hasnulls);
+
+
+/*
+ * Return a tuple descriptor used for on-disk storage of minmax tuples.
+ */
+static TupleDesc
+mmtuple_disk_tupdesc(MinmaxDesc *mmdesc)
+{
+	/* We cache these in the MinmaxDesc */
+	if (mmdesc->md_disktdesc == NULL)
+	{
+		int			i;
+		int			j;
+		AttrNumber	attno = 1;
+		TupleDesc	tupdesc;
+
+		tupdesc = CreateTemplateTupleDesc(mmdesc->md_totalstored, false);
+
+		for (i = 0; i < mmdesc->md_tupdesc->natts; i++)
+		{
+			for (j = 0; j < mmdesc->md_info[i]->oi_nstored; j++)
+				TupleDescInitEntry(tupdesc, attno++, NULL,
+								   mmdesc->md_tupdesc->attrs[i]->atttypid,
+								   mmdesc->md_tupdesc->attrs[i]->atttypmod,
+								   0);
+		}
+
+		mmdesc->md_disktdesc = tupdesc;
+	}
+
+	return mmdesc->md_disktdesc;
+}
+
+/*
+ * Generate a new on-disk tuple to be inserted in a minmax index.
+ */
+MMTuple *
+minmax_form_tuple(MinmaxDesc *mmdesc, DeformedMMTuple *tuple, Size *size)
+{
+	Datum	   *values;
+	bool	   *nulls;
+	bool		anynulls = false;
+	MMTuple    *rettuple;
+	int			keyno;
+	int			idxattno;
+	uint16		phony_infomask;
+	bits8	   *phony_nullbitmap;
+	Size		len,
+				hoff,
+				data_len;
+
+	Assert(mmdesc->md_totalstored > 0);
+
+	values = palloc(sizeof(Datum) * mmdesc->md_totalstored);
+	nulls = palloc0(sizeof(bool) * mmdesc->md_totalstored);
+	phony_nullbitmap = palloc(sizeof(bits8) * BITMAPLEN(mmdesc->md_totalstored));
+
+	/*
+	 * Set up the values/nulls arrays for heap_fill_tuple
+	 */
+	for (idxattno = 0, keyno = 0; keyno < mmdesc->md_tupdesc->natts; keyno++)
+	{
+		int		datumno;
+
+		/*
+		 * "allnulls" is set when there's no nonnull value in any row in
+		 * the column; when this happens, there is no data to store.  Thus
+		 * set the null bits for all data elements of this column and
+		 * we're done.
+		 */
+		if (tuple->dt_columns[keyno].allnulls)
+		{
+			for (datumno = 0;
+				 datumno < mmdesc->md_info[keyno]->oi_nstored;
+				 datumno++)
+				nulls[idxattno++] = true;
+			anynulls = true;
+			continue;
+		}
+
+		/*
+		 * The "hasnulls" bit is set when there are some null values in the
+		 * data.  We still need to store a real value, but the presence of this
+		 * means we need a null bitmap.
+		 */
+		if (tuple->dt_columns[keyno].hasnulls)
+			anynulls = true;
+
+		for (datumno = 0;
+			 datumno < mmdesc->md_info[keyno]->oi_nstored;
+			 datumno++)
+			/* XXX datumCopy ?? */
+			values[idxattno++] = tuple->dt_columns[keyno].values[datumno];
+	}
+
+	/* compute total space needed */
+	len = SizeOfMinMaxTuple;
+	if (anynulls)
+	{
+		/*
+		 * We need a double-length bitmap on an on-disk minmax index tuple;
+		 * the first half stores the "allnulls" bits, the second stores
+		 * "hasnulls".
+		 */
+		len += BITMAPLEN(mmdesc->md_tupdesc->natts * 2);
+	}
+
+	/*
+	 * TODO: we can probably do away with alignment here, and save some
+	 * precious disk space.  When there's no bitmap we can save 6 bytes. Maybe
+	 * we can use the first col's type alignment instead of maxalign.
+	 */
+	len = hoff = MAXALIGN(len);
+
+	data_len = heap_compute_data_size(mmtuple_disk_tupdesc(mmdesc),
+									  values, nulls);
+
+	len += data_len;
+
+	rettuple = palloc0(len);
+	rettuple->mt_info = hoff;
+	Assert((rettuple->mt_info & MMIDX_OFFSET_MASK) == hoff);
+
+	/*
+	 * The infomask and null bitmap as computed by heap_fill_tuple are useless
+	 * to us.  However, that function will not accept a null infomask; and we
+	 * need to pass a valid null bitmap so that it will correctly skip
+	 * outputting null attributes in the data area.
+	 */
+	heap_fill_tuple(mmtuple_disk_tupdesc(mmdesc),
+					values,
+					nulls,
+					(char *) rettuple + hoff,
+					data_len,
+					&phony_infomask,
+					phony_nullbitmap);
+
+	/* done with these */
+	pfree(values);
+	pfree(nulls);
+	pfree(phony_nullbitmap);
+
+	/*
+	 * Now fill in the real null bitmasks.	allnulls first.
+	 */
+	if (anynulls)
+	{
+		bits8	   *bitP;
+		int			bitmask;
+
+		rettuple->mt_info |= MMIDX_NULLS_MASK;
+
+		bitP = ((bits8 *) ((char *) rettuple + SizeOfMinMaxTuple)) - 1;
+		bitmask = HIGHBIT;
+		for (keyno = 0; keyno < mmdesc->md_tupdesc->natts; keyno++)
+		{
+			if (bitmask != HIGHBIT)
+				bitmask <<= 1;
+			else
+			{
+				bitP += 1;
+				*bitP = 0x0;
+				bitmask = 1;
+			}
+
+			if (tuple->dt_columns[keyno].allnulls)
+				continue;
+
+			*bitP |= bitmask;
+		}
+		/* hasnulls bits follow */
+		for (keyno = 0; keyno < mmdesc->md_tupdesc->natts; keyno++)
+		{
+			if (bitmask != HIGHBIT)
+				bitmask <<= 1;
+			else
+			{
+				bitP += 1;
+				*bitP = 0x0;
+				bitmask = 1;
+			}
+
+			if (tuple->dt_columns[keyno].hasnulls)
+				continue;
+
+			*bitP |= bitmask;
+		}
+	}
+
+	*size = len;
+	return rettuple;
+}
+
+/*
+ * Free a tuple created by minmax_form_tuple
+ */
+void
+minmax_free_tuple(MMTuple *tuple)
+{
+	pfree(tuple);
+}
+
+DeformedMMTuple *
+minmax_new_dtuple(MinmaxDesc *mmdesc)
+{
+	DeformedMMTuple *dtup;
+	char   *currdatum;
+	long	basesize;
+	int		i;
+
+	basesize = MAXALIGN(sizeof(DeformedMMTuple) +
+						sizeof(MMValues) * mmdesc->md_tupdesc->natts);
+	dtup = palloc0(basesize + sizeof(Datum) * mmdesc->md_totalstored);
+	currdatum = (char *) dtup + basesize;
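+
+	/*
+	 * All columns' values arrays are carved out of this same allocation:
+	 * each column's pointer is aimed at the next oi_nstored Datum slots
+	 * past the MAXALIGN'ed base struct.
+	 */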
+	for (i = 0; i < mmdesc->md_tupdesc->natts; i++)
+	{
+		dtup->dt_columns[i].allnulls = true;
+		dtup->dt_columns[i].hasnulls = false;
+		dtup->dt_columns[i].values = (Datum *) currdatum;
+		currdatum += sizeof(Datum) * mmdesc->md_info[i]->oi_nstored;
+	}
+
+	return dtup;
+}
+
+/*
+ * Reset a DeformedMMTuple to initial state
+ */
+void
+minmax_dtuple_initialize(DeformedMMTuple *dtuple, MinmaxDesc *mmdesc)
+{
+	int		i;
+
+	dtuple->dt_seentup = false;
+
+	for (i = 0; i < mmdesc->md_tupdesc->natts; i++)
+	{
+		/*
+		 * FIXME -- we may need to pfree() some datums here before clobbering
+		 * the whole thing
+		 */
+		dtuple->dt_columns[i].allnulls = true;
+		dtuple->dt_columns[i].hasnulls = false;
+		memset(dtuple->dt_columns[i].values, 0,
+			   sizeof(Datum) * mmdesc->md_info[i]->oi_nstored);
+	}
+}
+
+/*
+ * Convert a MMTuple back to a DeformedMMTuple.  This is the reverse of
+ * minmax_form_tuple.
+ *
+ * Note we don't need the "on disk tupdesc" here; we rely on our own routine to
+ * deconstruct the tuple from the on-disk format.
+ *
+ * XXX some callers might need copies of each datum; if so we need to apply
+ * datumCopy inside the loop.	We probably also need a minmax_free_dtuple()
+ * function.
+ */
+DeformedMMTuple *
+minmax_deform_tuple(MinmaxDesc *mmdesc, MMTuple *tuple)
+{
+	DeformedMMTuple *dtup;
+	Datum	   *values;
+	bool	   *allnulls;
+	bool	   *hasnulls;
+	char	   *tp;
+	bits8	   *nullbits;
+	int			keyno;
+	int			valueno;
+
+	dtup = minmax_new_dtuple(mmdesc);
+
+	values = palloc(sizeof(Datum) * mmdesc->md_totalstored);
+	allnulls = palloc(sizeof(bool) * mmdesc->md_tupdesc->natts);
+	hasnulls = palloc(sizeof(bool) * mmdesc->md_tupdesc->natts);
+
+	tp = (char *) tuple + MMTupleDataOffset(tuple);
+
+	if (MMTupleHasNulls(tuple))
+		nullbits = (bits8 *) ((char *) tuple + SizeOfMinMaxTuple);
+	else
+		nullbits = NULL;
+	mm_deconstruct_tuple(mmdesc,
+						 tp, nullbits, MMTupleHasNulls(tuple),
+						 values, allnulls, hasnulls);
+
+	/*
+	 * Iterate to assign each of the values to the corresponding item
+	 * in the values array of each column.
+	 */
+	for (valueno = 0, keyno = 0; keyno < mmdesc->md_tupdesc->natts; keyno++)
+	{
+		int		i;
+
+		if (allnulls[keyno])
+		{
+			valueno += mmdesc->md_info[keyno]->oi_nstored;
+			continue;
+		}
+
+		/* the values arrays were already allocated by minmax_new_dtuple */
+
+		/* XXX optional datumCopy()? */
+		for (i = 0; i < mmdesc->md_info[keyno]->oi_nstored; i++)
+			dtup->dt_columns[keyno].values[i] = values[valueno++];
+
+		dtup->dt_columns[keyno].hasnulls = hasnulls[keyno];
+		dtup->dt_columns[keyno].allnulls = false;
+	}
+
+	pfree(values);
+	pfree(allnulls);
+	pfree(hasnulls);
+
+	return dtup;
+}
+
+/*
+ * mm_deconstruct_tuple
+ *		Guts of attribute extraction from an on-disk minmax tuple.
+ *
+ * Its arguments are:
+ *	mmdesc		minmax descriptor for the stored tuple
+ *	tp			pointer to the tuple data area
+ *	nullbits	pointer to the tuple nulls bitmask
+ *	nulls		"has nulls" bit in tuple infomask
+ *	values		output values, array of size mmdesc->md_totalstored
+ *	allnulls	output "allnulls", size mmdesc->md_tupdesc->natts
+ *	hasnulls	output "hasnulls", size mmdesc->md_tupdesc->natts
+ *
+ * Output arrays must have been allocated by caller.
+ */
+static inline void
+mm_deconstruct_tuple(MinmaxDesc *mmdesc,
+					 char *tp, bits8 *nullbits, bool nulls,
+					 Datum *values, bool *allnulls, bool *hasnulls)
+{
+	int			attnum;
+	int			stored;
+	TupleDesc	diskdsc;
+	long		off = 0;
+
+	/*
+	 * First, loop over the attributes to obtain both null flags for each one.
+	 */
+	for (attnum = 0; attnum < mmdesc->md_tupdesc->natts; attnum++)
+	{
+		/*
+		 * the "all nulls" bit means that all values in the page range for
+		 * this column are nulls.  Therefore there are no values in the tuple
+		 * data area.
+		 */
+		if (nulls && att_isnull(attnum, nullbits))
+		{
+			allnulls[attnum] = true;
+			continue;
+		}
+
+		allnulls[attnum] = false;
+
+		/*
+		 * the "has nulls" bit means that some tuples have nulls, but others
+		 * have not-null values.  Therefore we know the tuple contains data for
+		 * this column.
+		 *
+		 * The hasnulls bits follow the allnulls bits in the same bitmask.
+		 */
+		hasnulls[attnum] =
+			nulls && att_isnull(mmdesc->md_tupdesc->natts + attnum, hasnulls);
+	}
+
+	/*
+	 * Iterate to obtain each attribute's stored values.  Note that since we
+	 * may reuse attribute entries for more than one column, we cannot cache
+	 * offsets here.
+	 */
+	diskdsc = mmtuple_disk_tupdesc(mmdesc);
+	for (stored = 0, attnum = 0; attnum < mmdesc->md_tupdesc->natts; attnum++)
+	{
+		int		datumno;
+
+		if (allnulls[attnum])
+		{
+			stored += mmdesc->md_info[attnum]->oi_nstored;
+			continue;
+		}
+
+		for (datumno = 0;
+			 datumno < mmdesc->md_info[attnum]->oi_nstored;
+			 datumno++)
+		{
+			Form_pg_attribute thisatt = diskdsc->attrs[stored];
+
+			if (thisatt->attlen == -1)
+			{
+				off = att_align_pointer(off, thisatt->attalign, -1,
+										tp + off);
+			}
+			else
+			{
+				/* not varlena, so safe to use att_align_nominal */
+				off = att_align_nominal(off, thisatt->attalign);
+			}
+
+			values[stored++] = fetchatt(thisatt, tp + off);
+
+			off = att_addlength_pointer(off, thisatt->attlen, tp + off);
+		}
+	}
+}
diff --git a/src/backend/access/minmax/mmxlog.c b/src/backend/access/minmax/mmxlog.c
new file mode 100644
index 0000000..c9b1461
--- /dev/null
+++ b/src/backend/access/minmax/mmxlog.c
@@ -0,0 +1,305 @@
+/*
+ * mmxlog.c
+ *		XLog replay routines for MinMax indexes
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *	  src/backend/access/minmax/mmxlog.c
+ */
+#include "postgres.h"
+
+#include "access/minmax.h"
+#include "access/minmax_internal.h"
+#include "access/minmax_page.h"
+#include "access/minmax_revmap.h"
+#include "access/minmax_tuple.h"
+#include "access/minmax_xlog.h"
+#include "access/xlogutils.h"
+#include "storage/freespace.h"
+
+
+/*
+ * xlog replay routines
+ */
+static void
+minmax_xlog_createidx(XLogRecPtr lsn, XLogRecord *record)
+{
+	xl_minmax_createidx *xlrec = (xl_minmax_createidx *) XLogRecGetData(record);
+	Buffer		buf;
+	Page		page;
+
+	/* Backup blocks are not used in create_index records */
+	Assert(!(record->xl_info & XLR_BKP_BLOCK_MASK));
+
+	/* create the index's metapage */
+	buf = XLogReadBuffer(xlrec->node, MINMAX_METAPAGE_BLKNO, true);
+	Assert(BufferIsValid(buf));
+	page = (Page) BufferGetPage(buf);
+	mm_metapage_init(page, xlrec->pagesPerRange, xlrec->version);
+	PageSetLSN(page, lsn);
+	MarkBufferDirty(buf);
+	UnlockReleaseBuffer(buf);
+
+	/* also initialize its first revmap page */
+	buf = XLogReadBuffer(xlrec->node, 1, true);
+	Assert(BufferIsValid(buf));
+	page = (Page) BufferGetPage(buf);
+	mm_page_init(page, MINMAX_PAGETYPE_REVMAP);
+	PageSetLSN(page, lsn);
+	MarkBufferDirty(buf);
+	UnlockReleaseBuffer(buf);
+}
+
+static void
+minmax_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
+{
+	xl_minmax_insert *xlrec = (xl_minmax_insert *) XLogRecGetData(record);
+	BlockNumber	blkno;
+	Buffer		buffer;
+	Page		page;
+	OffsetNumber offnum;
+	int			tuplen;
+	MMTuple	   *mmtuple;
+
+	/* If we have a full-page image, restore it and we're done */
+	if (record->xl_info & XLR_BKP_BLOCK(0))
+	{
+		(void) RestoreBackupBlock(lsn, record, 0, false, false);
+		return;
+	}
+
+	blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));
+	if (record->xl_info & XLOG_MINMAX_INIT_PAGE)
+	{
+		buffer = XLogReadBuffer(xlrec->target.node, blkno, true);
+		Assert(BufferIsValid(buffer));
+		page = (Page) BufferGetPage(buffer);
+
+		mm_page_init(page, MINMAX_PAGETYPE_REGULAR);
+	}
+	else
+	{
+		buffer = XLogReadBuffer(xlrec->target.node, blkno, false);
+		if (!BufferIsValid(buffer))
+			return;
+		page = (Page) BufferGetPage(buffer);
+
+		if (lsn <= PageGetLSN(page))	/* changes are applied */
+		{
+			UnlockReleaseBuffer(buffer);
+			return;
+		}
+	}
+	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
+	if (PageGetMaxOffsetNumber(page) + 1 < offnum)
+		elog(PANIC, "minmax_xlog_insert: invalid max offset number");
+
+	tuplen = record->xl_len - SizeOfMinmaxInsert;
+	mmtuple = (MMTuple *) ((char *) xlrec + SizeOfMinmaxInsert);
+
+	if (xlrec->overwrite)
+		PageOverwriteItemData(page, offnum, (Item) mmtuple, tuplen);
+	else
+	{
+		offnum = PageAddItem(page, (Item) mmtuple, tuplen, offnum, true, false);
+		if (offnum == InvalidOffsetNumber)
+			elog(PANIC, "minmax_xlog_insert: failed to add tuple");
+	}
+
+	PageSetLSN(page, lsn);
+
+	MarkBufferDirty(buffer);
+	UnlockReleaseBuffer(buffer);
+
+	/* XXX no FSM updates here ... */
+}
+
+static void
+minmax_xlog_bulkremove(XLogRecPtr lsn, XLogRecord *record)
+{
+	xl_minmax_bulkremove *xlrec = (xl_minmax_bulkremove *) XLogRecGetData(record);
+	Buffer		buffer;
+	Page		page;
+	OffsetNumber *offnos;
+	int			noffs;
+	Size		freespace;
+
+	/* If we have a full-page image, restore it and we're done */
+	if (record->xl_info & XLR_BKP_BLOCK(0))
+	{
+		(void) RestoreBackupBlock(lsn, record, 0, false, false);
+		return;
+	}
+
+	buffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
+	if (!BufferIsValid(buffer))
+		return;
+	page = (Page) BufferGetPage(buffer);
+
+	if (lsn <= PageGetLSN(page))	/* changes are applied */
+	{
+		UnlockReleaseBuffer(buffer);
+		return;
+	}
+
+	offnos = (OffsetNumber *) ((char *) xlrec + SizeOfMinmaxBulkRemove);
+	noffs = (record->xl_len - SizeOfMinmaxBulkRemove) / sizeof(OffsetNumber);
+
+	PageIndexDeleteNoCompact(page, offnos, noffs);
+	freespace = PageGetFreeSpace(page);
+
+	PageSetLSN(page, lsn);
+
+	MarkBufferDirty(buffer);
+	UnlockReleaseBuffer(buffer);
+
+	/* update FSM as well */
+	XLogRecordPageWithFreeSpace(xlrec->node, xlrec->block, freespace);
+}
+
+static void
+minmax_xlog_revmap_set(XLogRecPtr lsn, XLogRecord *record)
+{
+	xl_minmax_rm_set *xlrec = (xl_minmax_rm_set *) XLogRecGetData(record);
+	bool	init;
+	Buffer	buffer;
+	Page	page;
+
+	/* If we have a full-page image, restore it and we're done */
+	if (record->xl_info & XLR_BKP_BLOCK(0))
+	{
+		(void) RestoreBackupBlock(lsn, record, 0, false, false);
+		return;
+	}
+
+	init = (record->xl_info & XLOG_MINMAX_INIT_PAGE) != 0;
+	buffer = XLogReadBuffer(xlrec->node, xlrec->mapBlock, init);
+	Assert(BufferIsValid(buffer));
+	page = BufferGetPage(buffer);
+	if (init)
+		mm_page_init(page, MINMAX_PAGETYPE_REVMAP);
+
+	rm_page_set_iptr(page, xlrec->pagesPerRange, xlrec->heapBlock,
+					 ItemPointerGetBlockNumber(&(xlrec->newval)),
+					 ItemPointerGetOffsetNumber(&(xlrec->newval)));
+
+	PageSetLSN(page, lsn);
+	MarkBufferDirty(buffer);
+	UnlockReleaseBuffer(buffer);
+}
+
+static void
+minmax_xlog_metapg_set(XLogRecPtr lsn, XLogRecord *record)
+{
+	xl_minmax_metapg_set *xlrec = (xl_minmax_metapg_set *) XLogRecGetData(record);
+	Buffer	meta;
+	Page	metapg;
+	MinmaxMetaPageData *metadata;
+
+	/* If we have a full-page image, restore it and we're done */
+	if (record->xl_info & XLR_BKP_BLOCK(0))
+	{
+		(void) RestoreBackupBlock(lsn, record, 0, false, false);
+		return;
+	}
+
+	meta = XLogReadBuffer(xlrec->node, MINMAX_METAPAGE_BLKNO, false);
+	Assert(BufferIsValid(meta));
+
+	metapg = BufferGetPage(meta);
+	metadata = (MinmaxMetaPageData *) PageGetContents(metapg);
+	metadata->revmapArrayPages[xlrec->blkidx] = xlrec->newpg;
+
+	PageSetLSN(metapg, lsn);
+	MarkBufferDirty(meta);
+	UnlockReleaseBuffer(meta);
+}
+
+static void
+minmax_xlog_init_rmpg(XLogRecPtr lsn, XLogRecord *record)
+{
+	xl_minmax_init_rmpg *xlrec = (xl_minmax_init_rmpg *) XLogRecGetData(record);
+	Buffer		buffer;
+
+	if (record->xl_info & XLR_BKP_BLOCK(0))
+	{
+		(void) RestoreBackupBlock(lsn, record, 0, false, false);
+		return;
+	}
+
+	buffer = XLogReadBuffer(xlrec->node, xlrec->blkno, true);
+	Assert(BufferIsValid(buffer));
+
+	if (xlrec->array)
+		initialize_rma_page(buffer);
+	else
+		initialize_rmr_page(buffer, xlrec->logblk);
+
+	PageSetLSN(BufferGetPage(buffer), lsn);
+	MarkBufferDirty(buffer);
+	UnlockReleaseBuffer(buffer);
+}
+
+static void
+minmax_xlog_rmarray_set(XLogRecPtr lsn, XLogRecord *record)
+{
+	xl_minmax_rmarray_set *xlrec = (xl_minmax_rmarray_set *) XLogRecGetData(record);
+	Buffer	buffer;
+	Page	page;
+	RevmapArrayContents *contents;
+
+	/* If we have a full-page image, restore it and we're done */
+	if (record->xl_info & XLR_BKP_BLOCK(0))
+	{
+		(void) RestoreBackupBlock(lsn, record, 0, false, false);
+		return;
+	}
+
+	buffer = XLogReadBuffer(xlrec->node, xlrec->rmarray, false);
+	Assert(BufferIsValid(buffer));
+
+	page = BufferGetPage(buffer);
+
+	contents = (RevmapArrayContents *) PageGetContents(page);
+	contents->rma_blocks[xlrec->blkidx] = xlrec->newpg;
+	contents->rma_nblocks = xlrec->blkidx + 1;	/* XXX is this okay? */
+
+	PageSetLSN(page, lsn);
+	MarkBufferDirty(buffer);
+	UnlockReleaseBuffer(buffer);
+}
+
+void
+minmax_redo(XLogRecPtr lsn, XLogRecord *record)
+{
+	uint8		info = record->xl_info & ~XLR_INFO_MASK;
+
+	switch (info & XLOG_MINMAX_OPMASK)
+	{
+		case XLOG_MINMAX_CREATE_INDEX:
+			minmax_xlog_createidx(lsn, record);
+			break;
+		case XLOG_MINMAX_INSERT:
+			minmax_xlog_insert(lsn, record);
+			break;
+		case XLOG_MINMAX_BULKREMOVE:
+			minmax_xlog_bulkremove(lsn, record);
+			break;
+		case XLOG_MINMAX_REVMAP_SET:
+			minmax_xlog_revmap_set(lsn, record);
+			break;
+		case XLOG_MINMAX_METAPG_SET:
+			minmax_xlog_metapg_set(lsn, record);
+			break;
+		case XLOG_MINMAX_RMARRAY_SET:
+			minmax_xlog_rmarray_set(lsn, record);
+			break;
+		case XLOG_MINMAX_INIT_RMPG:
+			minmax_xlog_init_rmpg(lsn, record);
+			break;
+		default:
+			elog(PANIC, "minmax_redo: unknown op code %u", info);
+	}
+}
diff --git a/src/backend/access/rmgrdesc/Makefile b/src/backend/access/rmgrdesc/Makefile
index 7d092d2..5575a71 100644
--- a/src/backend/access/rmgrdesc/Makefile
+++ b/src/backend/access/rmgrdesc/Makefile
@@ -9,7 +9,8 @@ top_builddir = ../../../..
 include $(top_builddir)/src/Makefile.global
 
 OBJS = clogdesc.o dbasedesc.o gindesc.o gistdesc.o hashdesc.o heapdesc.o \
-	   mxactdesc.o nbtdesc.o relmapdesc.o seqdesc.o smgrdesc.o spgdesc.o \
+	   minmaxdesc.o mxactdesc.o nbtdesc.o relmapdesc.o seqdesc.o \
+	   smgrdesc.o spgdesc.o \
 	   standbydesc.o tblspcdesc.o xactdesc.o xlogdesc.o
 
 include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/access/rmgrdesc/minmaxdesc.c b/src/backend/access/rmgrdesc/minmaxdesc.c
new file mode 100644
index 0000000..efcff67
--- /dev/null
+++ b/src/backend/access/rmgrdesc/minmaxdesc.c
@@ -0,0 +1,95 @@
+/*-------------------------------------------------------------------------
+ *
+ * minmaxdesc.c
+ *	  rmgr descriptor routines for MinMax indexes
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ *	  src/backend/access/rmgrdesc/minmaxdesc.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/minmax_xlog.h"
+
+static void
+out_target(StringInfo buf, xl_minmax_tid *target)
+{
+	appendStringInfo(buf, "rel %u/%u/%u; tid %u/%u",
+			 target->node.spcNode, target->node.dbNode, target->node.relNode,
+					 ItemPointerGetBlockNumber(&(target->tid)),
+					 ItemPointerGetOffsetNumber(&(target->tid)));
+}
+
+void
+minmax_desc(StringInfo buf, XLogRecord *record)
+{
+	char	   *rec = XLogRecGetData(record);
+	uint8		info = record->xl_info & ~XLR_INFO_MASK;
+
+	info &= XLOG_MINMAX_OPMASK;
+	if (info == XLOG_MINMAX_CREATE_INDEX)
+	{
+		xl_minmax_createidx *xlrec = (xl_minmax_createidx *) rec;
+
+		appendStringInfo(buf, "create index: %u/%u/%u",
+						 xlrec->node.spcNode, xlrec->node.dbNode,
+						 xlrec->node.relNode);
+	}
+	else if (info == XLOG_MINMAX_INSERT)
+	{
+		xl_minmax_insert *xlrec = (xl_minmax_insert *) rec;
+
+		if (record->xl_info & XLOG_MINMAX_INIT_PAGE)
+			appendStringInfo(buf, "insert(init): ");
+		else
+			appendStringInfo(buf, "insert: ");
+		out_target(buf, &(xlrec->target));
+	}
+	else if (info == XLOG_MINMAX_BULKREMOVE)
+	{
+		xl_minmax_bulkremove *xlrec = (xl_minmax_bulkremove *) rec;
+
+		appendStringInfo(buf, "bulkremove: rel %u/%u/%u blk %u",
+						 xlrec->node.spcNode, xlrec->node.dbNode,
+						 xlrec->node.relNode, xlrec->block);
+	}
+	else if (info == XLOG_MINMAX_REVMAP_SET)
+	{
+		xl_minmax_rm_set *xlrec = (xl_minmax_rm_set *) rec;
+
+		appendStringInfo(buf, "revmap set: rel %u/%u/%u mapblk %u pagesPerRange %u item %u value %u/%u",
+						 xlrec->node.spcNode, xlrec->node.dbNode,
+						 xlrec->node.relNode, xlrec->mapBlock,
+						 xlrec->pagesPerRange, xlrec->heapBlock,
+						 ItemPointerGetBlockNumber(&(xlrec->newval)),
+						 ItemPointerGetOffsetNumber(&(xlrec->newval)));
+	}
+	else if (info == XLOG_MINMAX_METAPG_SET)
+	{
+		xl_minmax_metapg_set *xlrec = (xl_minmax_metapg_set *) rec;
+
+		appendStringInfo(buf, "metapg: rel %u/%u/%u array revmap idx %d block %u",
+						 xlrec->node.spcNode, xlrec->node.dbNode,
+						 xlrec->node.relNode,
+						 xlrec->blkidx, xlrec->newpg);
+	}
+	else if (info == XLOG_MINMAX_RMARRAY_SET)
+	{
+		xl_minmax_rmarray_set *xlrec = (xl_minmax_rmarray_set *) rec;
+
+		appendStringInfoString(buf, "revmap array: ");
+		appendStringInfo(buf, "rel %u/%u/%u array pg %u revmap idx %d block %u",
+						 xlrec->node.spcNode, xlrec->node.dbNode,
+						 xlrec->node.relNode,
+						 xlrec->rmarray,
+						 xlrec->blkidx, xlrec->newpg);
+	}
+	else if (info == XLOG_MINMAX_INIT_RMPG)
+	{
+		xl_minmax_init_rmpg *xlrec = (xl_minmax_init_rmpg *) rec;
+
+		appendStringInfo(buf, "init revmap page: rel %u/%u/%u blk %u (%s)",
+						 xlrec->node.spcNode, xlrec->node.dbNode,
+						 xlrec->node.relNode, xlrec->blkno,
+						 xlrec->array ? "array" : "regular");
+	}
+	else
+		appendStringInfo(buf, "UNKNOWN");
+}
diff --git a/src/backend/access/transam/rmgr.c b/src/backend/access/transam/rmgr.c
index c0a7a6f..e285e50 100644
--- a/src/backend/access/transam/rmgr.c
+++ b/src/backend/access/transam/rmgr.c
@@ -12,6 +12,7 @@
 #include "access/gist_private.h"
 #include "access/hash.h"
 #include "access/heapam_xlog.h"
+#include "access/minmax_xlog.h"
 #include "access/multixact.h"
 #include "access/nbtree.h"
 #include "access/spgist.h"
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index a5a204e..cbb0ab8 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -2096,6 +2096,27 @@ IndexBuildHeapScan(Relation heapRelation,
 				   IndexBuildCallback callback,
 				   void *callback_state)
 {
+	return IndexBuildHeapRangeScan(heapRelation, indexRelation,
+								   indexInfo, allow_sync,
+								   0, InvalidBlockNumber,
+								   callback, callback_state);
+}
+
+/*
+ * As above, except that instead of scanning the complete heap, only the given
+ * number of blocks are scanned.  Scan to end-of-rel can be signalled by
+ * passing InvalidBlockNumber as numblocks.
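+ *
+ * For example, a hypothetical caller that summarizes a single range of
+ * pages might do:
+ *		IndexBuildHeapRangeScan(heapRel, indexRel, indexInfo, false,
+ *								blkno, pagesPerRange, callback, state);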
+ */
+double
+IndexBuildHeapRangeScan(Relation heapRelation,
+						Relation indexRelation,
+						IndexInfo *indexInfo,
+						bool allow_sync,
+						BlockNumber start_blockno,
+						BlockNumber numblocks,
+						IndexBuildCallback callback,
+						void *callback_state)
+{
 	bool		is_system_catalog;
 	bool		checking_uniqueness;
 	HeapScanDesc scan;
@@ -2166,6 +2187,9 @@ IndexBuildHeapScan(Relation heapRelation,
 								true,	/* buffer access strategy OK */
 								allow_sync);	/* syncscan OK? */
 
+	/* set our endpoints */
+	heap_setscanlimits(scan, start_blockno, numblocks);
+
 	reltuples = 0;
 
 	/*
diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c
index 9f1b20e..55e375f 100644
--- a/src/backend/replication/logical/decode.c
+++ b/src/backend/replication/logical/decode.c
@@ -132,6 +132,7 @@ LogicalDecodingProcessRecord(LogicalDecodingContext *ctx, XLogRecord *record)
 		case RM_GIST_ID:
 		case RM_SEQ_ID:
 		case RM_SPGIST_ID:
+		case RM_MINMAX_ID:
 			break;
 		case RM_NEXT_ID:
 			elog(ERROR, "unexpected RM_NEXT_ID rmgr_id: %u", (RmgrIds) buf.record.xl_rmid);
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index 6351a9b..5000f39 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -324,6 +324,41 @@ PageAddItem(Page page,
 }
 
 /*
+ * PageOverwriteItemData
+ * 		Overwrite the data for the item at the given offset.
+ *
+ * The new data must fit in the existing data space for the old tuple.
+ */
+void
+PageOverwriteItemData(Page page, OffsetNumber offset, Item item, Size size)
+{
+	PageHeader	phdr = (PageHeader) page;
+	ItemId		itemId;
+
+	/*
+	 * Be wary about corrupted page pointers
+	 */
+	if (phdr->pd_lower < SizeOfPageHeaderData ||
+		phdr->pd_lower > phdr->pd_upper ||
+		phdr->pd_upper > phdr->pd_special ||
+		phdr->pd_special > BLCKSZ)
+		ereport(PANIC,
+				(errcode(ERRCODE_DATA_CORRUPTED),
+				 errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
+						phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
+
+	itemId = PageGetItemId(phdr, offset);
+	if (!ItemIdIsUsed(itemId) || !ItemIdHasStorage(itemId))
+		elog(ERROR, "existing item to overwrite is not used");
+
+	if (ItemIdGetLength(itemId) < size)
+		elog(ERROR, "existing item is not large enough to be overwritten");
+
+	memcpy((char *) page + ItemIdGetOffset(itemId), item, size);
+	ItemIdSetNormal(itemId, ItemIdGetOffset(itemId), size);
+}
+
+/*
  * PageGetTempPage
  *		Get a temporary page in local memory for special processing.
  *		The returned page is not initialized at all; caller must do that.
@@ -399,7 +434,8 @@ PageRestoreTempPage(Page tempPage, Page oldPage)
 }
 
 /*
- * sorting support for PageRepairFragmentation and PageIndexMultiDelete
+ * sorting support for PageRepairFragmentation, PageIndexMultiDelete,
+ * PageIndexDeleteNoCompact
  */
 typedef struct itemIdSortData
 {
@@ -896,6 +932,182 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
 	phdr->pd_upper = upper;
 }
 
+/*
+ * PageIndexDeleteNoCompact
+ *		Delete the given items for an index page, and defragment the resulting
+ *		free space, but do not compact the item pointers array.
+ *
+ * itemnos is the array of offset numbers of the tuples to delete; nitems is
+ * its size.
+ *
+ * If the resulting page is completely empty, the line pointer array is
+ * reset; otherwise its length is preserved, so that the offset numbers of
+ * surviving tuples do not change.
+ *
+ * This is used for index AMs that require that existing TIDs of live tuples
+ * remain unchanged.
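+ *
+ * For example, deleting offset number 2 from a page with items at offsets
+ * 1..4 marks line pointer 2 unused but leaves pointers 3 and 4 (and thus
+ * the TIDs of the surviving tuples) in place; only the tuple data area is
+ * defragmented.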
+ */
+void
+PageIndexDeleteNoCompact(Page page, OffsetNumber *itemnos, int nitems)
+{
+	PageHeader	phdr = (PageHeader) page;
+	LocationIndex pd_lower = phdr->pd_lower;
+	LocationIndex pd_upper = phdr->pd_upper;
+	LocationIndex pd_special = phdr->pd_special;
+	int			nline;
+	bool		empty;
+	OffsetNumber offnum;
+	int			nextitm;
+
+	/*
+	 * As with PageRepairFragmentation, paranoia seems justified.
+	 */
+	if (pd_lower < SizeOfPageHeaderData ||
+		pd_lower > pd_upper ||
+		pd_upper > pd_special ||
+		pd_special > BLCKSZ ||
+		pd_special != MAXALIGN(pd_special))
+		ereport(ERROR,
+				(errcode(ERRCODE_DATA_CORRUPTED),
+				 errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
+						pd_lower, pd_upper, pd_special)));
+
+	/*
+	 * Scan the existing item pointer array and mark as unused those that are
+	 * in our kill-list; also mark as unused any used items that have no
+	 * storage.
+	 */
+	nline = PageGetMaxOffsetNumber(page);
+	empty = true;
+	nextitm = 0;
+	for (offnum = FirstOffsetNumber; offnum <= nline; offnum = OffsetNumberNext(offnum))
+	{
+		ItemId		lp;
+		ItemLength	itemlen;
+		ItemOffset	offset;
+
+		lp = PageGetItemId(page, offnum);
+
+		itemlen = ItemIdGetLength(lp);
+		offset = ItemIdGetOffset(lp);
+
+		if (ItemIdIsUsed(lp))
+		{
+			if (offset < pd_upper ||
+				(offset + itemlen) > pd_special ||
+				offset != MAXALIGN(offset))
+				ereport(ERROR,
+						(errcode(ERRCODE_DATA_CORRUPTED),
+						 errmsg("corrupted item pointer: offset = %u, length = %u",
+								offset, (unsigned int) itemlen)));
+
+			if (nextitm < nitems && offnum == itemnos[nextitm])
+			{
+				/* this one is on our list to delete, so mark it unused */
+				ItemIdSetUnused(lp);
+				nextitm++;
+			}
+			else if (ItemIdHasStorage(lp))
+			{
+				/* This one's live -- must do the compaction dance */
+				empty = false;
+			}
+			else
+			{
+				/* get rid of this one too */
+				ItemIdSetUnused(lp);
+			}
+		}
+	}
+
+	/* this will catch invalid or out-of-order itemnos[] */
+	if (nextitm != nitems)
+		elog(ERROR, "incorrect index offsets supplied");
+
+	if (empty)
+	{
+		/* Page is completely empty, so just reset it quickly */
+		phdr->pd_lower = SizeOfPageHeaderData;
+		phdr->pd_upper = pd_special;
+	}
+	else
+	{
+		/* There are live items: need to compact the page the hard way */
+		itemIdSortData itemidbase[MaxOffsetNumber];
+		itemIdSort	itemidptr;
+		int			i;
+		Size		totallen;
+		Offset		upper;
+
+		/*
+		 * Scan the page taking note of each item that we need to preserve.
+		 * This includes both live items (those that contain data) and
+		 * interspersed unused ones.  It's critical to preserve these unused
+		 * items, because otherwise the offset numbers for later live items
+		 * would change, which is not acceptable.  Unused items might get used
+		 * again later; that is fine.
+		 */
+		itemidptr = itemidbase;
+		totallen = 0;
+		for (i = 0; i < nline; i++, itemidptr++)
+		{
+			ItemId		lp;
+
+			itemidptr->offsetindex = i;
+
+			lp = PageGetItemId(page, i + 1);
+			if (ItemIdHasStorage(lp))
+			{
+				itemidptr->itemoff = ItemIdGetOffset(lp);
+				itemidptr->alignedlen = MAXALIGN(ItemIdGetLength(lp));
+				totallen += itemidptr->alignedlen;
+			}
+			else
+			{
+				itemidptr->itemoff = 0;
+				itemidptr->alignedlen = 0;
+			}
+		}
+		/* By here, there are exactly nline elements in itemidbase array */
+
+		if (totallen > (Size) (pd_special - pd_lower))
+			ereport(ERROR,
+					(errcode(ERRCODE_DATA_CORRUPTED),
+					 errmsg("corrupted item lengths: total %u, available space %u",
+							(unsigned int) totallen, pd_special - pd_lower)));
+
+		/* sort itemIdSortData array into decreasing itemoff order */
+		qsort((char *) itemidbase, nline, sizeof(itemIdSortData),
+			  itemoffcompare);
+
+		/*
+		 * Defragment the data areas of each tuple, being careful to preserve
+		 * each item's position in the linp array.
+		 */
+		upper = pd_special;
+		PageClearHasFreeLinePointers(page);
+		for (i = 0, itemidptr = itemidbase; i < nline; i++, itemidptr++)
+		{
+			ItemId		lp;
+
+			lp = PageGetItemId(page, itemidptr->offsetindex + 1);
+			if (itemidptr->alignedlen == 0)
+			{
+				PageSetHasFreeLinePointers(page);
+				ItemIdSetUnused(lp);
+				continue;
+			}
+			upper -= itemidptr->alignedlen;
+			memmove((char *) page + upper,
+					(char *) page + itemidptr->itemoff,
+					itemidptr->alignedlen);
+			lp->lp_off = upper;
+			/* lp_flags and lp_len remain the same as originally */
+		}
+
+		/* Set the new page limits */
+		phdr->pd_upper = upper;
+		phdr->pd_lower = SizeOfPageHeaderData + i * sizeof(ItemIdData);
+	}
+}
 
 /*
  * Set checksum for a page in shared buffers.
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index e932ccf..61e1a28 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -7349,3 +7349,27 @@ gincostestimate(PG_FUNCTION_ARGS)
 
 	PG_RETURN_VOID();
 }
+
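+/*
+ * Cost estimate for a minmax index scan: the scan always reads every index
+ * page (there is no way to descend into a subset of the index), so startup
+ * and total cost both amount to one sequential read of the whole index;
+ * selectivity is computed by the ordinary clauselist machinery.
+ */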
+Datum
+mmcostestimate(PG_FUNCTION_ARGS)
+{
+	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+	IndexPath  *path = (IndexPath *) PG_GETARG_POINTER(1);
+	double		loop_count = PG_GETARG_FLOAT8(2);
+	Cost	   *indexStartupCost = (Cost *) PG_GETARG_POINTER(3);
+	Cost	   *indexTotalCost = (Cost *) PG_GETARG_POINTER(4);
+	Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(5);
+	double	   *indexCorrelation = (double *) PG_GETARG_POINTER(6);
+	IndexOptInfo *index = path->indexinfo;
+
+	*indexStartupCost = (Cost) seq_page_cost * index->pages * loop_count;
+	*indexTotalCost = *indexStartupCost;
+
+	*indexSelectivity =
+		clauselist_selectivity(root, path->indexquals,
+							   path->indexinfo->rel->relid,
+							   JOIN_INNER, NULL);
+	*indexCorrelation = 1;
+
+	PG_RETURN_VOID();
+}
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 493839f..5354a3b 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -112,6 +112,8 @@ extern HeapScanDesc heap_beginscan_strat(Relation relation, Snapshot snapshot,
 					 bool allow_strat, bool allow_sync);
 extern HeapScanDesc heap_beginscan_bm(Relation relation, Snapshot snapshot,
 				  int nkeys, ScanKey key);
+extern void heap_setscanlimits(HeapScanDesc scan, BlockNumber startBlk,
+		   BlockNumber numBlks);
 extern void heap_rescan(HeapScanDesc scan, ScanKey key);
 extern void heap_endscan(HeapScanDesc scan);
 extern HeapTuple heap_getnext(HeapScanDesc scan, ScanDirection direction);
diff --git a/src/include/access/minmax.h b/src/include/access/minmax.h
new file mode 100644
index 0000000..edb88ba
--- /dev/null
+++ b/src/include/access/minmax.h
@@ -0,0 +1,52 @@
+/*
+ * AM-callable functions for MinMax indexes
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *		src/include/access/minmax.h
+ */
+#ifndef MINMAX_H
+#define MINMAX_H
+
+#include "fmgr.h"
+#include "nodes/execnodes.h"
+#include "utils/relcache.h"
+
+
+/*
+ * prototypes for functions in minmax.c (external entry points for minmax)
+ */
+extern Datum mmbuild(PG_FUNCTION_ARGS);
+extern Datum mmbuildempty(PG_FUNCTION_ARGS);
+extern Datum mminsert(PG_FUNCTION_ARGS);
+extern Datum mmbeginscan(PG_FUNCTION_ARGS);
+extern Datum mmgettuple(PG_FUNCTION_ARGS);
+extern Datum mmgetbitmap(PG_FUNCTION_ARGS);
+extern Datum mmrescan(PG_FUNCTION_ARGS);
+extern Datum mmendscan(PG_FUNCTION_ARGS);
+extern Datum mmmarkpos(PG_FUNCTION_ARGS);
+extern Datum mmrestrpos(PG_FUNCTION_ARGS);
+extern Datum mmbulkdelete(PG_FUNCTION_ARGS);
+extern Datum mmvacuumcleanup(PG_FUNCTION_ARGS);
+extern Datum mmcanreturn(PG_FUNCTION_ARGS);
+extern Datum mmcostestimate(PG_FUNCTION_ARGS);
+extern Datum mmoptions(PG_FUNCTION_ARGS);
+
+/*
+ * Storage type for MinMax' reloptions
+ */
+typedef struct MinmaxOptions
+{
+	int32		vl_len_;		/* varlena header (do not touch directly!) */
+	BlockNumber	pagesPerRange;
+} MinmaxOptions;
+
+#define MINMAX_DEFAULT_PAGES_PER_RANGE	128
+#define MinmaxGetPagesPerRange(relation) \
+	((relation)->rd_options ? \
+	 ((MinmaxOptions *) (relation)->rd_options)->pagesPerRange : \
+	  MINMAX_DEFAULT_PAGES_PER_RANGE)
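+
+/*
+ * For example, with no reloptions set, MinmaxGetPagesPerRange() returns the
+ * default and each index tuple summarizes 128 heap pages.
+ */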
+
+#endif   /* MINMAX_H */
diff --git a/src/include/access/minmax_internal.h b/src/include/access/minmax_internal.h
new file mode 100644
index 0000000..b7c28be
--- /dev/null
+++ b/src/include/access/minmax_internal.h
@@ -0,0 +1,83 @@
+/*
+ * minmax_internal.h
+ *		internal declarations for MinMax indexes
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *		src/include/access/minmax_internal.h
+ */
+#ifndef MINMAX_INTERNAL_H
+#define MINMAX_INTERNAL_H
+
+#include "fmgr.h"
+#include "storage/buf.h"
+#include "storage/bufpage.h"
+#include "storage/off.h"
+#include "utils/relcache.h"
+
+
+/*
+ * A MinmaxDesc is a struct that enables converting a MinMax tuple between
+ * the on-disk format and the DeformedMMTuple representation.
+ *
+ * Note: we assume, for now, that the data stored for each column is the same
+ * datatype as the indexed heap column.  This restriction can be lifted by
+ * having an Oid array pointer on the PerCol struct, where each member of the
+ * array indicates the typid of the stored data.
+ */
+
+/* struct returned by "OpcInfo" amproc */
+typedef struct MinmaxOpcInfo
+{
+	/* Number of columns stored in an index column of this opclass */
+	uint16	oi_nstored;
+
+	/* Opaque pointer for the opclass' private use */
+	void   *oi_opaque;
+} MinmaxOpcInfo;
+
+typedef struct MinmaxDesc
+{
+	/* the index relation itself */
+	Relation	md_index;
+
+	/* tuple descriptor of the index relation */
+	TupleDesc	md_tupdesc;
+
+	/* cached copy for on-disk tuples; generated at first use */
+	TupleDesc	md_disktdesc;
+
+	/* total number of Datum entries that are stored on-disk for all columns */
+	int			md_totalstored;
+
+	/* per-column info */
+	MinmaxOpcInfo *md_info[FLEXIBLE_ARRAY_MEMBER];	/* tupdesc->natts entries long */
+} MinmaxDesc;
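+
+/*
+ * For example, in an index over (int4, text) where each opclass stores two
+ * Datums per column (say, a minimum and a maximum), oi_nstored is 2 for
+ * both columns and md_totalstored is 4.
+ */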
+
+/*
+ * Globally-known function support numbers for Minmax indexes.  Individual
+ * opclasses define their own function support numbers, which must not collide
+ * with the definitions here.
+ */
+#define MINMAX_PROCNUM_OPCINFO		1
+#define MINMAX_PROCNUM_ADDVALUE		2
+#define MINMAX_PROCNUM_CONSISTENT	3
+
+#define MINMAX_DEBUG
+
+/* we allow debug if using GCC; otherwise don't bother */
+#if defined(MINMAX_DEBUG) && defined(__GNUC__)
+#define MINMAX_elog(level, ...)		elog(level, __VA_ARGS__)
+#else
+#define MINMAX_elog(level, ...)	((void) 0)
+#endif
+
+/* minmax.c */
+extern Buffer mm_getnewbuffer(Relation irel);
+extern void mm_page_init(Page page, uint16 type);
+extern void mm_metapage_init(Page page, BlockNumber pagesPerRange,
+				 uint16 version);
+
+#endif   /* MINMAX_INTERNAL_H */
diff --git a/src/include/access/minmax_page.h b/src/include/access/minmax_page.h
new file mode 100644
index 0000000..04f40d8
--- /dev/null
+++ b/src/include/access/minmax_page.h
@@ -0,0 +1,88 @@
+/*
+ * Prototypes and definitions for minmax page layouts
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *		src/include/access/minmax_page.h
+ *
+ * NOTES
+ *
+ * These structs should really be private to specific minmax files, but it's
+ * useful to have them here so that they can be used by pageinspect and similar
+ * tools.
+ */
+#ifndef MINMAX_PAGE_H
+#define MINMAX_PAGE_H
+
+
+/* special space on all minmax pages stores a "type" identifier */
+#define		MINMAX_PAGETYPE_META			0xF091
+#define		MINMAX_PAGETYPE_REVMAP_ARRAY	0xF092
+#define		MINMAX_PAGETYPE_REVMAP			0xF093
+#define		MINMAX_PAGETYPE_REGULAR			0xF094
+
+typedef struct MinmaxSpecialSpace
+{
+	uint16	type;
+} MinmaxSpecialSpace;
+
+/* Metapage definitions */
+typedef struct MinmaxMetaPageData
+{
+	uint32	minmaxMagic;
+	uint32	minmaxVersion;
+	BlockNumber	pagesPerRange;
+	BlockNumber revmapArrayPages[1];	/* actually MAX_REVMAP_ARRAYPAGES */
+} MinmaxMetaPageData;
+
+/*
+ * Maximum number of revmap array pages that can be listed in the metapage:
+ * whatever fits once the page header, the fixed part of the metapage struct,
+ * and the minmax special space have been accounted for.
+ */
+#define MAX_REVMAP_ARRAYPAGES	\
+	((BLCKSZ - \
+	  MAXALIGN(SizeOfPageHeaderData) - \
+	  offsetof(MinmaxMetaPageData, revmapArrayPages) - \
+	  MAXALIGN(sizeof(MinmaxSpecialSpace)) ) / \
+	 sizeof(BlockNumber))
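+
+/*
+ * With the default 8192-byte BLCKSZ on a typical 64-bit build, this is
+ * (8192 - 24 - 12 - 8) / 4 = 2037 array page slots: a 24-byte MAXALIGN'ed
+ * page header, 12 bytes of fixed metapage fields, and an 8-byte MAXALIGN'ed
+ * special space.
+ */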
+
+#define MINMAX_CURRENT_VERSION		1
+#define MINMAX_META_MAGIC			0xA8109CFA
+
+#define MINMAX_METAPAGE_BLKNO	0
+
+/* Definitions for regular revmap pages */
+typedef struct RevmapContents
+{
+	int32	rmr_logblk;			/* logical blkno of this revmap page */
+	ItemPointerData rmr_tids[1];	/* really REGULAR_REVMAP_PAGE_MAXITEMS */
+} RevmapContents;
+
+#define REGULAR_REVMAP_CONTENT_SIZE	\
+	(BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - \
+	 offsetof(RevmapContents, rmr_tids) - \
+	 MAXALIGN(sizeof(MinmaxSpecialSpace)))
+/* max num of items in the array */
+#define REGULAR_REVMAP_PAGE_MAXITEMS \
+	(REGULAR_REVMAP_CONTENT_SIZE / sizeof(ItemPointerData))
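+
+/*
+ * Under the same assumptions as above, a regular revmap page holds
+ * (8192 - 24 - 4 - 8) / 6 = 1359 item pointers; with the default 128 pages
+ * per range, a single revmap page addresses 1359 * 128 heap pages, or about
+ * 1.3 GB of heap.
+ */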
+
+/* Definitions for array revmap pages */
+typedef struct RevmapArrayContents
+{
+	int32	rma_nblocks;
+	BlockNumber	rma_blocks[1];	/* really ARRAY_REVMAP_PAGE_MAXITEMS */
+} RevmapArrayContents;
+
+#define REVMAP_ARRAY_CONTENT_SIZE \
+	(BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - \
+	 offsetof(RevmapArrayContents, rma_blocks) - \
+	 MAXALIGN(sizeof(MinmaxSpecialSpace)))
+/* max num of items in the array */
+#define ARRAY_REVMAP_PAGE_MAXITEMS \
+	(REVMAP_ARRAY_CONTENT_SIZE / sizeof(BlockNumber))
+
+
+#endif		/* MINMAX_PAGE_H */
diff --git a/src/include/access/minmax_revmap.h b/src/include/access/minmax_revmap.h
new file mode 100644
index 0000000..1c968f3
--- /dev/null
+++ b/src/include/access/minmax_revmap.h
@@ -0,0 +1,40 @@
+/*
+ * prototypes for minmax reverse range maps
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *		src/include/access/minmax_revmap.h
+ */
+
+#ifndef MINMAX_REVMAP_H
+#define MINMAX_REVMAP_H
+
+#include "storage/block.h"
+#include "storage/itemptr.h"
+#include "storage/off.h"
+#include "utils/relcache.h"
+
+/* struct definition lives in mmrevmap.c */
+typedef struct mmRevmapAccess mmRevmapAccess;
+
+extern mmRevmapAccess *mmRevmapAccessInit(Relation idxrel, BlockNumber *pagesPerRange);
+extern void mmRevmapAccessTerminate(mmRevmapAccess *rmAccess);
+
+extern void mmRevmapCreate(Relation idxrel);
+extern void mmSetHeapBlockItemptr(mmRevmapAccess *rmAccess, BlockNumber blk,
+					  BlockNumber blkno, OffsetNumber offno);
+extern void mmGetHeapBlockItemptr(mmRevmapAccess *rmAccess, BlockNumber blk,
+					  ItemPointerData *iptr);
+extern void mmRevmapTruncate(mmRevmapAccess *rmAccess,
+				 BlockNumber heapNumBlocks);
+
+/* internal stuff also used by xlog replay */
+extern void rm_page_set_iptr(Page page, BlockNumber pagesPerRange,
+				 BlockNumber heapBlk, BlockNumber blkno, OffsetNumber offno);
+extern BlockNumber initialize_rmr_page(Buffer newbuf, BlockNumber mapBlk);
+extern void initialize_rma_page(Buffer buf);
+
+
+#endif   /* MINMAX_REVMAP_H */
diff --git a/src/include/access/minmax_tuple.h b/src/include/access/minmax_tuple.h
new file mode 100644
index 0000000..bd57fdd
--- /dev/null
+++ b/src/include/access/minmax_tuple.h
@@ -0,0 +1,84 @@
+/*
+ * Declarations for dealing with MinMax-specific tuples.
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *	  src/include/access/minmax_tuple.h
+ */
+#ifndef MINMAX_TUPLE_H
+#define MINMAX_TUPLE_H
+
+#include "access/minmax_internal.h"
+#include "access/tupdesc.h"
+
+
+/*
+ * A minmax index stores one index tuple per page range.  Each index tuple
+ * has one MMValues struct for each indexed column; in turn, each MMValues
+ * has (besides the null flags) an array of Datum whose size is determined by
+ * the opclass.
+ */
+typedef struct MMValues
+{
+	bool		hasnulls;		/* are there any nulls in the page range? */
+	bool		allnulls;		/* are all values nulls in the page range? */
+	Datum	   *values;			/* current accumulated values */
+} MMValues;
+
+/*
+ * This struct represents one index tuple, comprising the minimum and maximum
+ * values for all indexed columns, within one page range.  These values can
+ * only be meaningfully decoded with an appropriate MinmaxDesc.
+ */
+typedef struct DeformedMMTuple
+{
+	bool		dt_seentup;
+	MMValues	dt_columns[FLEXIBLE_ARRAY_MEMBER];
+} DeformedMMTuple;
+
+/*
+ * An on-disk minmax tuple.  This is possibly followed by a nulls bitmask,
+ * with two null bits per indexed column (an "allnulls" bit and a "hasnulls"
+ * bit); an opclass-defined number of Datum values for each column follow.
+ */
+typedef struct MMTuple
+{
+	/* ---------------
+	 * mt_info is laid out in the following fashion:
+	 *
+	 * 7th (high) bit: has nulls
+	 * 6th bit: unused
+	 * 5th bit: unused
+	 * 4-0 bit: offset of data
+	 * ---------------
+	 */
+	uint8		mt_info;
+} MMTuple;
+
+#define SizeOfMinMaxTuple	(offsetof(MMTuple, mt_info) + sizeof(uint8))
+
+/*
+ * mt_info manipulation macros
+ */
+#define MMIDX_OFFSET_MASK 0x1F
+/* bit 0x20 is not used at present */
+/* bit 0x40 is not used at present */
+#define MMIDX_NULLS_MASK 0x80
+
+#define MMTupleDataOffset(mmtup)	((Size) (((MMTuple *) (mmtup))->mt_info & MMIDX_OFFSET_MASK))
+#define MMTupleHasNulls(mmtup)	(((((MMTuple *) (mmtup))->mt_info & MMIDX_NULLS_MASK)) != 0)
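+
+/*
+ * For example, a tuple with a null bitmap and a MAXALIGN'ed data offset of
+ * 8 has mt_info = 0x88: the MMIDX_NULLS_MASK bit plus offset 8 in the low
+ * five bits.
+ */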
+
+
+extern MMTuple *minmax_form_tuple(MinmaxDesc *mmdesc,
+				  DeformedMMTuple *tuple, Size *size);
+extern void minmax_free_tuple(MMTuple *tuple);
+
+extern DeformedMMTuple *minmax_new_dtuple(MinmaxDesc *mmdesc);
+extern void minmax_dtuple_initialize(DeformedMMTuple *dtuple,
+						 MinmaxDesc *mmdesc);
+extern DeformedMMTuple *minmax_deform_tuple(MinmaxDesc *mmdesc,
+					MMTuple *tuple);
+
+#endif   /* MINMAX_TUPLE_H */
diff --git a/src/include/access/minmax_xlog.h b/src/include/access/minmax_xlog.h
new file mode 100644
index 0000000..b13fe2c
--- /dev/null
+++ b/src/include/access/minmax_xlog.h
@@ -0,0 +1,134 @@
+/*-------------------------------------------------------------------------
+ *
+ * minmax_xlog.h
+ *	  POSTGRES MinMax access XLOG definitions.
+ *
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/access/minmax_xlog.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef MINMAX_XLOG_H
+#define MINMAX_XLOG_H
+
+#include "access/xlog.h"
+#include "storage/bufpage.h"
+#include "storage/itemptr.h"
+#include "storage/relfilenode.h"
+#include "utils/relcache.h"
+
+
+/*
+ * WAL record definitions for minmax's WAL operations
+ *
+ * XLOG allows us to store some information in the high 4 bits of the
+ * xl_info field of each log record.
+ */
+#define XLOG_MINMAX_CREATE_INDEX	0x00
+#define XLOG_MINMAX_INSERT			0x10
+#define XLOG_MINMAX_BULKREMOVE		0x20
+#define XLOG_MINMAX_REVMAP_SET		0x30
+#define XLOG_MINMAX_METAPG_SET		0x40
+#define XLOG_MINMAX_RMARRAY_SET		0x50
+#define XLOG_MINMAX_INIT_RMPG		0x60
+
+#define XLOG_MINMAX_OPMASK			0x70
+/*
+ * When we insert the first item on a new page, we restore the entire page in
+ * redo.
+ */
+#define XLOG_MINMAX_INIT_PAGE		0x80
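+
+/*
+ * For example, the insertion of the first tuple on a fresh regular page is
+ * logged with xl_info = XLOG_MINMAX_INSERT | XLOG_MINMAX_INIT_PAGE (0x90);
+ * redo masks with XLOG_MINMAX_OPMASK to dispatch and tests the INIT_PAGE
+ * bit separately.
+ */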
+
+/* This is what we need to know about a minmax index create */
+typedef struct xl_minmax_createidx
+{
+	BlockNumber pagesPerRange;
+	RelFileNode	node;
+	uint16		version;
+} xl_minmax_createidx;
+#define SizeOfMinmaxCreateIdx	(offsetof(xl_minmax_createidx, version) + sizeof(uint16))
+
+/* All that we need to find a minmax tuple */
+typedef struct xl_minmax_tid
+{
+	RelFileNode	node;
+	ItemPointerData tid;
+} xl_minmax_tid;
+
+#define SizeOfMinmaxTid		(offsetof(xl_minmax_tid, tid) + SizeOfIptrData)
+
+/* This is what we need to know about a minmax tuple insert */
+typedef struct xl_minmax_insert
+{
+	xl_minmax_tid	target;
+	bool			overwrite;
+	/* tuple data follows at end of struct */
+} xl_minmax_insert;
+
+#define SizeOfMinmaxInsert		(offsetof(xl_minmax_insert, overwrite) + sizeof(bool))
+
+/* This is what we need to know about a bulk minmax tuple remove */
+typedef struct xl_minmax_bulkremove
+{
+	RelFileNode node;
+	BlockNumber	block;
+	/* offset number array follows at end of struct */
+} xl_minmax_bulkremove;
+
+#define SizeOfMinmaxBulkRemove	(offsetof(xl_minmax_bulkremove, block) + sizeof(BlockNumber))
+
+/* This is what we need to know about a revmap "set heap ptr" */
+typedef struct xl_minmax_rm_set
+{
+	RelFileNode		node;
+	BlockNumber		mapBlock;
+	BlockNumber		pagesPerRange;
+	BlockNumber		heapBlock;
+	ItemPointerData newval;
+} xl_minmax_rm_set;
+
+#define SizeOfMinmaxRevmapSet	(offsetof(xl_minmax_rm_set, newval) + SizeOfIptrData)
+
+/* This is what we need to know about a "metapage set" operation */
+typedef struct xl_minmax_metapg_set
+{
+	RelFileNode		node;
+	uint32			blkidx;
+	BlockNumber		newpg;
+} xl_minmax_metapg_set;
+
+#define SizeOfMinmaxMetapgSet	(offsetof(xl_minmax_metapg_set, newpg) + \
+								 sizeof(BlockNumber))
+
+/* This is what we need to know about a "revmap array set" operation */
+typedef struct xl_minmax_rmarray_set
+{
+	RelFileNode		node;
+	BlockNumber		rmarray;
+	uint32			blkidx;
+	BlockNumber		newpg;
+} xl_minmax_rmarray_set;
+
+#define SizeOfMinmaxRmarraySet	(offsetof(xl_minmax_rmarray_set, newpg) + \
+								 sizeof(BlockNumber))
+
+/* This is what we need to know when we initialize a new revmap page */
+typedef struct xl_minmax_init_rmpg
+{
+	RelFileNode		node;
+	bool			array;	/* array revmap page or regular revmap page */
+	BlockNumber		blkno;
+	BlockNumber		logblk;	/* only used by regular revmap pages */
+} xl_minmax_init_rmpg;
+
+#define SizeOfMinmaxInitRmpg	(offsetof(xl_minmax_init_rmpg, logblk) + \
+								 sizeof(BlockNumber))
+
+
+extern void minmax_desc(StringInfo buf, XLogRecord *record);
+extern void minmax_redo(XLogRecPtr lsn, XLogRecord *record);
+
+#endif	/* MINMAX_XLOG_H */
diff --git a/src/include/access/reloptions.h b/src/include/access/reloptions.h
index c226448..985d435 100644
--- a/src/include/access/reloptions.h
+++ b/src/include/access/reloptions.h
@@ -45,8 +45,9 @@ typedef enum relopt_kind
 	RELOPT_KIND_TABLESPACE = (1 << 7),
 	RELOPT_KIND_SPGIST = (1 << 8),
 	RELOPT_KIND_VIEW = (1 << 9),
+	RELOPT_KIND_MINMAX = (1 << 10),
 	/* if you add a new kind, make sure you update "last_default" too */
-	RELOPT_KIND_LAST_DEFAULT = RELOPT_KIND_VIEW,
+	RELOPT_KIND_LAST_DEFAULT = RELOPT_KIND_MINMAX,
 	/* some compilers treat enums as signed ints, so we can't use 1 << 31 */
 	RELOPT_KIND_MAX = (1 << 30)
 } relopt_kind;
diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h
index 8a57698..8beb1be 100644
--- a/src/include/access/relscan.h
+++ b/src/include/access/relscan.h
@@ -35,8 +35,10 @@ typedef struct HeapScanDescData
 	bool		rs_temp_snap;	/* unregister snapshot at scan end? */
 
 	/* state set up at initscan time */
-	BlockNumber rs_nblocks;		/* number of blocks to scan */
+	BlockNumber rs_nblocks;		/* total number of blocks in rel */
 	BlockNumber rs_startblock;	/* block # to start at */
+	BlockNumber	rs_initblock;	/* first block in the range to scan */
+	BlockNumber	rs_numblocks;	/* number of blocks to scan */
 	BufferAccessStrategy rs_strategy;	/* access strategy for reads */
 	bool		rs_syncscan;	/* report location to syncscan logic? */
 
diff --git a/src/include/access/rmgrlist.h b/src/include/access/rmgrlist.h
index 662fb77..9dc995a 100644
--- a/src/include/access/rmgrlist.h
+++ b/src/include/access/rmgrlist.h
@@ -42,3 +42,4 @@ PG_RMGR(RM_GIN_ID, "Gin", gin_redo, gin_desc, gin_xlog_startup, gin_xlog_cleanup
 PG_RMGR(RM_GIST_ID, "Gist", gist_redo, gist_desc, gist_xlog_startup, gist_xlog_cleanup)
 PG_RMGR(RM_SEQ_ID, "Sequence", seq_redo, seq_desc, NULL, NULL)
 PG_RMGR(RM_SPGIST_ID, "SPGist", spg_redo, spg_desc, spg_xlog_startup, spg_xlog_cleanup)
+PG_RMGR(RM_MINMAX_ID, "MinMax", minmax_redo, minmax_desc, NULL, NULL)
diff --git a/src/include/catalog/index.h b/src/include/catalog/index.h
index 006b180..de90178 100644
--- a/src/include/catalog/index.h
+++ b/src/include/catalog/index.h
@@ -97,6 +97,14 @@ extern double IndexBuildHeapScan(Relation heapRelation,
 				   bool allow_sync,
 				   IndexBuildCallback callback,
 				   void *callback_state);
+extern double IndexBuildHeapRangeScan(Relation heapRelation,
+						Relation indexRelation,
+						IndexInfo *indexInfo,
+						bool allow_sync,
+						BlockNumber start_blockno,
+						BlockNumber numblocks,
+						IndexBuildCallback callback,
+						void *callback_state);
 
 extern void validate_index(Oid heapId, Oid indexId, Snapshot snapshot);
 
diff --git a/src/include/catalog/pg_am.h b/src/include/catalog/pg_am.h
index 759ea70..3010120 100644
--- a/src/include/catalog/pg_am.h
+++ b/src/include/catalog/pg_am.h
@@ -132,5 +132,7 @@ DESCR("GIN index access method");
 DATA(insert OID = 4000 (  spgist	0 5 f f f f f t f t f f f 0 spginsert spgbeginscan spggettuple spggetbitmap spgrescan spgendscan spgmarkpos spgrestrpos spgbuild spgbuildempty spgbulkdelete spgvacuumcleanup spgcanreturn spgcostestimate spgoptions ));
 DESCR("SP-GiST index access method");
 #define SPGIST_AM_OID 4000
+DATA(insert OID = 3580 (  minmax	5 7 f f f f t t f t t f f 0 mminsert mmbeginscan - mmgetbitmap mmrescan mmendscan mmmarkpos mmrestrpos mmbuild mmbuildempty mmbulkdelete mmvacuumcleanup - mmcostestimate mmoptions ));
+#define MINMAX_AM_OID 3580
 
 #endif   /* PG_AM_H */
diff --git a/src/include/catalog/pg_amop.h b/src/include/catalog/pg_amop.h
index 3ef5a49..4df4b7a 100644
--- a/src/include/catalog/pg_amop.h
+++ b/src/include/catalog/pg_amop.h
@@ -845,4 +845,85 @@ DATA(insert (	3550	869 869 25 s	932 783 0 ));
 DATA(insert (	3550	869 869 26 s	933 783 0 ));
 DATA(insert (	3550	869 869 27 s	934 783 0 ));
 
+/*
+ * MinMax int4_ops
+ */
+DATA(insert (	4054     23   23 1 s	  97	3580 0 ));
+DATA(insert (	4054     23   23 2 s	 523	3580 0 ));
+DATA(insert (	4054     23   23 3 s	  96	3580 0 ));
+DATA(insert (	4054     23   23 4 s	 525	3580 0 ));
+DATA(insert (	4054     23   23 5 s	 521	3580 0 ));
+
+/*
+ * MinMax numeric_ops
+ */
+DATA(insert (	4055   1700 1700 1 s	1754	3580 0 ));
+DATA(insert (	4055   1700 1700 2 s	1755	3580 0 ));
+DATA(insert (	4055   1700 1700 3 s	1752	3580 0 ));
+DATA(insert (	4055   1700 1700 4 s	1757	3580 0 ));
+DATA(insert (	4055   1700 1700 5 s	1756	3580 0 ));
+
+/*
+ * MinMax text_ops
+ */
+DATA(insert (	4056     25   25 1 s	 664	3580 0 ));
+DATA(insert (	4056     25   25 2 s	 665	3580 0 ));
+DATA(insert (	4056     25   25 3 s	  98	3580 0 ));
+DATA(insert (	4056     25   25 4 s	 667	3580 0 ));
+DATA(insert (	4056     25   25 5 s	 666	3580 0 ));
+
+/*
+ * MinMax time_ops
+ */
+DATA(insert (	4057   1083 1083 1 s	1110	3580 0 ));
+DATA(insert (	4057   1083 1083 2 s	1111	3580 0 ));
+DATA(insert (	4057   1083 1083 3 s	1108	3580 0 ));
+DATA(insert (	4057   1083 1083 4 s	1113	3580 0 ));
+DATA(insert (	4057   1083 1083 5 s	1112	3580 0 ));
+
+/*
+ * MinMax timetz_ops
+ */
+DATA(insert (	4058   1266 1266 1 s	1552	3580 0 ));
+DATA(insert (	4058   1266 1266 2 s	1553	3580 0 ));
+DATA(insert (	4058   1266 1266 3 s	1550	3580 0 ));
+DATA(insert (	4058   1266 1266 4 s	1555	3580 0 ));
+DATA(insert (	4058   1266 1266 5 s	1554	3580 0 ));
+
+/*
+ * MinMax timestamp_ops
+ */
+DATA(insert (	4059   1114 1114 1 s	2062	3580 0 ));
+DATA(insert (	4059   1114 1114 2 s	2063	3580 0 ));
+DATA(insert (	4059   1114 1114 3 s	2060	3580 0 ));
+DATA(insert (	4059   1114 1114 4 s	2065	3580 0 ));
+DATA(insert (	4059   1114 1114 5 s	2064	3580 0 ));
+
+/*
+ * MinMax timestamptz_ops
+ */
+DATA(insert (	4060   1184 1184 1 s	1322	3580 0 ));
+DATA(insert (	4060   1184 1184 2 s	1323	3580 0 ));
+DATA(insert (	4060   1184 1184 3 s	1320	3580 0 ));
+DATA(insert (	4060   1184 1184 4 s	1325	3580 0 ));
+DATA(insert (	4060   1184 1184 5 s	1324	3580 0 ));
+
+/*
+ * MinMax date_ops
+ */
+DATA(insert (	4061   1082 1082 1 s	1095	3580 0 ));
+DATA(insert (	4061   1082 1082 2 s	1096	3580 0 ));
+DATA(insert (	4061   1082 1082 3 s	1093	3580 0 ));
+DATA(insert (	4061   1082 1082 4 s	1098	3580 0 ));
+DATA(insert (	4061   1082 1082 5 s	1097	3580 0 ));
+
+/*
+ * MinMax char_ops
+ */
+DATA(insert (	4062     18   18 1 s	 631	3580 0 ));
+DATA(insert (	4062     18   18 2 s	 632	3580 0 ));
+DATA(insert (	4062     18   18 3 s	  92	3580 0 ));
+DATA(insert (	4062     18   18 4 s	 634	3580 0 ));
+DATA(insert (	4062     18   18 5 s	 633	3580 0 ));
+
 #endif   /* PG_AMOP_H */
diff --git a/src/include/catalog/pg_amproc.h b/src/include/catalog/pg_amproc.h
index 10a47df..9eb2456 100644
--- a/src/include/catalog/pg_amproc.h
+++ b/src/include/catalog/pg_amproc.h
@@ -431,4 +431,77 @@ DATA(insert (	4017   25 25 3 4029 ));
 DATA(insert (	4017   25 25 4 4030 ));
 DATA(insert (	4017   25 25 5 4031 ));
 
+/* minmax */
+DATA(insert (   4054   23 23 1 3383 ));
+DATA(insert (   4054   23 23 2 3384 ));
+DATA(insert (   4054   23 23 3 3385 ));
+DATA(insert (   4054   23 23 4   66 ));
+DATA(insert (   4054   23 23 5  149 ));
+DATA(insert (   4054   23 23 6  150 ));
+DATA(insert (   4054   23 23 7  147 ));
+
+DATA(insert (   4055   1700 1700 1 3383 ));
+DATA(insert (   4055   1700 1700 2 3384 ));
+DATA(insert (   4055   1700 1700 3 3385 ));
+DATA(insert (   4055   1700 1700 4 1722 ));
+DATA(insert (   4055   1700 1700 5 1723 ));
+DATA(insert (   4055   1700 1700 6 1721 ));
+DATA(insert (   4055   1700 1700 7 1720 ));
+
+DATA(insert (   4056   25 25 1 3383 ));
+DATA(insert (   4056   25 25 2 3384 ));
+DATA(insert (   4056   25 25 3 3385 ));
+DATA(insert (   4056   25 25 4  740 ));
+DATA(insert (   4056   25 25 5  741 ));
+DATA(insert (   4056   25 25 6  743 ));
+DATA(insert (   4056   25 25 7  742 ));
+
+DATA(insert (   4057   1083 1083 1 3383 ));
+DATA(insert (   4057   1083 1083 2 3384 ));
+DATA(insert (   4057   1083 1083 3 3385 ));
+DATA(insert (   4057   1083 1083 4 1102 ));
+DATA(insert (   4057   1083 1083 5 1103 ));
+DATA(insert (   4057   1083 1083 6 1105 ));
+DATA(insert (   4057   1083 1083 7 1104 ));
+
+DATA(insert (   4058   1266 1266 1 3383 ));
+DATA(insert (   4058   1266 1266 2 3384 ));
+DATA(insert (   4058   1266 1266 3 3385 ));
+DATA(insert (   4058   1266 1266 4 1354 ));
+DATA(insert (   4058   1266 1266 5 1355 ));
+DATA(insert (   4058   1266 1266 6 1356 ));
+DATA(insert (   4058   1266 1266 7 1357 ));
+
+DATA(insert (   4059   1114 1114 1 3383 ));
+DATA(insert (   4059   1114 1114 2 3384 ));
+DATA(insert (   4059   1114 1114 3 3385 ));
+DATA(insert (   4059   1114 1114 4 2054 ));
+DATA(insert (   4059   1114 1114 5 2055 ));
+DATA(insert (   4059   1114 1114 6 2056 ));
+DATA(insert (   4059   1114 1114 7 2057 ));
+
+DATA(insert (   4060   1184 1184 1 3383 ));
+DATA(insert (   4060   1184 1184 2 3384 ));
+DATA(insert (   4060   1184 1184 3 3385 ));
+DATA(insert (   4060   1184 1184 4 1154 ));
+DATA(insert (   4060   1184 1184 5 1155 ));
+DATA(insert (   4060   1184 1184 6 1156 ));
+DATA(insert (   4060   1184 1184 7 1157 ));
+
+DATA(insert (   4061   1082 1082 1 3383 ));
+DATA(insert (   4061   1082 1082 2 3384 ));
+DATA(insert (   4061   1082 1082 3 3385 ));
+DATA(insert (   4061   1082 1082 4 1087 ));
+DATA(insert (   4061   1082 1082 5 1088 ));
+DATA(insert (   4061   1082 1082 6 1090 ));
+DATA(insert (   4061   1082 1082 7 1089 ));
+
+DATA(insert (   4062   18 18 1 3383 ));
+DATA(insert (   4062   18 18 2 3384 ));
+DATA(insert (   4062   18 18 3 3385 ));
+DATA(insert (   4062   18 18 4 1246 ));
+DATA(insert (   4062   18 18 5   72 ));
+DATA(insert (   4062   18 18 6   74 ));
+DATA(insert (   4062   18 18 7   73 ));
+
 #endif   /* PG_AMPROC_H */
diff --git a/src/include/catalog/pg_opclass.h b/src/include/catalog/pg_opclass.h
index dc52341..70e21ce 100644
--- a/src/include/catalog/pg_opclass.h
+++ b/src/include/catalog/pg_opclass.h
@@ -235,5 +235,14 @@ DATA(insert (	403		jsonb_ops			PGNSP PGUID 4033  3802 t 0 ));
 DATA(insert (	405		jsonb_ops			PGNSP PGUID 4034  3802 t 0 ));
 DATA(insert (	2742	jsonb_ops			PGNSP PGUID 4036  3802 t 25 ));
 DATA(insert (	2742	jsonb_path_ops		PGNSP PGUID 4037  3802 f 23 ));
+DATA(insert (	3580	int4_ops			PGNSP PGUID 4054    23 t 0 ));
+DATA(insert (	3580	numeric_ops			PGNSP PGUID 4055  1700 t 0 ));
+DATA(insert (	3580	text_ops			PGNSP PGUID 4056    25 t 0 ));
+DATA(insert (	3580	time_ops			PGNSP PGUID 4057  1083 t 0 ));
+DATA(insert (	3580	timetz_ops			PGNSP PGUID 4058  1266 t 0 ));
+DATA(insert (	3580	timestamp_ops		PGNSP PGUID 4059  1114 t 0 ));
+DATA(insert (	3580	timestamptz_ops		PGNSP PGUID 4060  1184 t 0 ));
+DATA(insert (	3580	date_ops			PGNSP PGUID 4061  1082 t 0 ));
+DATA(insert (	3580	char_ops			PGNSP PGUID 4062    18 t 0 ));
 
 #endif   /* PG_OPCLASS_H */
diff --git a/src/include/catalog/pg_opfamily.h b/src/include/catalog/pg_opfamily.h
index 26297ce..08ea569 100644
--- a/src/include/catalog/pg_opfamily.h
+++ b/src/include/catalog/pg_opfamily.h
@@ -157,4 +157,14 @@ DATA(insert OID = 4035 (	783		jsonb_ops		PGNSP PGUID ));
 DATA(insert OID = 4036 (	2742	jsonb_ops		PGNSP PGUID ));
 DATA(insert OID = 4037 (	2742	jsonb_path_ops	PGNSP PGUID ));
 
+DATA(insert OID = 4054 (	3580	int4_ops		PGNSP PGUID ));
+DATA(insert OID = 4055 (	3580	numeric_ops		PGNSP PGUID ));
+DATA(insert OID = 4056 (	3580	text_ops		PGNSP PGUID ));
+DATA(insert OID = 4057 (	3580	time_ops		PGNSP PGUID ));
+DATA(insert OID = 4058 (	3580	timetz_ops		PGNSP PGUID ));
+DATA(insert OID = 4059 (	3580	timestamp_ops	PGNSP PGUID ));
+DATA(insert OID = 4060 (	3580	timestamptz_ops	PGNSP PGUID ));
+DATA(insert OID = 4061 (	3580	date_ops		PGNSP PGUID ));
+DATA(insert OID = 4062 (	3580	char_ops		PGNSP PGUID ));
+
 #endif   /* PG_OPFAMILY_H */
diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h
index 0af1248..433a442 100644
--- a/src/include/catalog/pg_proc.h
+++ b/src/include/catalog/pg_proc.h
@@ -565,6 +565,33 @@ DESCR("btree(internal)");
 DATA(insert OID = 2785 (  btoptions		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 17 "1009 16" _null_ _null_ _null_ _null_  btoptions _null_ _null_ _null_ ));
 DESCR("btree(internal)");
 
+DATA(insert OID = 3789 (  mmgetbitmap	   PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 20 "2281 2281" _null_ _null_ _null_ _null_	mmgetbitmap _null_ _null_ _null_ ));
+DESCR("minmax(internal)");
+DATA(insert OID = 3790 (  mminsert		   PGNSP PGUID 12 1 0 0 0 f f f f t f v 6 0 16 "2281 2281 2281 2281 2281 2281" _null_ _null_ _null_ _null_	mminsert _null_ _null_ _null_ ));
+DESCR("minmax(internal)");
+DATA(insert OID = 3791 (  mmbeginscan	   PGNSP PGUID 12 1 0 0 0 f f f f t f v 3 0 2281 "2281 2281 2281" _null_ _null_ _null_ _null_	mmbeginscan _null_ _null_ _null_ ));
+DESCR("minmax(internal)");
+DATA(insert OID = 3792 (  mmrescan		   PGNSP PGUID 12 1 0 0 0 f f f f t f v 5 0 2278 "2281 2281 2281 2281 2281" _null_ _null_ _null_ _null_ mmrescan _null_ _null_ _null_ ));
+DESCR("minmax(internal)");
+DATA(insert OID = 3793 (  mmendscan		   PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "2281" _null_ _null_ _null_ _null_ mmendscan _null_ _null_ _null_ ));
+DESCR("minmax(internal)");
+DATA(insert OID = 3794 (  mmmarkpos		   PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "2281" _null_ _null_ _null_ _null_ mmmarkpos _null_ _null_ _null_ ));
+DESCR("minmax(internal)");
+DATA(insert OID = 3795 (  mmrestrpos		   PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "2281" _null_ _null_ _null_ _null_ mmrestrpos _null_ _null_ _null_ ));
+DESCR("minmax(internal)");
+DATA(insert OID = 3796 (  mmbuild		   PGNSP PGUID 12 1 0 0 0 f f f f t f v 3 0 2281 "2281 2281 2281" _null_ _null_ _null_ _null_ mmbuild _null_ _null_ _null_ ));
+DESCR("minmax(internal)");
+DATA(insert OID = 3797 (  mmbuildempty	   PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "2281" _null_ _null_ _null_ _null_ mmbuildempty _null_ _null_ _null_ ));
+DESCR("minmax(internal)");
+DATA(insert OID = 3798 (  mmbulkdelete	   PGNSP PGUID 12 1 0 0 0 f f f f t f v 4 0 2281 "2281 2281 2281 2281" _null_ _null_ _null_ _null_ mmbulkdelete _null_ _null_ _null_ ));
+DESCR("minmax(internal)");
+DATA(insert OID = 3799 (  mmvacuumcleanup   PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ mmvacuumcleanup _null_ _null_ _null_ ));
+DESCR("minmax(internal)");
+DATA(insert OID = 3800 (  mmcostestimate   PGNSP PGUID 12 1 0 0 0 f f f f t f v 7 0 2278 "2281 2281 2281 2281 2281 2281 2281" _null_ _null_ _null_ _null_ mmcostestimate _null_ _null_ _null_ ));
+DESCR("minmax(internal)");
+DATA(insert OID = 3801 (  mmoptions		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 17 "1009 16" _null_ _null_ _null_ _null_  mmoptions _null_ _null_ _null_ ));
+DESCR("minmax(internal)");
+
 DATA(insert OID = 339 (  poly_same		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "604 604" _null_ _null_ _null_ _null_ poly_same _null_ _null_ _null_ ));
 DATA(insert OID = 340 (  poly_contain	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "604 604" _null_ _null_ _null_ _null_ poly_contain _null_ _null_ _null_ ));
 DATA(insert OID = 341 (  poly_left		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "604 604" _null_ _null_ _null_ _null_ poly_left _null_ _null_ _null_ ));
@@ -4064,6 +4092,14 @@ DATA(insert OID = 2747 (  arrayoverlap		   PGNSP PGUID 12 1 0 0 0 f f f f t f i
 DATA(insert OID = 2748 (  arraycontains		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "2277 2277" _null_ _null_ _null_ _null_ arraycontains _null_ _null_ _null_ ));
 DATA(insert OID = 2749 (  arraycontained	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "2277 2277" _null_ _null_ _null_ _null_ arraycontained _null_ _null_ _null_ ));
 
+/* MinMax sortable datatype support */
+DATA(insert OID = 3383 ( minmax_sortable_opcinfo PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ mmSortableOpcInfo _null_ _null_ _null_ ));
+DESCR("MinMax sortable datatype support");
+DATA(insert OID = 3384 ( minmax_sortable_add_value PGNSP PGUID 12 1 0 0 0 f f f f t f i 5 0 16 "2281 2281 2281 2281 2281" _null_ _null_ _null_ _null_ mmSortableAddValue _null_ _null_ _null_ ));
+DESCR("MinMax sortable datatype support");
+DATA(insert OID = 3385 ( minmax_sortable_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 5 0 16 "2281 2281 2281 2281 2281" _null_ _null_ _null_ _null_ mmSortableConsistent _null_ _null_ _null_ ));
+DESCR("MinMax sortable datatype support");
+
 /* userlock replacements */
 DATA(insert OID = 2880 (  pg_advisory_lock				PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "20" _null_ _null_ _null_ _null_ pg_advisory_lock_int8 _null_ _null_ _null_ ));
 DESCR("obtain exclusive advisory lock");
diff --git a/src/include/storage/bufpage.h b/src/include/storage/bufpage.h
index d96e375..7801c85 100644
--- a/src/include/storage/bufpage.h
+++ b/src/include/storage/bufpage.h
@@ -393,6 +393,8 @@ extern void PageInit(Page page, Size pageSize, Size specialSize);
 extern bool PageIsVerified(Page page, BlockNumber blkno);
 extern OffsetNumber PageAddItem(Page page, Item item, Size size,
 			OffsetNumber offsetNumber, bool overwrite, bool is_heap);
+extern void PageOverwriteItemData(Page page, OffsetNumber offset, Item item,
+					  Size size);
 extern Page PageGetTempPage(Page page);
 extern Page PageGetTempPageCopy(Page page);
 extern Page PageGetTempPageCopySpecial(Page page);
@@ -403,6 +405,8 @@ extern Size PageGetExactFreeSpace(Page page);
 extern Size PageGetHeapFreeSpace(Page page);
 extern void PageIndexTupleDelete(Page page, OffsetNumber offset);
 extern void PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems);
+extern void PageIndexDeleteNoCompact(Page page, OffsetNumber *itemnos,
+						 int nitems);
 extern char *PageSetChecksumCopy(Page page, BlockNumber blkno);
 extern void PageSetChecksumInplace(Page page, BlockNumber blkno);
 
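
Both bufpage additions serve to keep line pointer assignments stable:
PageOverwriteItemData apparently replaces an existing item's data in
place, and PageIndexDeleteNoCompact removes items without the
compaction (and hence renumbering of surviving line pointers) that
PageIndexMultiDelete performs.  That stability presumably matters
because the reverse map addresses index tuples by fixed block/offset
TIDs, which a compacting delete would silently invalidate.
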
diff --git a/src/include/utils/selfuncs.h b/src/include/utils/selfuncs.h
index 0f662ec..7482252 100644
--- a/src/include/utils/selfuncs.h
+++ b/src/include/utils/selfuncs.h
@@ -195,6 +195,7 @@ extern Datum hashcostestimate(PG_FUNCTION_ARGS);
 extern Datum gistcostestimate(PG_FUNCTION_ARGS);
 extern Datum spgcostestimate(PG_FUNCTION_ARGS);
 extern Datum gincostestimate(PG_FUNCTION_ARGS);
+extern Datum mmcostestimate(PG_FUNCTION_ARGS);
 
 /* Functions in array_selfuncs.c */
 
diff --git a/src/test/regress/expected/opr_sanity.out b/src/test/regress/expected/opr_sanity.out
index c04cddc..0ce2739 100644
--- a/src/test/regress/expected/opr_sanity.out
+++ b/src/test/regress/expected/opr_sanity.out
@@ -1591,6 +1591,11 @@ ORDER BY 1, 2, 3;
        2742 |            9 | ?
        2742 |           10 | ?|
        2742 |           11 | ?&
+       3580 |            1 | <
+       3580 |            2 | <=
+       3580 |            3 | =
+       3580 |            4 | >=
+       3580 |            5 | >
        4000 |            1 | <<
        4000 |            1 | ~<~
        4000 |            2 | &<
@@ -1613,7 +1618,7 @@ ORDER BY 1, 2, 3;
        4000 |           15 | >
        4000 |           16 | @>
        4000 |           18 | =
-(80 rows)
+(85 rows)
 
 -- Check that all opclass search operators have selectivity estimators.
 -- This is not absolutely required, but it seems a reasonable thing
@@ -1775,11 +1780,13 @@ WHERE NOT (
   -- GIN has six support functions. 1-3 are mandatory, 5 is optional, and
   --   at least one of 4 and 6 must be given.
   -- SP-GiST has five support functions, all mandatory
+  -- MinMax has seven support functions, all mandatory
   amname = 'btree' AND procnums @> '{1}' OR
   amname = 'hash' AND procnums = '{1}' OR
   amname = 'gist' AND procnums @> '{1, 2, 3, 4, 5, 6, 7}' OR
   amname = 'gin' AND (procnums @> '{1, 2, 3}' AND (procnums && '{4, 6}')) OR
-  amname = 'spgist' AND procnums = '{1, 2, 3, 4, 5}'
+  amname = 'spgist' AND procnums = '{1, 2, 3, 4, 5}' OR
+  amname = 'minmax' AND procnums = '{1, 2, 3, 4, 5, 6, 7}'
 );
  amname | opfname | amproclefttype | amprocrighttype | procnums 
 --------+---------+----------------+-----------------+----------
@@ -1800,7 +1807,8 @@ WHERE NOT (
   amname = 'hash' AND procnums = '{1}' OR
   amname = 'gist' AND procnums @> '{1, 2, 3, 4, 5, 6, 7}' OR
   amname = 'gin' AND (procnums @> '{1, 2, 3}' AND (procnums && '{4, 6}')) OR
-  amname = 'spgist' AND procnums = '{1, 2, 3, 4, 5}'
+  amname = 'spgist' AND procnums = '{1, 2, 3, 4, 5}' OR
+  amname = 'minmax' AND procnums = '{1, 2, 3, 4, 5, 6, 7}'
 );
  amname | opcname | procnums 
 --------+---------+----------
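
Both result tables stay empty after the change, i.e. no opfamily or
opclass violates the new rule: every minmax opfamily registers exactly
support procedures 1 through 7.
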
diff --git a/src/test/regress/sql/opr_sanity.sql b/src/test/regress/sql/opr_sanity.sql
index 213a66d..6670661 100644
--- a/src/test/regress/sql/opr_sanity.sql
+++ b/src/test/regress/sql/opr_sanity.sql
@@ -1178,11 +1178,13 @@ WHERE NOT (
   -- GIN has six support functions. 1-3 are mandatory, 5 is optional, and
   --   at least one of 4 and 6 must be given.
   -- SP-GiST has five support functions, all mandatory
+  -- MinMax has seven support functions, all mandatory
   amname = 'btree' AND procnums @> '{1}' OR
   amname = 'hash' AND procnums = '{1}' OR
   amname = 'gist' AND procnums @> '{1, 2, 3, 4, 5, 6, 7}' OR
   amname = 'gin' AND (procnums @> '{1, 2, 3}' AND (procnums && '{4, 6}')) OR
-  amname = 'spgist' AND procnums = '{1, 2, 3, 4, 5}'
+  amname = 'spgist' AND procnums = '{1, 2, 3, 4, 5}' OR
+  amname = 'minmax' AND procnums = '{1, 2, 3, 4, 5, 6, 7}'
 );
 
 -- Also, check if there are any pg_opclass entries that don't seem to have
@@ -1201,7 +1203,8 @@ WHERE NOT (
   amname = 'hash' AND procnums = '{1}' OR
   amname = 'gist' AND procnums @> '{1, 2, 3, 4, 5, 6, 7}' OR
   amname = 'gin' AND (procnums @> '{1, 2, 3}' AND (procnums && '{4, 6}')) OR
-  amname = 'spgist' AND procnums = '{1, 2, 3, 4, 5}'
+  amname = 'spgist' AND procnums = '{1, 2, 3, 4, 5}' OR
+  amname = 'minmax' AND procnums = '{1, 2, 3, 4, 5, 6, 7}'
 );
 
 -- Unfortunately, we can't check the amproc link very well because the
