From 81f838d81f15e9b8e91cf51efacb2164a20bf55d Mon Sep 17 00:00:00 2001
From: Peter Geoghegan <pg@heroku.com>
Date: Wed, 27 Aug 2014 15:01:32 -0700
Subject: [PATCH 2/6] Support INSERT ... ON CONFLICT {UPDATE | IGNORE}

This non-standard INSERT clause allows DML statement authors to specify
that in the event of any of the tuples being inserted duplicating an
existing tuple in terms of a value or set of values constrained by a
unique index, an alternative path may be taken.  The statement may
alternatively IGNORE the tuple being inserted without raising an error,
or go to UPDATE the existing tuple whose value is duplicated by a value
within one single tuple proposed for insertion.
The implementation loops until either an insert or an UPDATE/IGNORE
occurs.  No existing tuple may be affected more than once per INSERT.

This is implemented using a new infrastructure called "speculative
insertion".  (The approach to "Value locking" presented here follows
design #2, as described on the value locking Postgres Wiki page).
Alternatively, we may go to UPDATE, using the EvalPlanQual() mechanism
to execute a special auxiliary plan.

READ COMMITTED isolation level is permitted to UPDATE a tuple even where
no version is visible to the command's MVCC snapshot.  Similarly, any
query predicate associated with the UPDATE portion of the new statement
need only satisfy an already locked, conclusively committed and visible
conflict tuple.  When the predicate isn't satisfied, the tuple is still
locked, which implies that at READ COMMITTED, a tuple may be locked
without any version being visible to the command's MVCC snapshot.

Users specify a single unique index to take the alternative path on,
which is inferred from a set of user-supplied column names (or
expressions).  This is mandatory for the ON CONFLICT UPDATE variant,
which should address concerns about spuriously taking an incorrect
alternative ON CONFLICT path (i.e. the wrong unique index is used for
arbitration of whether or not to take the alternative path) due to there
being more than one would-be unique violation.  Previous revisions of
the patch didn't mandate this.  However, we may still IGNORE based on
the first would-be unique violation detected, on the assumption that it
doesn't particularly matter where it originated from for that variant
(iff the user didn't make a point of indicating his or her intent).

The auxiliary ModifyTable plan used by the UPDATE portion of the new
statement is not formally a subplan of its parent INSERT ModifyTable
plan.  Rather, it's an independently planned subquery, whose execution
is tightly driven by its parent.  Special auxiliary state pertaining to
the auxiliary UPDATE is tracked by its parent through all stages of
query execution.

The implementation imposes some restrictions on child auxiliary UPDATE
plans, which make the plans comport with their parent to the extent
required during the executor stage.  One user-visible consequence of
this is that the special auxiliary UPDATE query cannot have subselects
within its targetlist or WHERE clause.  UPDATEs may not reference any
other table, and UPDATE FROM is disallowed.  INSERT's RETURNING clause
projects tuples successfully inserted and updated.  An INSERT with an ON
CONFLICT UPDATE clause processes all slots that are ultimately affected,
regardless of whether or not the alternative ON CONFLICT UPDATE path was
taken.  However, if an ON CONFLICT UPDATE's WHERE clause is not
satisfied in respect of some slot/tuple, the post-update tuple is not
projected (although the row is still locked, just as before).

Note that pg_stat_statements does not fingerprint ExcludedExpr, because
it cannot appear in the post-parse-analysis, pre-rewrite Query tree.
(pg_stat_statements does not fingerprint every primnode anyway, mostly
because some are only expected in utility statements).  Other existing
Node handling sites that don't expect to see primnodes that appear only
after rewriting (ExcludedExpr may be in its own subcategory here in that
it is the only such non-utility related Node) do not have an
ExcludedExpr case added either.
---
 contrib/pg_stat_statements/pg_stat_statements.c |   5 +
 contrib/postgres_fdw/deparse.c                  |   7 +-
 contrib/postgres_fdw/postgres_fdw.c             |  16 +-
 contrib/postgres_fdw/postgres_fdw.h             |   2 +-
 src/backend/access/heap/heapam.c                |  97 ++++-
 src/backend/access/nbtree/nbtinsert.c           |  32 +-
 src/backend/catalog/index.c                     |  59 ++-
 src/backend/catalog/indexing.c                  |   2 +-
 src/backend/commands/constraint.c               |   7 +-
 src/backend/commands/copy.c                     |   7 +-
 src/backend/commands/explain.c                  |  87 ++++-
 src/backend/executor/execMain.c                 |  14 +-
 src/backend/executor/execQual.c                 |  54 +++
 src/backend/executor/execUtils.c                | 257 +++++++++++--
 src/backend/executor/nodeLockRows.c             |   9 +-
 src/backend/executor/nodeModifyTable.c          | 464 +++++++++++++++++++++++-
 src/backend/nodes/copyfuncs.c                   |  55 +++
 src/backend/nodes/equalfuncs.c                  |  43 +++
 src/backend/nodes/nodeFuncs.c                   |  74 ++++
 src/backend/nodes/outfuncs.c                    |  18 +
 src/backend/nodes/readfuncs.c                   |  19 +
 src/backend/optimizer/path/indxpath.c           |  57 +++
 src/backend/optimizer/path/tidpath.c            |   8 +-
 src/backend/optimizer/plan/createplan.c         |  16 +-
 src/backend/optimizer/plan/planner.c            |  53 +++
 src/backend/optimizer/plan/setrefs.c            |  31 +-
 src/backend/optimizer/plan/subselect.c          |   6 +
 src/backend/optimizer/util/plancat.c            | 222 +++++++++++-
 src/backend/parser/analyze.c                    |  86 ++++-
 src/backend/parser/gram.y                       |  75 +++-
 src/backend/parser/parse_clause.c               | 258 +++++++++++--
 src/backend/parser/parse_expr.c                 |   6 +-
 src/backend/parser/parse_node.c                 |   8 +-
 src/backend/rewrite/rewriteHandler.c            | 127 ++++++-
 src/backend/storage/ipc/procarray.c             | 109 ++++++
 src/backend/storage/lmgr/lmgr.c                 |  68 ++++
 src/backend/tcop/pquery.c                       |  16 +-
 src/backend/utils/adt/lockfuncs.c               |   1 +
 src/backend/utils/adt/ruleutils.c               |  39 ++
 src/backend/utils/time/tqual.c                  |  52 +++
 src/bin/psql/common.c                           |   5 +-
 src/include/access/heapam.h                     |   3 +-
 src/include/access/heapam_xlog.h                |   2 +
 src/include/catalog/index.h                     |   2 +
 src/include/executor/executor.h                 |  21 +-
 src/include/nodes/execnodes.h                   |  19 +
 src/include/nodes/nodes.h                       |  18 +
 src/include/nodes/parsenodes.h                  |  40 +-
 src/include/nodes/plannodes.h                   |   3 +
 src/include/nodes/primnodes.h                   |  47 +++
 src/include/optimizer/paths.h                   |   1 +
 src/include/optimizer/plancat.h                 |   2 +
 src/include/optimizer/planmain.h                |   3 +-
 src/include/parser/kwlist.h                     |   2 +
 src/include/parser/parse_clause.h               |   2 +
 src/include/parser/parse_node.h                 |   1 +
 src/include/storage/lmgr.h                      |   5 +
 src/include/storage/lock.h                      |  10 +
 src/include/storage/proc.h                      |  13 +
 src/include/storage/procarray.h                 |   7 +
 src/include/utils/snapshot.h                    |  11 +
 61 files changed, 2624 insertions(+), 159 deletions(-)

diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 95616b3..414ec83 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -2198,6 +2198,11 @@ JumbleQuery(pgssJumbleState *jstate, Query *query)
 	JumbleRangeTable(jstate, query->rtable);
 	JumbleExpr(jstate, (Node *) query->jointree);
 	JumbleExpr(jstate, (Node *) query->targetList);
+	APP_JUMB(query->specClause);
+	JumbleExpr(jstate, (Node *) query->arbiterExpr);
+	JumbleExpr(jstate, query->arbiterWhere);
+	if (query->onConflict)
+		JumbleQuery(jstate, (Query *) query->onConflict);
 	JumbleExpr(jstate, (Node *) query->returningList);
 	JumbleExpr(jstate, (Node *) query->groupClause);
 	JumbleExpr(jstate, query->havingQual);
diff --git a/contrib/postgres_fdw/deparse.c b/contrib/postgres_fdw/deparse.c
index 59cb053..ca51586 100644
--- a/contrib/postgres_fdw/deparse.c
+++ b/contrib/postgres_fdw/deparse.c
@@ -847,8 +847,8 @@ appendWhereClause(StringInfo buf,
 void
 deparseInsertSql(StringInfo buf, PlannerInfo *root,
 				 Index rtindex, Relation rel,
-				 List *targetAttrs, List *returningList,
-				 List **retrieved_attrs)
+				 List *targetAttrs, bool ignore,
+				 List *returningList, List **retrieved_attrs)
 {
 	AttrNumber	pindex;
 	bool		first;
@@ -892,6 +892,9 @@ deparseInsertSql(StringInfo buf, PlannerInfo *root,
 	else
 		appendStringInfoString(buf, " DEFAULT VALUES");
 
+	if (ignore)
+		appendStringInfoString(buf, " ON CONFLICT IGNORE");
+
 	deparseReturningList(buf, root, rtindex, rel,
 					   rel->trigdesc && rel->trigdesc->trig_insert_after_row,
 						 returningList, retrieved_attrs);
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index d76e739..1539899 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -1167,6 +1167,7 @@ postgresPlanForeignModify(PlannerInfo *root,
 	List	   *targetAttrs = NIL;
 	List	   *returningList = NIL;
 	List	   *retrieved_attrs = NIL;
+	bool		ignore = false;
 
 	initStringInfo(&sql);
 
@@ -1201,7 +1202,7 @@ postgresPlanForeignModify(PlannerInfo *root,
 		int			col;
 
 		col = -1;
-		while ((col = bms_next_member(rte->modifiedCols, col)) >= 0)
+		while ((col = bms_next_member(rte->updatedCols, col)) >= 0)
 		{
 			/* bit numbers are offset by FirstLowInvalidHeapAttributeNumber */
 			AttrNumber	attno = col + FirstLowInvalidHeapAttributeNumber;
@@ -1218,6 +1219,17 @@ postgresPlanForeignModify(PlannerInfo *root,
 	if (plan->returningLists)
 		returningList = (List *) list_nth(plan->returningLists, subplan_index);
 
+	if (root->parse->arbiterExpr)
+		ereport(ERROR,
+				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+				 errmsg("postgres_fdw does not support ON CONFLICT unique index inference")));
+	else if (plan->spec == SPEC_INSERT)
+		ereport(ERROR,
+				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+				 errmsg("postgres_fdw does not support ON CONFLICT UPDATE")));
+	else if (plan->spec == SPEC_IGNORE)
+		ignore = true;
+
 	/*
 	 * Construct the SQL command string.
 	 */
@@ -1225,7 +1237,7 @@ postgresPlanForeignModify(PlannerInfo *root,
 	{
 		case CMD_INSERT:
 			deparseInsertSql(&sql, root, resultRelation, rel,
-							 targetAttrs, returningList,
+							 targetAttrs, ignore, returningList,
 							 &retrieved_attrs);
 			break;
 		case CMD_UPDATE:
diff --git a/contrib/postgres_fdw/postgres_fdw.h b/contrib/postgres_fdw/postgres_fdw.h
index 950c6f7..3763a57 100644
--- a/contrib/postgres_fdw/postgres_fdw.h
+++ b/contrib/postgres_fdw/postgres_fdw.h
@@ -60,7 +60,7 @@ extern void appendWhereClause(StringInfo buf,
 				  List **params);
 extern void deparseInsertSql(StringInfo buf, PlannerInfo *root,
 				 Index rtindex, Relation rel,
-				 List *targetAttrs, List *returningList,
+				 List *targetAttrs, bool ignore, List *returningList,
 				 List **retrieved_attrs);
 extern void deparseUpdateSql(StringInfo buf, PlannerInfo *root,
 				 Index rtindex, Relation rel,
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 46060bc..0aa3e57 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -2048,6 +2048,9 @@ FreeBulkInsertState(BulkInsertState bistate)
  * This causes rows to be frozen, which is an MVCC violation and
  * requires explicit options chosen by user.
  *
+ * If HEAP_INSERT_SPECULATIVE is specified, the MyProc->specInsert fields
+ * are filled.
+ *
  * Note that these options will be applied when inserting into the heap's
  * TOAST table, too, if the tuple requires any out-of-line data.
  *
@@ -2196,6 +2199,13 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 
 	END_CRIT_SECTION();
 
+	/*
+	 * Let others know that we speculatively inserted this tuple, before
+	 * releasing the buffer lock.
+	 */
+	if (options & HEAP_INSERT_SPECULATIVE)
+		SetSpeculativeInsertionTid(relation->rd_node, &heaptup->t_self);
+
 	UnlockReleaseBuffer(buffer);
 	if (vmbuffer != InvalidBuffer)
 		ReleaseBuffer(vmbuffer);
@@ -2616,11 +2626,17 @@ xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
  * (the last only for HeapTupleSelfUpdated, since we
  * cannot obtain cmax from a combocid generated by another transaction).
  * See comments for struct HeapUpdateFailureData for additional info.
+ *
+ * If 'killspeculative' is true, caller requires that we "super-delete" a tuple
+ * we just inserted in the same command. Instead of the normal visibility
+ * checks, we check that the tuple was inserted by the current transaction and
+ * given command id.  Also, instead of setting its xmax, we set xmin to
+ * invalid, making it immediately appear as dead to everyone.
  */
 HTSU_Result
 heap_delete(Relation relation, ItemPointer tid,
 			CommandId cid, Snapshot crosscheck, bool wait,
-			HeapUpdateFailureData *hufd)
+			HeapUpdateFailureData *hufd, bool killspeculative)
 {
 	HTSU_Result result;
 	TransactionId xid = GetCurrentTransactionId();
@@ -2678,7 +2694,18 @@ heap_delete(Relation relation, ItemPointer tid,
 	tp.t_self = *tid;
 
 l1:
-	result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
+	if (!killspeculative)
+	{
+		result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
+	}
+	else
+	{
+		if (tp.t_data->t_choice.t_heap.t_xmin != xid ||
+			tp.t_data->t_choice.t_heap.t_field3.t_cid != cid)
+			elog(ERROR, "attempted to super-delete a tuple from other CID");
+		result = HeapTupleMayBeUpdated;
+	}
+
 
 	if (result == HeapTupleInvisible)
 	{
@@ -2823,12 +2850,15 @@ l1:
 	 * using our own TransactionId below, since some other backend could
 	 * incorporate our XID into a MultiXact immediately afterwards.)
 	 */
-	MultiXactIdSetOldestMember();
+	if (!killspeculative)
+	{
+		MultiXactIdSetOldestMember();
 
-	compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
-							  tp.t_data->t_infomask, tp.t_data->t_infomask2,
-							  xid, LockTupleExclusive, true,
-							  &new_xmax, &new_infomask, &new_infomask2);
+		compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
+								  tp.t_data->t_infomask, tp.t_data->t_infomask2,
+								  xid, LockTupleExclusive, true,
+								  &new_xmax, &new_infomask, &new_infomask2);
+	}
 
 	START_CRIT_SECTION();
 
@@ -2855,8 +2885,23 @@ l1:
 	tp.t_data->t_infomask |= new_infomask;
 	tp.t_data->t_infomask2 |= new_infomask2;
 	HeapTupleHeaderClearHotUpdated(tp.t_data);
-	HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
-	HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
+	/*
+	 * When killing a speculatively-inserted tuple, we set xmin to invalid
+	 * instead of setting xmax, to make the tuple clearly invisible to
+	 * everyone. In particular, we want HeapTupleSatisfiesDirty() to regard
+	 * the tuple as dead, so that another backend inserting a duplicate key
+	 * value won't unnecessarily wait for our transaction to finish.
+	 */
+	if (!killspeculative)
+	{
+		HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
+		HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
+	}
+	else
+	{
+		HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
+	}
+
 	/* Make sure there is no forward chain link in t_ctid */
 	tp.t_data->t_ctid = tp.t_self;
 
@@ -2872,7 +2917,11 @@ l1:
 		if (RelationIsAccessibleInLogicalDecoding(relation))
 			log_heap_new_cid(relation, &tp);
 
-		xlrec.flags = all_visible_cleared ? XLOG_HEAP_ALL_VISIBLE_CLEARED : 0;
+		xlrec.flags = 0;
+		if (all_visible_cleared)
+			xlrec.flags |= XLOG_HEAP_ALL_VISIBLE_CLEARED;
+		if (killspeculative)
+			xlrec.flags |= XLOG_HEAP_KILLED_SPECULATIVE_TUPLE;
 		xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
 											  tp.t_data->t_infomask2);
 		xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
@@ -2977,7 +3026,7 @@ simple_heap_delete(Relation relation, ItemPointer tid)
 	result = heap_delete(relation, tid,
 						 GetCurrentCommandId(true), InvalidSnapshot,
 						 true /* wait for commit */ ,
-						 &hufd);
+						 &hufd, false);
 	switch (result)
 	{
 		case HeapTupleSelfUpdated:
@@ -4070,14 +4119,16 @@ get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
  *
  * Function result may be:
  *	HeapTupleMayBeUpdated: lock was successfully acquired
+ *	HeapTupleInvisible: lock failed because tuple instantaneously invisible
  *	HeapTupleSelfUpdated: lock failed because tuple updated by self
  *	HeapTupleUpdated: lock failed because tuple updated by other xact
  *	HeapTupleWouldBlock: lock couldn't be acquired and wait_policy is skip
  *
- * In the failure cases, the routine fills *hufd with the tuple's t_ctid,
- * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax
- * (the last only for HeapTupleSelfUpdated, since we
- * cannot obtain cmax from a combocid generated by another transaction).
+ * In the failure cases other than HeapTupleInvisible, the routine fills
+ * *hufd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
+ * if necessary), and t_cmax (the last only for HeapTupleSelfUpdated,
+ * since we cannot obtain cmax from a combocid generated by another
+ * transaction).
  * See comments for struct HeapUpdateFailureData for additional info.
  *
  * See README.tuplock for a thorough explanation of this mechanism.
@@ -4115,8 +4166,15 @@ l3:
 
 	if (result == HeapTupleInvisible)
 	{
-		UnlockReleaseBuffer(*buffer);
-		elog(ERROR, "attempted to lock invisible tuple");
+		LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
+
+		/*
+		 * This is possible, but only when locking a tuple for speculative
+		 * insertion.  We return this value here rather than throwing an error
+		 * in order to give that case the opportunity to throw a more specific
+		 * error.
+		 */
+		return HeapTupleInvisible;
 	}
 	else if (result == HeapTupleBeingUpdated)
 	{
@@ -7326,7 +7384,10 @@ heap_xlog_delete(XLogReaderState *record)
 		HeapTupleHeaderClearHotUpdated(htup);
 		fix_infomask_from_infobits(xlrec->infobits_set,
 								   &htup->t_infomask, &htup->t_infomask2);
-		HeapTupleHeaderSetXmax(htup, xlrec->xmax);
+		if (!(xlrec->flags & XLOG_HEAP_KILLED_SPECULATIVE_TUPLE))
+			HeapTupleHeaderSetXmax(htup, xlrec->xmax);
+		else
+			HeapTupleHeaderSetXmin(htup, InvalidTransactionId);
 		HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
 
 		/* Mark the page as a candidate for pruning */
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 932c6f7..1a4e18d 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -51,7 +51,8 @@ static Buffer _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf);
 static TransactionId _bt_check_unique(Relation rel, IndexTuple itup,
 				 Relation heapRel, Buffer buf, OffsetNumber offset,
 				 ScanKey itup_scankey,
-				 IndexUniqueCheck checkUnique, bool *is_unique);
+				 IndexUniqueCheck checkUnique, bool *is_unique,
+				 uint32 *speculativeToken);
 static void _bt_findinsertloc(Relation rel,
 				  Buffer *bufptr,
 				  OffsetNumber *offsetptr,
@@ -159,17 +160,27 @@ top:
 	 */
 	if (checkUnique != UNIQUE_CHECK_NO)
 	{
-		TransactionId xwait;
+		TransactionId	xwait;
+		uint32			speculativeToken;
 
 		offset = _bt_binsrch(rel, buf, natts, itup_scankey, false);
 		xwait = _bt_check_unique(rel, itup, heapRel, buf, offset, itup_scankey,
-								 checkUnique, &is_unique);
+								 checkUnique, &is_unique, &speculativeToken);
 
 		if (TransactionIdIsValid(xwait))
 		{
 			/* Have to wait for the other guy ... */
 			_bt_relbuf(rel, buf);
-			XactLockTableWait(xwait, rel, &itup->t_tid, XLTW_InsertIndex);
+			/*
+			 * If it's a speculative insertion, wait for it to finish (ie.
+			 * to go ahead with the insertion, or kill the tuple). Otherwise
+			 * wait for the transaction to finish as usual.
+			 */
+			if (speculativeToken)
+				SpeculativeInsertionWait(xwait, speculativeToken);
+			else
+				XactLockTableWait(xwait, rel, &itup->t_tid, XLTW_InsertIndex);
+
 			/* start over... */
 			_bt_freestack(stack);
 			goto top;
@@ -211,9 +222,12 @@ top:
  * also point to end-of-page, which means that the first tuple to check
  * is the first tuple on the next page.
  *
- * Returns InvalidTransactionId if there is no conflict, else an xact ID
- * we must wait for to see if it commits a conflicting tuple.   If an actual
- * conflict is detected, no return --- just ereport().
+ * Returns InvalidTransactionId if there is no conflict, else an xact ID we
+ * must wait for to see if it commits a conflicting tuple.	If an actual
+ * conflict is detected, no return --- just ereport(). If an xact ID is
+ * returned, and the conflicting tuple still has a speculative insertion in
+ * progress, *speculativeToken is set to non-zero, and the caller can wait for
+ * the verdict on the insertion using SpeculativeInsertionWait().
  *
  * However, if checkUnique == UNIQUE_CHECK_PARTIAL, we always return
  * InvalidTransactionId because we don't want to wait.  In this case we
@@ -223,7 +237,8 @@ top:
 static TransactionId
 _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
 				 Buffer buf, OffsetNumber offset, ScanKey itup_scankey,
-				 IndexUniqueCheck checkUnique, bool *is_unique)
+				 IndexUniqueCheck checkUnique, bool *is_unique,
+				 uint32 *speculativeToken)
 {
 	TupleDesc	itupdesc = RelationGetDescr(rel);
 	int			natts = rel->rd_rel->relnatts;
@@ -340,6 +355,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
 						if (nbuf != InvalidBuffer)
 							_bt_relbuf(rel, nbuf);
 						/* Tell _bt_doinsert to wait... */
+						*speculativeToken = SnapshotDirty.speculativeToken;
 						return xwait;
 					}
 
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index f85ed93..e986d7e 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -1662,6 +1662,10 @@ BuildIndexInfo(Relation index)
 	/* other info */
 	ii->ii_Unique = indexStruct->indisunique;
 	ii->ii_ReadyForInserts = IndexIsReady(indexStruct);
+	/* assume not doing speculative insertion for now */
+	ii->ii_UniqueOps = NULL;
+	ii->ii_UniqueProcs = NULL;
+	ii->ii_UniqueStrats = NULL;
 
 	/* initialize index-build state to default */
 	ii->ii_Concurrent = false;
@@ -1671,6 +1675,53 @@ BuildIndexInfo(Relation index)
 }
 
 /* ----------------
+ *		AddUniqueSpeculative
+ *			Append extra state to IndexInfo record
+ *
+ * For unique indexes, we usually don't want to add info to the IndexInfo for
+ * checking uniqueness, since the B-Tree AM handles that directly.  However, in
+ * the case of speculative insertion, external support is required.
+ *
+ * Do this processing here rather than in BuildIndexInfo() to save the common
+ * non-speculative cases the overhead they'd otherwise incur.
+ * ----------------
+ */
+void
+AddUniqueSpeculative(Relation index, IndexInfo *ii)
+{
+	int			ncols = index->rd_rel->relnatts;
+	int			i;
+
+	/*
+	 * fetch info for checking unique indexes
+	 */
+	Assert(ii->ii_Unique);
+
+	if (index->rd_rel->relam != BTREE_AM_OID)
+		elog(ERROR, "unexpected non-btree speculative unique index");
+
+	ii->ii_UniqueOps = (Oid *) palloc(sizeof(Oid) * ncols);
+	ii->ii_UniqueProcs = (Oid *) palloc(sizeof(Oid) * ncols);
+	ii->ii_UniqueStrats = (uint16 *) palloc(sizeof(uint16) * ncols);
+
+	/*
+	 * We have to look up the operator's strategy number.  This
+	 * provides a cross-check that the operator does match the index.
+	 */
+	/* We need the func OIDs and strategy numbers too */
+	for (i = 0; i < ncols; i++)
+	{
+		ii->ii_UniqueStrats[i] = BTEqualStrategyNumber;
+		ii->ii_UniqueOps[i] =
+			get_opfamily_member(index->rd_opfamily[i],
+								index->rd_opcintype[i],
+								index->rd_opcintype[i],
+								ii->ii_UniqueStrats[i]);
+		ii->ii_UniqueProcs[i] = get_opcode(ii->ii_UniqueOps[i]);
+	}
+}
+
+/* ----------------
  *		FormIndexDatum
  *			Construct values[] and isnull[] arrays for a new index tuple.
  *
@@ -2606,10 +2657,10 @@ IndexCheckExclusion(Relation heapRelation,
 		/*
 		 * Check that this tuple has no conflicts.
 		 */
-		check_exclusion_constraint(heapRelation,
-								   indexRelation, indexInfo,
-								   &(heapTuple->t_self), values, isnull,
-								   estate, true, false);
+		check_exclusion_or_unique_constraint(heapRelation, indexRelation,
+											 indexInfo, &(heapTuple->t_self),
+											 values, isnull, estate, true,
+											 false, true, NULL);
 	}
 
 	heap_endscan(scan);
diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c
index fe123ad..0231084 100644
--- a/src/backend/catalog/indexing.c
+++ b/src/backend/catalog/indexing.c
@@ -46,7 +46,7 @@ CatalogOpenIndexes(Relation heapRel)
 	resultRelInfo->ri_RelationDesc = heapRel;
 	resultRelInfo->ri_TrigDesc = NULL;	/* we don't fire triggers */
 
-	ExecOpenIndices(resultRelInfo);
+	ExecOpenIndices(resultRelInfo, false);
 
 	return resultRelInfo;
 }
diff --git a/src/backend/commands/constraint.c b/src/backend/commands/constraint.c
index 561d8fa..d5ab12f 100644
--- a/src/backend/commands/constraint.c
+++ b/src/backend/commands/constraint.c
@@ -170,9 +170,10 @@ unique_key_recheck(PG_FUNCTION_ARGS)
 		 * For exclusion constraints we just do the normal check, but now it's
 		 * okay to throw error.
 		 */
-		check_exclusion_constraint(trigdata->tg_relation, indexRel, indexInfo,
-								   &(new_row->t_self), values, isnull,
-								   estate, false, false);
+		check_exclusion_or_unique_constraint(trigdata->tg_relation, indexRel,
+											 indexInfo, &(new_row->t_self),
+											 values, isnull, estate, false,
+											 false, true, NULL);
 	}
 
 	/*
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index d2996fb..2d45eb3 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -2283,7 +2283,7 @@ CopyFrom(CopyState cstate)
 					  1,		/* dummy rangetable index */
 					  0);
 
-	ExecOpenIndices(resultRelInfo);
+	ExecOpenIndices(resultRelInfo, false);
 
 	estate->es_result_relations = resultRelInfo;
 	estate->es_num_result_relations = 1;
@@ -2438,7 +2438,8 @@ CopyFrom(CopyState cstate)
 
 				if (resultRelInfo->ri_NumIndices > 0)
 					recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
-														   estate);
+														   estate, false,
+														   InvalidOid);
 
 				/* AFTER ROW INSERT Triggers */
 				ExecARInsertTriggers(estate, resultRelInfo, tuple,
@@ -2552,7 +2553,7 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid,
 			ExecStoreTuple(bufferedTuples[i], myslot, InvalidBuffer, false);
 			recheckIndexes =
 				ExecInsertIndexTuples(myslot, &(bufferedTuples[i]->t_self),
-									  estate);
+									  estate, false, InvalidOid);
 			ExecARInsertTriggers(estate, resultRelInfo,
 								 bufferedTuples[i],
 								 recheckIndexes);
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 7cfc9bb..e6a8d8e 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -103,7 +103,8 @@ static void ExplainIndexScanDetails(Oid indexid, ScanDirection indexorderdir,
 static void ExplainScanTarget(Scan *plan, ExplainState *es);
 static void ExplainModifyTarget(ModifyTable *plan, ExplainState *es);
 static void ExplainTargetRel(Plan *plan, Index rti, ExplainState *es);
-static void show_modifytable_info(ModifyTableState *mtstate, ExplainState *es);
+static void show_modifytable_info(ModifyTableState *mtstate, ExplainState *es,
+								  List *ancestors);
 static void ExplainMemberNodes(List *plans, PlanState **planstates,
 				   List *ancestors, ExplainState *es);
 static void ExplainSubPlans(List *plans, List *ancestors,
@@ -763,6 +764,9 @@ ExplainPreScanNode(PlanState *planstate, Bitmapset **rels_used)
 			ExplainPreScanMemberNodes(((ModifyTable *) plan)->plans,
 								  ((ModifyTableState *) planstate)->mt_plans,
 									  rels_used);
+			if (((ModifyTable *) plan)->onConflictPlan)
+				ExplainPreScanNode(((ModifyTableState *) planstate)->onConflict,
+								   rels_used);
 			break;
 		case T_Append:
 			ExplainPreScanMemberNodes(((Append *) plan)->appendplans,
@@ -864,6 +868,8 @@ ExplainNode(PlanState *planstate, List *ancestors,
 	const char *custom_name = NULL;
 	int			save_indent = es->indent;
 	bool		haschildren;
+	bool		suppresschildren = false;
+	ModifyTable *mtplan;
 
 	switch (nodeTag(plan))
 	{
@@ -872,13 +878,33 @@ ExplainNode(PlanState *planstate, List *ancestors,
 			break;
 		case T_ModifyTable:
 			sname = "ModifyTable";
-			switch (((ModifyTable *) plan)->operation)
+			mtplan = (ModifyTable *) plan;
+			switch (mtplan->operation)
 			{
 				case CMD_INSERT:
 					pname = operation = "Insert";
 					break;
 				case CMD_UPDATE:
-					pname = operation = "Update";
+					if (mtplan->spec == SPEC_NONE)
+					{
+						pname = operation = "Update";
+					}
+					else
+					{
+						Assert(mtplan->spec == SPEC_UPDATE);
+
+						pname = operation = "Conflict Update";
+
+						/*
+						 * Do not display child sequential scan/result node.
+						 * Quals from child will be directly attributed to
+						 * ModifyTable node, since we prefer to avoid
+						 * displaying scan node to users, as it is merely an
+						 * implementation detail; it is never executed in the
+						 * conventional way.
+						 */
+						suppresschildren = true;
+					}
 					break;
 				case CMD_DELETE:
 					pname = operation = "Delete";
@@ -1458,7 +1484,8 @@ ExplainNode(PlanState *planstate, List *ancestors,
 										   planstate, es);
 			break;
 		case T_ModifyTable:
-			show_modifytable_info((ModifyTableState *) planstate, es);
+			show_modifytable_info((ModifyTableState *) planstate, es,
+								  ancestors);
 			break;
 		case T_Hash:
 			show_hash_info((HashState *) planstate, es);
@@ -1586,7 +1613,8 @@ ExplainNode(PlanState *planstate, List *ancestors,
 		planstate->subPlan;
 	if (haschildren)
 	{
-		ExplainOpenGroup("Plans", "Plans", false, es);
+		 if (!suppresschildren)
+			ExplainOpenGroup("Plans", "Plans", false, es);
 		/* Pass current PlanState as head of ancestors list for children */
 		ancestors = lcons(planstate, ancestors);
 	}
@@ -1609,9 +1637,13 @@ ExplainNode(PlanState *planstate, List *ancestors,
 	switch (nodeTag(plan))
 	{
 		case T_ModifyTable:
-			ExplainMemberNodes(((ModifyTable *) plan)->plans,
-							   ((ModifyTableState *) planstate)->mt_plans,
-							   ancestors, es);
+			if (((ModifyTable *) plan)->spec != SPEC_UPDATE)
+				ExplainMemberNodes(((ModifyTable *) plan)->plans,
+								   ((ModifyTableState *) planstate)->mt_plans,
+								   ancestors, es);
+			if (((ModifyTable *) plan)->onConflictPlan)
+				ExplainNode(((ModifyTableState *) planstate)->onConflict,
+							ancestors, "Member", NULL, es);
 			break;
 		case T_Append:
 			ExplainMemberNodes(((Append *) plan)->appendplans,
@@ -1649,7 +1681,9 @@ ExplainNode(PlanState *planstate, List *ancestors,
 	if (haschildren)
 	{
 		ancestors = list_delete_first(ancestors);
-		ExplainCloseGroup("Plans", "Plans", false, es);
+
+		if (!suppresschildren)
+			ExplainCloseGroup("Plans", "Plans", false, es);
 	}
 
 	/* in text format, undo whatever indentation we added */
@@ -2202,6 +2236,15 @@ ExplainModifyTarget(ModifyTable *plan, ExplainState *es)
 	rti = linitial_int(plan->resultRelations);
 
 	ExplainTargetRel((Plan *) plan, rti, es);
+
+	if (plan->arbiterIndex != InvalidOid)
+	{
+		char	   *indexname = get_rel_name(plan->arbiterIndex);
+
+		/* nothing to do for text format explains */
+		if (es->format != EXPLAIN_FORMAT_TEXT && indexname != NULL)
+			ExplainPropertyText("Arbiter Index", indexname, es);
+	}
 }
 
 /*
@@ -2237,6 +2280,12 @@ ExplainTargetRel(Plan *plan, Index rti, ExplainState *es)
 			if (es->verbose)
 				namespace = get_namespace_name(get_rel_namespace(rte->relid));
 			objecttag = "Relation Name";
+
+			/*
+			 * ON CONFLICT's "TARGET" alias will not appear in output for
+			 * auxiliary ModifyTable as its alias, because target
+			 * resultRelation is shared between parent and auxiliary queries
+			 */
 			break;
 		case T_FunctionScan:
 			{
@@ -2315,7 +2364,8 @@ ExplainTargetRel(Plan *plan, Index rti, ExplainState *es)
  * Show extra information for a ModifyTable node
  */
 static void
-show_modifytable_info(ModifyTableState *mtstate, ExplainState *es)
+show_modifytable_info(ModifyTableState *mtstate, ExplainState *es,
+					  List *ancestors)
 {
 	FdwRoutine *fdwroutine = mtstate->resultRelInfo->ri_FdwRoutine;
 
@@ -2337,6 +2387,23 @@ show_modifytable_info(ModifyTableState *mtstate, ExplainState *es)
 										 0,
 										 es);
 	}
+	else if (mtstate->spec == SPEC_UPDATE)
+	{
+		PlanState *ps = mtstate->mt_plans[0];
+
+		/*
+		 * Seqscan node is always used, unless optimizer determined that
+		 * predicate precludes ever updating, in which case a simple Result
+		 * node is possible
+		 */
+		Assert(IsA(ps->plan, SeqScan) || IsA(ps->plan, Result));
+
+		/* Attribute child scan node's qual to ModifyTable node */
+		show_scan_qual(ps->plan->qual, "Filter", ps, ancestors, es);
+
+		if (ps->plan->qual)
+			show_instrumentation_count("Rows Removed by Filter", 1, ps, es);
+	}
 }
 
 /*
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index dbcebb7..3d7761d 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -2122,7 +2122,8 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
 			 * the latest version of the row was deleted, so we need do
 			 * nothing.  (Should be safe to examine xmin without getting
 			 * buffer's content lock, since xmin never changes in an existing
-			 * tuple.)
+			 * non-promise tuple, and there is no reason to lock a promise
+			 * tuple until it is clear that it has been fulfilled.)
 			 */
 			if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
 									 priorXmax))
@@ -2203,11 +2204,12 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
 					 * case, so as to avoid the "Halloween problem" of
 					 * repeated update attempts.  In the latter case it might
 					 * be sensible to fetch the updated tuple instead, but
-					 * doing so would require changing heap_lock_tuple as well
-					 * as heap_update and heap_delete to not complain about
-					 * updating "invisible" tuples, which seems pretty scary.
-					 * So for now, treat the tuple as deleted and do not
-					 * process.
+					 * doing so would require changing heap_update and
+					 * heap_delete to not complain about updating "invisible"
+					 * tuples, which seems pretty scary (heap_lock_tuple will
+					 * not complain, but few callers expect HeapTupleInvisible,
+					 * and we're not one of them).  So for now, treat the tuple
+					 * as deleted and do not process.
 					 */
 					ReleaseBuffer(buffer);
 					return NULL;
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index 0e7400f..57d726e 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -182,6 +182,9 @@ static Datum ExecEvalArrayCoerceExpr(ArrayCoerceExprState *astate,
 						bool *isNull, ExprDoneCond *isDone);
 static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext,
 					  bool *isNull, ExprDoneCond *isDone);
+static Datum ExecEvalExcluded(ExcludedExprState *excludedExpr,
+				 ExprContext *econtext, bool *isNull,
+				 ExprDoneCond *isDone);
 
 
 /* ----------------------------------------------------------------
@@ -4338,6 +4341,33 @@ ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext,
 	return 0;					/* keep compiler quiet */
 }
 
+/* ----------------------------------------------------------------
+ * ExecEvalExcluded
+ * ----------------------------------------------------------------
+ */
+static Datum
+ExecEvalExcluded(ExcludedExprState *excludedExpr, ExprContext *econtext,
+				 bool *isNull, ExprDoneCond *isDone)
+{
+	/*
+	 * ExcludedExpr is essentially an expression that adapts its single Var
+	 * argument to refer to the expression context inner slot's tuple, which is
+	 * reserved for the purpose of referencing EXCLUDED.* tuples within ON
+	 * CONFLICT UPDATE auxiliary queries' EPQ expression context (ON CONFLICT
+	 * UPDATE makes special use of the EvalPlanQual() mechanism to update).
+	 *
+	 * nodeModifyTable.c assigns its own table slot in the auxiliary queries'
+	 * EPQ expression state (originating in the parent INSERT node) on the
+	 * assumption that it may only be used by ExcludedExpr, and on the
+	 * assumption that the inner slot is not otherwise useful.  This occurs in
+	 * advance of the expression evaluation for UPDATE (which calls here are
+	 * part of) once per slot proposed for insertion, and works because of
+	 * restrictions on the structure of ON CONFLICT UPDATE auxiliary queries.
+	 *
+	 * Just evaluate nested Var.
+	 */
+	return ExecEvalScalarVar(excludedExpr->arg, econtext, isNull, isDone);
+}
 
 /*
  * ExecEvalExprSwitchContext
@@ -5065,6 +5095,30 @@ ExecInitExpr(Expr *node, PlanState *parent)
 			state = (ExprState *) makeNode(ExprState);
 			state->evalfunc = ExecEvalCurrentOfExpr;
 			break;
+		case T_ExcludedExpr:
+			{
+				ExcludedExpr		   *excludedexpr = (ExcludedExpr *) node;
+				ExcludedExprState	   *cstate = makeNode(ExcludedExprState);
+				Var					   *contained = (Var *) excludedexpr->arg;
+
+				/*
+				 * varno forced to INNER_VAR -- see remarks within
+				 * ExecLockUpdateTuple().
+				 *
+				 * We rely on the assumption that the only place that
+				 * ExcludedExpr may appear is where EXCLUDED Var references
+				 * originally appeared after parse analysis.  The rewriter
+				 * replaces these with ExcludedExpr that reference the
+				 * corresponding Var within the ON CONFLICT UPDATE target RTE.
+				 */
+				Assert(IsA(contained, Var));
+
+				contained->varno = INNER_VAR;
+				cstate->arg = ExecInitExpr((Expr *) contained, parent);
+				state = (ExprState *) cstate;
+				state->evalfunc = (ExprStateEvalFunc) ExecEvalExcluded;
+			}
+			break;
 		case T_TargetEntry:
 			{
 				TargetEntry *tle = (TargetEntry *) node;
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 022041b..cb8e4f6 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -885,7 +885,7 @@ ExecCloseScanRelation(Relation scanrel)
  * ----------------------------------------------------------------
  */
 void
-ExecOpenIndices(ResultRelInfo *resultRelInfo)
+ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative)
 {
 	Relation	resultRelation = resultRelInfo->ri_RelationDesc;
 	List	   *indexoidlist;
@@ -938,6 +938,13 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo)
 		/* extract index key information from the index's pg_index info */
 		ii = BuildIndexInfo(indexDesc);
 
+		/*
+		 * Iff the indexes are to be used for speculative insertion, add extra
+		 * information required by unique index entries
+		 */
+		if (speculative && ii->ii_Unique)
+			AddUniqueSpeculative(indexDesc, ii);
+
 		relationDescs[i] = indexDesc;
 		indexInfoArray[i] = ii;
 		i++;
@@ -990,7 +997,8 @@ ExecCloseIndices(ResultRelInfo *resultRelInfo)
  *
  *		This returns a list of index OIDs for any unique or exclusion
  *		constraints that are deferred and that had
- *		potential (unconfirmed) conflicts.
+ *		potential (unconfirmed) conflicts. (if noDupErr == true, the
+ *		same is done for non-deferred constraints)
  *
  *		CAUTION: this must not be called for a HOT update.
  *		We can't defend against that here for lack of info.
@@ -1000,7 +1008,9 @@ ExecCloseIndices(ResultRelInfo *resultRelInfo)
 List *
 ExecInsertIndexTuples(TupleTableSlot *slot,
 					  ItemPointer tupleid,
-					  EState *estate)
+					  EState *estate,
+					  bool noDupErr,
+					  Oid arbiterIdx)
 {
 	List	   *result = NIL;
 	ResultRelInfo *resultRelInfo;
@@ -1070,7 +1080,17 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
 
 			/* Skip this index-update if the predicate isn't satisfied */
 			if (!ExecQual(predicate, econtext, false))
+			{
+				if (arbiterIdx == indexRelation->rd_index->indexrelid)
+					ereport(ERROR,
+							(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
+							 errmsg("partial arbiter unique index has predicate that does not cover tuple proposed for insertion"),
+							 errdetail("ON CONFLICT inference clause implies that the tuple proposed for insertion must be covered by predicate for partial index \"%s\".",
+									   RelationGetRelationName(indexRelation)),
+							 errtableconstraint(heapRelation,
+												RelationGetRelationName(indexRelation))));
 				continue;
+			}
 		}
 
 		/*
@@ -1092,9 +1112,16 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
 		 * For a deferrable unique index, we tell the index AM to just detect
 		 * possible non-uniqueness, and we add the index OID to the result
 		 * list if further checking is needed.
+		 *
+		 * For a speculative insertion (ON CONFLICT UPDATE/IGNORE), just detect
+		 * possible non-uniqueness, and tell the caller if it failed.
 		 */
 		if (!indexRelation->rd_index->indisunique)
 			checkUnique = UNIQUE_CHECK_NO;
+		else if (noDupErr &&
+				 (arbiterIdx == InvalidOid ||
+				  arbiterIdx == indexRelation->rd_index->indexrelid))
+			checkUnique = UNIQUE_CHECK_PARTIAL;
 		else if (indexRelation->rd_index->indimmediate)
 			checkUnique = UNIQUE_CHECK_YES;
 		else
@@ -1112,8 +1139,11 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
 		 * If the index has an associated exclusion constraint, check that.
 		 * This is simpler than the process for uniqueness checks since we
 		 * always insert first and then check.  If the constraint is deferred,
-		 * we check now anyway, but don't throw error on violation; instead
-		 * we'll queue a recheck event.
+		 * we check now anyway, but don't throw error on violation or wait for
+		 * a conclusive outcome from a concurrent insertion; instead we'll
+		 * queue a recheck event.  Similarly, noDupErr callers (speculative
+		 * inserters) will recheck later, and wait for a conclusive outcome
+		 * then.
 		 *
 		 * An index for an exclusion constraint can't also be UNIQUE (not an
 		 * essential property, we just don't allow it in the grammar), so no
@@ -1121,13 +1151,15 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
 		 */
 		if (indexInfo->ii_ExclusionOps != NULL)
 		{
-			bool		errorOK = !indexRelation->rd_index->indimmediate;
+			bool		violationOK = (!indexRelation->rd_index->indimmediate ||
+									   noDupErr);
 
 			satisfiesConstraint =
-				check_exclusion_constraint(heapRelation,
-										   indexRelation, indexInfo,
-										   tupleid, values, isnull,
-										   estate, false, errorOK);
+				check_exclusion_or_unique_constraint(heapRelation,
+													 indexRelation, indexInfo,
+													 tupleid, values, isnull,
+													 estate, false,
+													 violationOK, false, NULL);
 		}
 
 		if ((checkUnique == UNIQUE_CHECK_PARTIAL ||
@@ -1135,7 +1167,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
 			!satisfiesConstraint)
 		{
 			/*
-			 * The tuple potentially violates the uniqueness or exclusion
+			 * The tuple potentially violates the unique index or exclusion
 			 * constraint, so make a note of the index so that we can re-check
 			 * it later.
 			 */
@@ -1146,18 +1178,154 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
 	return result;
 }
 
+/* ----------------------------------------------------------------
+ *		ExecCheckIndexConstraints
+ *
+ *		This routine checks if a tuple violates any unique or
+ *		exclusion constraints. If no conflict, returns true.
+ *		Otherwise returns false, and the TID of the conflicting
+ *		tuple is returned in *conflictTid
+ *
+ *		Note that this doesn't lock the values in any way, so it's
+ *		possible that a conflicting tuple is inserted immediately
+ *		after this returns, and a later insert with the same values
+ *		still conflicts. But this can be used for a pre-check before
+ *		insertion.
+ * ----------------------------------------------------------------
+ */
+bool
+ExecCheckIndexConstraints(TupleTableSlot *slot,
+						  EState *estate, ItemPointer conflictTid,
+						  Oid arbiterIdx)
+{
+	ResultRelInfo *resultRelInfo;
+	int			i;
+	int			numIndices;
+	RelationPtr relationDescs;
+	Relation	heapRelation;
+	IndexInfo **indexInfoArray;
+	ExprContext *econtext;
+	Datum		values[INDEX_MAX_KEYS];
+	bool		isnull[INDEX_MAX_KEYS];
+	ItemPointerData invalidItemPtr;
+	bool		checkedIndex = false;
+
+	ItemPointerSetInvalid(conflictTid);
+	ItemPointerSetInvalid(&invalidItemPtr);
+
+	/*
+	 * Get information from the result relation info structure.
+	 */
+	resultRelInfo = estate->es_result_relation_info;
+	numIndices = resultRelInfo->ri_NumIndices;
+	relationDescs = resultRelInfo->ri_IndexRelationDescs;
+	indexInfoArray = resultRelInfo->ri_IndexRelationInfo;
+	heapRelation = resultRelInfo->ri_RelationDesc;
+
+	/*
+	 * We will use the EState's per-tuple context for evaluating predicates
+	 * and index expressions (creating it if it's not already there).
+	 */
+	econtext = GetPerTupleExprContext(estate);
+
+	/* Arrange for econtext's scan tuple to be the tuple under test */
+	econtext->ecxt_scantuple = slot;
+
+	/*
+	 * for each index, form and insert the index tuple
+	 */
+	for (i = 0; i < numIndices; i++)
+	{
+		Relation	indexRelation = relationDescs[i];
+		IndexInfo  *indexInfo;
+		bool		satisfiesConstraint;
+
+		if (indexRelation == NULL)
+			continue;
+
+		indexInfo = indexInfoArray[i];
+
+		if (!indexInfo->ii_Unique && !indexInfo->ii_ExclusionOps)
+			continue;
+
+		/* If the index is marked as read-only, ignore it */
+		if (!indexInfo->ii_ReadyForInserts)
+			continue;
+
+		/* When specific arbiter index requested, only examine it */
+		if (arbiterIdx != InvalidOid &&
+			arbiterIdx != indexRelation->rd_index->indexrelid)
+			continue;
+
+		checkedIndex = true;
+
+		/* Check for partial index */
+		if (indexInfo->ii_Predicate != NIL)
+		{
+			List	   *predicate;
+
+			/*
+			 * If predicate state not set up yet, create it (in the estate's
+			 * per-query context)
+			 */
+			predicate = indexInfo->ii_PredicateState;
+			if (predicate == NIL)
+			{
+				predicate = (List *)
+					ExecPrepareExpr((Expr *) indexInfo->ii_Predicate,
+									estate);
+				indexInfo->ii_PredicateState = predicate;
+			}
+
+			/* Skip this index-update if the predicate isn't satisfied */
+			if (!ExecQual(predicate, econtext, false))
+				continue;
+		}
+
+		/*
+		 * FormIndexDatum fills in its values and isnull parameters with the
+		 * appropriate values for the column(s) of the index.
+		 */
+		FormIndexDatum(indexInfo,
+					   slot,
+					   estate,
+					   values,
+					   isnull);
+
+		satisfiesConstraint =
+			check_exclusion_or_unique_constraint(heapRelation, indexRelation,
+												 indexInfo, &invalidItemPtr,
+												 values, isnull, estate, false,
+												 true, true, conflictTid);
+		if (!satisfiesConstraint)
+			return false;
+
+		/* If this was a user-specified arbiter index, we're done */
+		if (arbiterIdx == indexRelation->rd_index->indexrelid)
+			break;
+	}
+
+	if (arbiterIdx != InvalidOid && !checkedIndex)
+		elog(ERROR, "unexpected failure to find arbiter unique index");
+
+	return true;
+}
+
 /*
- * Check for violation of an exclusion constraint
+ * Check for violation of an exclusion or unique constraint
  *
  * heap: the table containing the new tuple
  * index: the index supporting the exclusion constraint
  * indexInfo: info about the index, including the exclusion properties
- * tupleid: heap TID of the new tuple we have just inserted
+ * tupleid: heap TID of the new tuple we have just inserted (invalid if we
+ *		haven't inserted a new tuple yet)
  * values, isnull: the *index* column values computed for the new tuple
  * estate: an EState we can do evaluation in
  * newIndex: if true, we are trying to build a new index (this affects
  *		only the wording of error messages)
  * errorOK: if true, don't throw error for violation
+ * wait: if true, wait for conflicting transaction to finish, even if !violationOK
+ * conflictTid: if not-NULL, the TID of conflicting tuple is returned here.
  *
  * Returns true if OK, false if actual or potential violation
  *
@@ -1167,16 +1335,25 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
  * is convenient for deferred exclusion checks; we need not bother queuing
  * a deferred event if there is definitely no conflict at insertion time.
  *
- * When errorOK is false, we'll throw error on violation, so a false result
+ * When violationOK is false, we'll throw error on violation, so a false result
  * is impossible.
+ *
+ * Note: The indexam is normally responsible for checking unique constraints,
+ * so this normally only needs to be used for exclusion constraints. But this
+ * function is also called when doing a "pre-check" for conflicts, for the
+ * benefit of speculative insertion.  Caller may request that conflict TID be
+ * set, to take further steps.
  */
 bool
-check_exclusion_constraint(Relation heap, Relation index, IndexInfo *indexInfo,
-						   ItemPointer tupleid, Datum *values, bool *isnull,
-						   EState *estate, bool newIndex, bool errorOK)
+check_exclusion_or_unique_constraint(Relation heap, Relation index,
+									 IndexInfo *indexInfo, ItemPointer tupleid,
+									 Datum *values, bool *isnull,
+									 EState *estate, bool newIndex,
+									 bool violationOK, bool wait,
+									 ItemPointer conflictTid)
 {
-	Oid		   *constr_procs = indexInfo->ii_ExclusionProcs;
-	uint16	   *constr_strats = indexInfo->ii_ExclusionStrats;
+	Oid		   *constr_procs;
+	uint16	   *constr_strats;
 	Oid		   *index_collations = index->rd_indcollation;
 	int			index_natts = index->rd_index->indnatts;
 	IndexScanDesc index_scan;
@@ -1190,6 +1367,17 @@ check_exclusion_constraint(Relation heap, Relation index, IndexInfo *indexInfo,
 	TupleTableSlot *existing_slot;
 	TupleTableSlot *save_scantuple;
 
+	if (indexInfo->ii_ExclusionOps)
+	{
+		constr_procs = indexInfo->ii_ExclusionProcs;
+		constr_strats = indexInfo->ii_ExclusionStrats;
+	}
+	else
+	{
+		constr_procs = indexInfo->ii_UniqueProcs;
+		constr_strats = indexInfo->ii_UniqueStrats;
+	}
+
 	/*
 	 * If any of the input values are NULL, the constraint check is assumed to
 	 * pass (i.e., we assume the operators are strict).
@@ -1254,7 +1442,8 @@ retry:
 		/*
 		 * Ignore the entry for the tuple we're trying to check.
 		 */
-		if (ItemPointerEquals(tupleid, &tup->t_self))
+		if (ItemPointerIsValid(tupleid) &&
+			ItemPointerEquals(tupleid, &tup->t_self))
 		{
 			if (found_self)		/* should not happen */
 				elog(ERROR, "found self tuple multiple times in index \"%s\"",
@@ -1288,9 +1477,11 @@ retry:
 		 * we're not supposed to raise error, just return the fact of the
 		 * potential conflict without waiting to see if it's real.
 		 */
-		if (errorOK)
+		if (violationOK && !wait)
 		{
 			conflict = true;
+			if (conflictTid)
+				*conflictTid = tup->t_self;
 			break;
 		}
 
@@ -1309,14 +1500,29 @@ retry:
 		{
 			ctid_wait = tup->t_data->t_ctid;
 			index_endscan(index_scan);
-			XactLockTableWait(xwait, heap, &ctid_wait,
-							  XLTW_RecheckExclusionConstr);
+			if (DirtySnapshot.speculativeToken)
+				SpeculativeInsertionWait(DirtySnapshot.xmin,
+										 DirtySnapshot.speculativeToken);
+			else
+				XactLockTableWait(xwait, heap, &ctid_wait,
+								  XLTW_RecheckExclusionConstr);
+			/*
+			 * Note: "tup" is no longer valid here; index_endscan released it.
+			 */
 			goto retry;
 		}
 
 		/*
-		 * We have a definite conflict.  Report it.
+		 * We have a definite conflict.  Return it to caller, or report it.
 		 */
+		if (violationOK)
+		{
+			conflict = true;
+			if (conflictTid)
+				*conflictTid = tup->t_self;
+			break;
+		}
+
 		error_new = BuildIndexValueDescription(index, values, isnull);
 		error_existing = BuildIndexValueDescription(index, existing_values,
 													existing_isnull);
@@ -1352,6 +1558,9 @@ retry:
 	 * However, it is possible to define exclusion constraints for which that
 	 * wouldn't be true --- for instance, if the operator is <>. So we no
 	 * longer complain if found_self is still false.
+	 *
+	 * It would also not be true in the pre-check mode, when we haven't
+	 * inserted a tuple yet.
 	 */
 
 	econtext->ecxt_scantuple = save_scantuple;
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index 48107d9..4699060 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -151,10 +151,11 @@ lnext:
 				 * case, so as to avoid the "Halloween problem" of repeated
 				 * update attempts.  In the latter case it might be sensible
 				 * to fetch the updated tuple instead, but doing so would
-				 * require changing heap_lock_tuple as well as heap_update and
-				 * heap_delete to not complain about updating "invisible"
-				 * tuples, which seems pretty scary.  So for now, treat the
-				 * tuple as deleted and do not process.
+				 * require changing heap_update and heap_delete to not complain
+				 * about updating "invisible" tuples, which seems pretty scary
+				 * (heap_lock_tuple will not complain, but few callers expect
+				 * HeapTupleInvisible, and we're not one of them).  So for now,
+				 * treat the tuple as deleted and do not process.
 				 */
 				goto lnext;
 
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index f96fb24..5411896 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -46,12 +46,23 @@
 #include "miscadmin.h"
 #include "nodes/nodeFuncs.h"
 #include "storage/bufmgr.h"
+#include "storage/lmgr.h"
+#include "storage/procarray.h"
 #include "utils/builtins.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 #include "utils/tqual.h"
 
 
+static bool ExecLockUpdateTuple(ResultRelInfo	   *resultRelInfo,
+								ItemPointer			conflictTid,
+								TupleTableSlot	   *planSlot,
+								TupleTableSlot	   *insertSlot,
+								ModifyTableState   *onConflict,
+								EState			   *estate,
+								bool				canSetTag,
+								TupleTableSlot	  **returning);
+
 /*
  * Verify that the tuples to be produced by INSERT or UPDATE match the
  * target relation's rowtype
@@ -151,6 +162,36 @@ ExecProcessReturning(ProjectionInfo *projectReturning,
 	return ExecProject(projectReturning, NULL);
 }
 
+/*
+ * ExecCheckHeapTupleVisible -- verify heap tuple is visible
+ *
+ * It would not be consistent with guarantees of the higher isolation levels to
+ * proceed with avoiding insertion (taking speculative insertion's alternative
+ * IGNORE/UPDATE path) on the basis of another tuple that is not visible.
+ * Check for the need to raise a serialization failure, and do so as necessary.
+ */
+static void
+ExecCheckHeapTupleVisible(EState *estate,
+						  ResultRelInfo *relinfo,
+						  ItemPointer tid)
+{
+
+	Relation		rel = relinfo->ri_RelationDesc;
+	Buffer			buffer;
+	HeapTupleData	tuple;
+
+	if (!IsolationUsesXactSnapshot())
+		return;
+
+	tuple.t_self = *tid;
+	if (!heap_fetch(rel, estate->es_snapshot, &tuple, &buffer, false, NULL))
+		ereport(ERROR,
+				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+				 errmsg("could not serialize access due to concurrent insert or update dictating alternative ON CONFLICT path")));
+
+	ReleaseBuffer(buffer);
+}
+
 /* ----------------------------------------------------------------
  *		ExecInsert
  *
@@ -163,6 +204,9 @@ ExecProcessReturning(ProjectionInfo *projectReturning,
 static TupleTableSlot *
 ExecInsert(TupleTableSlot *slot,
 		   TupleTableSlot *planSlot,
+		   ModifyTableState *onConflict,
+		   Oid arbiterIndex,
+		   SpecCmd spec,
 		   EState *estate,
 		   bool canSetTag)
 {
@@ -246,6 +290,8 @@ ExecInsert(TupleTableSlot *slot,
 	}
 	else
 	{
+		ItemPointerData		conflictTid;
+
 		/*
 		 * Constraints might reference the tableoid column, so initialize
 		 * t_tableOid before evaluating them.
@@ -259,20 +305,138 @@ ExecInsert(TupleTableSlot *slot,
 			ExecConstraints(resultRelInfo, slot, estate);
 
 		/*
+		 * If we are performing speculative insertion, do a non-conclusive
+		 * check for conflicts.
+		 *
+		 * Control returns here when there is 1) A row-locking conflict, or 2)
+		 * an insertion conflict.  See the executor README for a full
+		 * discussion of speculative insertion.
+		 */
+vlock:
+
+		/*
+		 * XXX If we know or assume that there are few duplicates, it would be
+		 * better to skip this, and just optimistically proceed with the
+		 * insertion below.
+		 */
+		if (spec != SPEC_NONE && resultRelInfo->ri_NumIndices > 0)
+		{
+			/*
+			 * Check if it's required to proceed with the second phase
+			 * ("insertion proper") of speculative insertion in respect of the
+			 * slot.  If insertion ultimately does not proceed, no firing of
+			 * AFTER ROW INSERT triggers occurs.
+			 *
+			 * We don't suppress the effects (or, perhaps, side-effects) of
+			 * BEFORE ROW INSERT triggers.  This isn't ideal, but then we
+			 * cannot proceed with even considering uniqueness violations until
+			 * these triggers fire on the one hand, but on the other hand they
+			 * have the ability to execute arbitrary user-defined code which
+			 * may perform operations entirely outside the system's ability to
+			 * nullify.
+			 */
+			if (!ExecCheckIndexConstraints(slot, estate, &conflictTid,
+										   arbiterIndex))
+			{
+				TupleTableSlot	   *returning = NULL;
+
+				/*
+				 * Lock and consider updating in the SPEC_INSERT case.  For the
+				 * SPEC_IGNORE case, it's still necessary to verify that the
+				 * tuple is visible to the executor's MVCC snapshot.
+				 */
+				if (spec == SPEC_INSERT && !ExecLockUpdateTuple(resultRelInfo,
+																&conflictTid,
+																planSlot,
+																slot,
+																onConflict,
+																estate,
+																canSetTag,
+																&returning))
+					goto vlock;
+				else if (spec == SPEC_IGNORE)
+					ExecCheckHeapTupleVisible(estate, resultRelInfo, &conflictTid);
+
+				/*
+				 * RETURNING may have been processed already -- the target
+				 * ResultRelInfo might have made representation within
+				 * ExecUpdate() that this is required.  Inserted and updated
+				 * tuples are projected indifferently for ON CONFLICT UPDATE
+				 * with RETURNING.
+				 *
+				 * Since there was no row conflict, we're done.
+				 */
+				return returning;
+			}
+
+			/*
+			 * Before we start insertion proper, acquire our "promise tuple
+			 * insertion lock". Others can use that (rather than an XID lock,
+			 * which is appropriate only for non-promise tuples) to wait for us
+			 * to decide if we're going to go ahead with the insertion.
+			 */
+			SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
+		}
+
+		/*
 		 * insert the tuple
 		 *
 		 * Note: heap_insert returns the tid (location) of the new tuple in
 		 * the t_self field.
 		 */
 		newId = heap_insert(resultRelationDesc, tuple,
-							estate->es_output_cid, 0, NULL);
+							estate->es_output_cid,
+							spec != SPEC_NONE ? HEAP_INSERT_SPECULATIVE : 0,
+							NULL);
 
 		/*
 		 * insert index entries for tuple
 		 */
 		if (resultRelInfo->ri_NumIndices > 0)
+		{
 			recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
-												   estate);
+												   estate, spec != SPEC_NONE,
+												   arbiterIndex);
+
+			if (spec != SPEC_NONE)
+			{
+				HeapUpdateFailureData	hufd;
+
+				/*
+				 * Consider possible race:  concurrent insertion conflicts with
+				 * our speculative heap tuple.  Must then "super-delete" the
+				 * heap tuple and retry from the start.
+				 *
+				 * This is occasionally necessary so that "unprincipled
+				 * deadlocks" are avoided;  now that a conflict was found,
+				 * other sessions should not wait on our speculative token, and
+				 * they certainly shouldn't treat our speculatively-inserted
+				 * heap tuple as an ordinary tuple that it must wait on the
+				 * outcome of our xact to UPDATE/DELETE.  This makes heap
+				 * tuples behave as conceptual "value locks" of short duration,
+				 * distinct from ordinary tuples that other xacts must wait on
+				 * xmin-xact-end of in the event of a possible unique/exclusion
+				 * violation (the violation that arbitrates taking the
+				 * alternative UPDATE/IGNORE path).
+				 */
+				if (recheckIndexes)
+				{
+					heap_delete(resultRelationDesc, &(tuple->t_self),
+								estate->es_output_cid, InvalidSnapshot, false, &hufd, true);
+					Assert(hufd.cmax == estate->es_output_cid);
+				}
+				SpeculativeInsertionLockRelease(GetCurrentTransactionId());
+				ClearSpeculativeInsertionState();
+
+				if (recheckIndexes)
+				{
+					list_free(recheckIndexes);
+					goto vlock;
+				}
+
+				/* since there was no insertion conflict, we're done */
+			}
+		}
 	}
 
 	if (canSetTag)
@@ -399,7 +563,8 @@ ldelete:;
 							 estate->es_output_cid,
 							 estate->es_crosscheck_snapshot,
 							 true /* wait for commit */ ,
-							 &hufd);
+							 &hufd,
+							 false);
 		switch (result)
 		{
 			case HeapTupleSelfUpdated:
@@ -768,7 +933,7 @@ lreplace:;
 		 */
 		if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
 			recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
-												   estate);
+												   estate, false, InvalidOid);
 	}
 
 	if (canSetTag)
@@ -792,6 +957,236 @@ lreplace:;
 	return NULL;
 }
 
+/* ----------------------------------------------------------------
+ * Try to lock tuple for update as part of speculative insertion.  If
+ * a qual originating from ON CONFLICT UPDATE is satisfied, update
+ * (but still lock row, even though it may not satisfy estate's
+ * snapshot).
+ *
+ * Returns value indicating if we're done (with or without an
+ * update), or if the executor must start from scratch.
+ * ----------------------------------------------------------------
+ */
+static bool
+ExecLockUpdateTuple(ResultRelInfo *resultRelInfo,
+					ItemPointer conflictTid,
+					TupleTableSlot *planSlot,
+					TupleTableSlot *insertSlot,
+					ModifyTableState *onConflict,
+					EState			 *estate,
+					bool			canSetTag,
+					TupleTableSlot **returning)
+{
+	Relation				relation = resultRelInfo->ri_RelationDesc;
+	HeapTupleData			tuple;
+	HeapTuple				copyTuple = NULL;
+	HeapUpdateFailureData 	hufd;
+	HTSU_Result 			test;
+	Buffer					buffer;
+	TupleTableSlot		   *slot;
+	ExprContext			   *econtext;
+
+	/*
+	 * Lock tuple for update.
+	 *
+	 * Like EvalPlanQualFetch(), don't follow updates.  There is no actual
+	 * benefit to doing so, since as discussed below, a conflict invalidates
+	 * our previous conclusion that the tuple is the conclusively committed
+	 * conflicting tuple.
+	 */
+	tuple.t_self = *conflictTid;
+	test = heap_lock_tuple(relation, &tuple, estate->es_output_cid,
+						   LockTupleExclusive, LockWaitBlock, false, &buffer,
+						   &hufd);
+
+	if (test == HeapTupleMayBeUpdated)
+		copyTuple = heap_copytuple(&tuple);
+
+	switch (test)
+	{
+		case HeapTupleInvisible:
+			/*
+			 * This may occur when an instantaneously invisible tuple is blamed
+			 * as a conflict because multiple rows are inserted with the same
+			 * constrained values.
+			 *
+			 * We cannot proceed, because to do so would leave users open to
+			 * the risk that the same row will be updated a second time in the
+			 * same command;  allowing a second update affecting a single row
+			 * within the same command a second time would leave the update
+			 * order undefined.  It is the user's responsibility to resolve
+			 * these self-duplicates in advance of proposing for insertion a
+			 * set of tuples, but warn them.  These problems are why SQL-2003
+			 * similarly specifies that for SQL MERGE, an exception must be
+			 * raised in the event of an attempt to update the same row twice.
+			 *
+			 * XXX It might be preferable to do something similar when a row is
+			 * locked twice (and not updated twice) by the same speculative
+			 * insertion, as if to take each lock acquisition as an indication
+			 * of a discrete, unfulfilled intent to update (perhaps in some
+			 * later command of the same xact).  This does not seem feasible,
+			 * though.
+			 */
+			if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple.t_data)))
+				ereport(ERROR,
+						(errcode(ERRCODE_CARDINALITY_VIOLATION),
+						 errmsg("ON CONFLICT UPDATE command could not lock/update self-inserted tuple"),
+						 errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
+
+			/* This shouldn't happen */
+			elog(ERROR, "attempted to lock invisible tuple");
+			return false;		/* keep compiler quiet */
+		case HeapTupleSelfUpdated:
+			/*
+			 * XXX In practice this is dead code, since BEFORE triggers fire
+			 * prior to speculative insertion.  Since a dirty snapshot is used
+			 * to find possible conflict tuples, speculative insertion could
+			 * not have seen the old/MVCC-current row version at all (even if
+			 * it was only rendered old by this same command).
+			 */
+			elog(ERROR,"unexpected self-updated tuple");
+			return false;		/* keep compiler quiet */
+		case HeapTupleMayBeUpdated:
+			/*
+			 * Success -- we're done, as tuple is locked.  Verify that the
+			 * tuple is known to be visible to our snapshot under conventional
+			 * MVCC rules if the current isolation level mandates that.  In
+			 * READ COMMITTED mode, we can lock and update a tuple still in
+			 * progress according to our snapshot, but higher isolation levels
+			 * cannot avail of that, and must actively defend against doing so.
+			 * We might get a serialization failure within ExecUpdate() anyway
+			 * if this step was skipped, but this cannot be relied on, for
+			 * example because the auxiliary WHERE clause happened to not be
+			 * satisfied.
+			 */
+			ExecCheckHeapTupleVisible(estate, resultRelInfo, &tuple.t_data->t_ctid);
+
+			/*
+			 * This loosening of snapshot isolation for the benefit of READ
+			 * COMMITTED speculative insertions is used consistently:
+			 * speculative quals are only tested against already locked tuples.
+			 * It would be rather inconsistent to UPDATE when no tuple version
+			 * is MVCC-visible (which seems inevitable since we must *do
+			 * something* there, and "READ COMMITTED serialization failures"
+			 * are unappealing), while also avoiding updating here entirely on
+			 * the basis of a non-conclusive tuple version (the version that
+			 * happens to be visible to this command's MVCC snapshot, or a
+			 * subsequent non-conclusive version).
+			 *
+			 * In other words:  Only the final, conclusively locked tuple
+			 * (which must have the same value in the relevant constrained
+			 * attribute(s) as the value previously "value locked") matters.
+			 */
+
+			/* must provide our own instrumentation support */
+			if (onConflict->ps.instrument)
+				InstrStartNode(onConflict->ps.instrument);
+
+			/*
+			 * Conceptually, the parent ModifyTable is like a relation scan
+			 * node that uses a dirty snapshot, returning rows which the
+			 * auxiliary plan must operate on (if only to lock all such rows).
+			 * EvalPlanQual() is involved in the evaluation of their UPDATE,
+			 * regardless of whether or not the tuple is visible to the
+			 * command's MVCC Snapshot.
+			 */
+			EvalPlanQualBegin(&onConflict->mt_epqstate, onConflict->ps.state);
+
+			/*
+			 * Save EPQ expression context.  Auxiliary plan's scan node (which
+			 * would have been just initialized by EvalPlanQualBegin() on the
+			 * first time through here per query) cannot fail to provide one.
+			 */
+			econtext = onConflict->mt_epqstate.planstate->ps_ExprContext;
+
+			/*
+			 * UPDATE affects the same ResultRelation as INSERT in the context
+			 * of ON CONFLICT UPDATE, so parent's target rti is used
+			 */
+			EvalPlanQualSetTuple(&onConflict->mt_epqstate,
+								 resultRelInfo->ri_RangeTableIndex, copyTuple);
+
+			/*
+			 * Make available rejected tuple for referencing within UPDATE
+			 * expression (that is, make available a slot with the rejected
+			 * tuple, possibly already modified by BEFORE INSERT row triggers).
+			 *
+			 * This is for the benefit of any ExcludedExpr that may appear
+			 * within UPDATE's targetlist or WHERE clause.  The EXCLUDED tuple
+			 * may be referenced as an ExcludedExpr, which exist purely for our
+			 * benefit.  The nested ExcludedExpr's Var will necessarily have an
+			 * INNER_VAR varno on the assumption that the inner slot of the EPQ
+			 * scan plan state's expression context will contain the EXCLUDED
+			 * heaptuple slot (that is, on the assumption that during
+			 * expression evaluation, the ecxt_innertuple will be assigned the
+			 * insertSlot by this codepath, in advance of expression
+			 * evaluation).
+			 *
+			 * See handling of ExcludedExpr within handleRewrite.c and
+			 * execQual.c.
+			 */
+			econtext->ecxt_innertuple = insertSlot;
+
+			slot = EvalPlanQualNext(&onConflict->mt_epqstate);
+
+			if (!TupIsNull(slot))
+				*returning = ExecUpdate(&tuple.t_data->t_ctid, NULL, slot,
+										planSlot, &onConflict->mt_epqstate,
+										onConflict->ps.state, canSetTag);
+
+			ReleaseBuffer(buffer);
+
+			/*
+			 * As when executing an UPDATE's ModifyTable node in the
+			 * conventional manner, reset the per-output-tuple ExprContext
+			 */
+			ResetPerTupleExprContext(onConflict->ps.state);
+
+			/* must provide our own instrumentation support */
+			if (onConflict->ps.instrument)
+				InstrStopNode(onConflict->ps.instrument, *returning ? 1:0);
+
+			return true;
+		case HeapTupleUpdated:
+			if (IsolationUsesXactSnapshot())
+				ereport(ERROR,
+						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+						 errmsg("could not serialize access due to concurrent update")));
+
+			/*
+			 * Tell caller to try again from the very start.  We don't use the
+			 * usual EvalPlanQual() looping pattern here, fundamentally because
+			 * we don't have a useful qual to verify the next tuple with.  Our
+			 * "qual" is really any user-supplied qual AND the unique
+			 * constraint "col OP value" implied by a speculative insertion
+			 * conflict.  However, because of the selective evaluation of the
+			 * former "qual" (the interactions with MVCC and row locking), this
+			 * is an over-simplification.
+			 *
+			 * We might devise a means of verifying, by way of binary equality
+			 * in a similar manner to HOT codepaths, if any unique indexed
+			 * columns changed, but this would only serve to ameliorate the
+			 * fundamental problem.  It might well not be good enough, because
+			 * those columns could change too.  It seems unlikely that working
+			 * harder here is worthwhile.
+			 *
+			 * At this point, all bets are off -- it might actually turn out to
+			 * be okay to proceed with insertion instead of locking now (the
+			 * tuple we attempted to lock could have been deleted, for
+			 * example).  On the other hand, it might not be okay, but for an
+			 * entirely different reason, with an entirely separate TID to
+			 * blame and lock.  This TID may not even be part of the same
+			 * update chain.
+			 */
+			ReleaseBuffer(buffer);
+			return false;
+		default:
+			elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
+	}
+
+	return false;
+}
+
 
 /*
  * Process BEFORE EACH STATEMENT triggers
@@ -803,6 +1198,9 @@ fireBSTriggers(ModifyTableState *node)
 	{
 		case CMD_INSERT:
 			ExecBSInsertTriggers(node->ps.state, node->resultRelInfo);
+			if (node->spec == SPEC_INSERT)
+				ExecBSUpdateTriggers(node->onConflict->state,
+									 node->resultRelInfo);
 			break;
 		case CMD_UPDATE:
 			ExecBSUpdateTriggers(node->ps.state, node->resultRelInfo);
@@ -825,6 +1223,9 @@ fireASTriggers(ModifyTableState *node)
 	switch (node->operation)
 	{
 		case CMD_INSERT:
+			if (node->spec == SPEC_INSERT)
+				ExecASUpdateTriggers(node->onConflict->state,
+									 node->resultRelInfo);
 			ExecASInsertTriggers(node->ps.state, node->resultRelInfo);
 			break;
 		case CMD_UPDATE:
@@ -852,6 +1253,8 @@ ExecModifyTable(ModifyTableState *node)
 {
 	EState	   *estate = node->ps.state;
 	CmdType		operation = node->operation;
+	ModifyTableState  *onConflict = (ModifyTableState *) node->onConflict;
+	SpecCmd		spec = node->spec;
 	ResultRelInfo *saved_resultRelInfo;
 	ResultRelInfo *resultRelInfo;
 	PlanState  *subplanstate;
@@ -1022,7 +1425,9 @@ ExecModifyTable(ModifyTableState *node)
 		switch (operation)
 		{
 			case CMD_INSERT:
-				slot = ExecInsert(slot, planSlot, estate, node->canSetTag);
+				slot = ExecInsert(slot, planSlot, onConflict,
+								  node->arbiterIndex, spec, estate,
+								  node->canSetTag);
 				break;
 			case CMD_UPDATE:
 				slot = ExecUpdate(tupleid, oldtuple, slot, planSlot,
@@ -1070,6 +1475,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 {
 	ModifyTableState *mtstate;
 	CmdType		operation = node->operation;
+	Plan	   *onConflictPlan = node->onConflictPlan;
 	int			nplans = list_length(node->plans);
 	ResultRelInfo *saved_resultRelInfo;
 	ResultRelInfo *resultRelInfo;
@@ -1097,6 +1503,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 	mtstate->resultRelInfo = estate->es_result_relations + node->resultRelIndex;
 	mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans);
 	mtstate->mt_nplans = nplans;
+	mtstate->spec = node->spec;
 
 	/* set up epqstate with dummy subplan data for the moment */
 	EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam);
@@ -1135,7 +1542,15 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 		if (resultRelInfo->ri_RelationDesc->rd_rel->relhasindex &&
 			operation != CMD_DELETE &&
 			resultRelInfo->ri_IndexRelationDescs == NULL)
-			ExecOpenIndices(resultRelInfo);
+			ExecOpenIndices(resultRelInfo, mtstate->spec != SPEC_NONE);
+
+		/*
+		 * ON CONFLICT UPDATE variant must have unique index to arbitrate on
+		 * taking alternative path
+		 */
+		Assert(node->spec != SPEC_INSERT || node->arbiterIndex != InvalidOid);
+
+		mtstate->arbiterIndex = node->arbiterIndex;
 
 		/* Now init the plan for this result rel */
 		estate->es_result_relation_info = resultRelInfo;
@@ -1308,7 +1723,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 				break;
 			case CMD_UPDATE:
 			case CMD_DELETE:
-				junk_filter_needed = true;
+				junk_filter_needed = (node->spec == SPEC_NONE);
 				break;
 			default:
 				elog(ERROR, "unknown operation");
@@ -1373,6 +1788,30 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 	}
 
 	/*
+	 * Initialize auxiliary ModifyTable node for INSERT...ON CONFLICT UPDATE.
+	 *
+	 * The UPDATE portion of the query is essentially represented as auxiliary
+	 * to INSERT state at all stages of query processing, with a representation
+	 * at each stage that is analogous to a regular UPDATE.
+	 */
+	if (onConflictPlan)
+	{
+		PlanState *pstate;
+
+		Assert(mtstate->spec == SPEC_INSERT);
+
+		/*
+		 * Initialize auxiliary child plan.
+		 *
+		 * ExecModifyTable() is never called for auxiliary update
+		 * ModifyTableState.  Execution of the auxiliary plan is driven by its
+		 * parent in an ad-hoc fashion.
+		 */
+		pstate = ExecInitNode(onConflictPlan, estate, eflags);
+		mtstate->onConflict = pstate;
+	}
+
+	/*
 	 * Set up a tuple table slot for use for trigger output tuples. In a plan
 	 * containing multiple ModifyTable nodes, all can share one such slot, so
 	 * we keep it in the estate.
@@ -1387,9 +1826,14 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 	 * ModifyTable node too, but there's no need.)  Note the use of lcons not
 	 * lappend: we need later-initialized ModifyTable nodes to be shut down
 	 * before earlier ones.  This ensures that we don't throw away RETURNING
-	 * rows that need to be seen by a later CTE subplan.
+	 * rows that need to be seen by a later CTE subplan.  We do not want to
+	 * append an auxiliary ON CONFLICT UPDATE node either, since it must have a
+	 * parent SPEC_INSERT ModifyTable node that it is auxiliary to that
+	 * directly drives execution of what is logically a single unified
+	 * statement (*that* plan will be appended here, though).  If it must
+	 * project updated rows, that will only ever be done through the parent.
 	 */
-	if (!mtstate->canSetTag)
+	if (!mtstate->canSetTag && mtstate->spec != SPEC_UPDATE)
 		estate->es_auxmodifytables = lcons(mtstate,
 										   estate->es_auxmodifytables);
 
@@ -1442,6 +1886,8 @@ ExecEndModifyTable(ModifyTableState *node)
 	 */
 	for (i = 0; i < node->mt_nplans; i++)
 		ExecEndNode(node->mt_plans[i]);
+
+	ExecEndNode(node->onConflict);
 }
 
 void
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 00ffe4a..df611d2 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -178,6 +178,9 @@ _copyModifyTable(const ModifyTable *from)
 	COPY_NODE_FIELD(resultRelations);
 	COPY_SCALAR_FIELD(resultRelIndex);
 	COPY_NODE_FIELD(plans);
+	COPY_SCALAR_FIELD(spec);
+	COPY_SCALAR_FIELD(arbiterIndex);
+	COPY_NODE_FIELD(onConflictPlan);
 	COPY_NODE_FIELD(withCheckOptionLists);
 	COPY_NODE_FIELD(returningLists);
 	COPY_NODE_FIELD(fdwPrivLists);
@@ -1776,6 +1779,19 @@ _copyCurrentOfExpr(const CurrentOfExpr *from)
 }
 
 /*
+ * _copyExcludedExpr
+ */
+static ExcludedExpr *
+_copyExcludedExpr(const ExcludedExpr *from)
+{
+	ExcludedExpr *newnode = makeNode(ExcludedExpr);
+
+	COPY_NODE_FIELD(arg);
+
+	return newnode;
+}
+
+/*
  * _copyTargetEntry
  */
 static TargetEntry *
@@ -2120,6 +2136,31 @@ _copyWithClause(const WithClause *from)
 	return newnode;
 }
 
+static InferClause *
+_copyInferClause(const InferClause *from)
+{
+	InferClause *newnode = makeNode(InferClause);
+
+	COPY_NODE_FIELD(indexElems);
+	COPY_NODE_FIELD(whereClause);
+	COPY_LOCATION_FIELD(location);
+
+	return newnode;
+}
+
+static ConflictClause *
+_copyConflictClause(const ConflictClause *from)
+{
+	ConflictClause *newnode = makeNode(ConflictClause);
+
+	COPY_SCALAR_FIELD(specclause);
+	COPY_NODE_FIELD(infer);
+	COPY_NODE_FIELD(updatequery);
+	COPY_LOCATION_FIELD(location);
+
+	return newnode;
+}
+
 static CommonTableExpr *
 _copyCommonTableExpr(const CommonTableExpr *from)
 {
@@ -2525,6 +2566,10 @@ _copyQuery(const Query *from)
 	COPY_NODE_FIELD(jointree);
 	COPY_NODE_FIELD(targetList);
 	COPY_NODE_FIELD(withCheckOptions);
+	COPY_SCALAR_FIELD(specClause);
+	COPY_NODE_FIELD(arbiterExpr);
+	COPY_NODE_FIELD(arbiterWhere);
+	COPY_NODE_FIELD(onConflict);
 	COPY_NODE_FIELD(returningList);
 	COPY_NODE_FIELD(groupClause);
 	COPY_NODE_FIELD(havingQual);
@@ -2548,6 +2593,7 @@ _copyInsertStmt(const InsertStmt *from)
 	COPY_NODE_FIELD(relation);
 	COPY_NODE_FIELD(cols);
 	COPY_NODE_FIELD(selectStmt);
+	COPY_NODE_FIELD(confClause);
 	COPY_NODE_FIELD(returningList);
 	COPY_NODE_FIELD(withClause);
 
@@ -4254,6 +4300,9 @@ copyObject(const void *from)
 		case T_CurrentOfExpr:
 			retval = _copyCurrentOfExpr(from);
 			break;
+		case T_ExcludedExpr:
+			retval = _copyExcludedExpr(from);
+			break;
 		case T_TargetEntry:
 			retval = _copyTargetEntry(from);
 			break;
@@ -4721,6 +4770,12 @@ copyObject(const void *from)
 		case T_WithClause:
 			retval = _copyWithClause(from);
 			break;
+		case T_InferClause:
+			retval = _copyInferClause(from);
+			break;
+		case T_ConflictClause:
+			retval = _copyConflictClause(from);
+			break;
 		case T_CommonTableExpr:
 			retval = _copyCommonTableExpr(from);
 			break;
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 79035b2..24e58fa 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -681,6 +681,14 @@ _equalCurrentOfExpr(const CurrentOfExpr *a, const CurrentOfExpr *b)
 }
 
 static bool
+_equalExcludedExpr(const ExcludedExpr *a, const ExcludedExpr *b)
+{
+	COMPARE_NODE_FIELD(arg);
+
+	return true;
+}
+
+static bool
 _equalTargetEntry(const TargetEntry *a, const TargetEntry *b)
 {
 	COMPARE_NODE_FIELD(expr);
@@ -863,6 +871,10 @@ _equalQuery(const Query *a, const Query *b)
 	COMPARE_NODE_FIELD(jointree);
 	COMPARE_NODE_FIELD(targetList);
 	COMPARE_NODE_FIELD(withCheckOptions);
+	COMPARE_SCALAR_FIELD(specClause);
+	COMPARE_NODE_FIELD(arbiterExpr);
+	COMPARE_NODE_FIELD(arbiterWhere);
+	COMPARE_NODE_FIELD(onConflict);
 	COMPARE_NODE_FIELD(returningList);
 	COMPARE_NODE_FIELD(groupClause);
 	COMPARE_NODE_FIELD(havingQual);
@@ -884,6 +896,7 @@ _equalInsertStmt(const InsertStmt *a, const InsertStmt *b)
 	COMPARE_NODE_FIELD(relation);
 	COMPARE_NODE_FIELD(cols);
 	COMPARE_NODE_FIELD(selectStmt);
+	COMPARE_NODE_FIELD(confClause);
 	COMPARE_NODE_FIELD(returningList);
 	COMPARE_NODE_FIELD(withClause);
 
@@ -2426,6 +2439,27 @@ _equalWithClause(const WithClause *a, const WithClause *b)
 }
 
 static bool
+_equalInferClause(const InferClause *a, const InferClause *b)
+{
+	COMPARE_NODE_FIELD(indexElems);
+	COMPARE_NODE_FIELD(whereClause);
+	COMPARE_LOCATION_FIELD(location);
+
+	return true;
+}
+
+static bool
+_equalConflictClause(const ConflictClause *a, const ConflictClause *b)
+{
+	COMPARE_SCALAR_FIELD(specclause);
+	COMPARE_NODE_FIELD(infer);
+	COMPARE_NODE_FIELD(updatequery);
+	COMPARE_LOCATION_FIELD(location);
+
+	return true;
+}
+
+static bool
 _equalCommonTableExpr(const CommonTableExpr *a, const CommonTableExpr *b)
 {
 	COMPARE_STRING_FIELD(ctename);
@@ -2694,6 +2728,9 @@ equal(const void *a, const void *b)
 		case T_CurrentOfExpr:
 			retval = _equalCurrentOfExpr(a, b);
 			break;
+		case T_ExcludedExpr:
+			retval = _equalExcludedExpr(a, b);
+			break;
 		case T_TargetEntry:
 			retval = _equalTargetEntry(a, b);
 			break;
@@ -3148,6 +3185,12 @@ equal(const void *a, const void *b)
 		case T_WithClause:
 			retval = _equalWithClause(a, b);
 			break;
+		case T_InferClause:
+			retval = _equalInferClause(a, b);
+			break;
+		case T_ConflictClause:
+			retval = _equalConflictClause(a, b);
+			break;
 		case T_CommonTableExpr:
 			retval = _equalCommonTableExpr(a, b);
 			break;
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index 21dfda7..a9e1e13 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -235,6 +235,13 @@ exprType(const Node *expr)
 		case T_CurrentOfExpr:
 			type = BOOLOID;
 			break;
+		case T_ExcludedExpr:
+			{
+				const ExcludedExpr *n = (const ExcludedExpr *) expr;
+
+				type = exprType((Node *) n->arg);
+			}
+			break;
 		case T_PlaceHolderVar:
 			type = exprType((Node *) ((const PlaceHolderVar *) expr)->phexpr);
 			break;
@@ -469,6 +476,12 @@ exprTypmod(const Node *expr)
 			return ((const CoerceToDomainValue *) expr)->typeMod;
 		case T_SetToDefault:
 			return ((const SetToDefault *) expr)->typeMod;
+		case T_ExcludedExpr:
+			{
+				const ExcludedExpr *n = (const ExcludedExpr *) expr;
+
+				return ((const Var *) n->arg)->vartypmod;
+			}
 		case T_PlaceHolderVar:
 			return exprTypmod((Node *) ((const PlaceHolderVar *) expr)->phexpr);
 		default:
@@ -894,6 +907,9 @@ exprCollation(const Node *expr)
 		case T_CurrentOfExpr:
 			coll = InvalidOid;	/* result is always boolean */
 			break;
+		case T_ExcludedExpr:
+			coll = exprCollation((Node *) ((const ExcludedExpr *) expr)->arg);
+			break;
 		case T_PlaceHolderVar:
 			coll = exprCollation((Node *) ((const PlaceHolderVar *) expr)->phexpr);
 			break;
@@ -1089,6 +1105,12 @@ exprSetCollation(Node *expr, Oid collation)
 		case T_CurrentOfExpr:
 			Assert(!OidIsValid(collation));		/* result is always boolean */
 			break;
+		case T_ExcludedExpr:
+			{
+				Var *v = (Var *) ((ExcludedExpr *) expr)->arg;
+				v->varcollid = collation;
+			}
+			break;
 		default:
 			elog(ERROR, "unrecognized node type: %d", (int) nodeTag(expr));
 			break;
@@ -1474,6 +1496,12 @@ exprLocation(const Node *expr)
 		case T_WithClause:
 			loc = ((const WithClause *) expr)->location;
 			break;
+		case T_InferClause:
+			loc = ((const InferClause *) expr)->location;
+			break;
+		case T_ConflictClause:
+			loc = ((const ConflictClause *) expr)->location;
+			break;
 		case T_CommonTableExpr:
 			loc = ((const CommonTableExpr *) expr)->location;
 			break;
@@ -1481,6 +1509,10 @@ exprLocation(const Node *expr)
 			/* just use argument's location */
 			loc = exprLocation((Node *) ((const PlaceHolderVar *) expr)->phexpr);
 			break;
+		case T_ExcludedExpr:
+			/* just use nested expr's location */
+			loc = exprLocation((Node *) ((const ExcludedExpr *) expr)->arg);
+			break;
 		default:
 			/* for any other node type it's just unknown... */
 			loc = -1;
@@ -1910,6 +1942,8 @@ expression_tree_walker(Node *node,
 			break;
 		case T_PlaceHolderVar:
 			return walker(((PlaceHolderVar *) node)->phexpr, context);
+		case T_ExcludedExpr:
+			return walker(((ExcludedExpr *) node)->arg, context);
 		case T_AppendRelInfo:
 			{
 				AppendRelInfo *appinfo = (AppendRelInfo *) node;
@@ -1958,6 +1992,12 @@ query_tree_walker(Query *query,
 		return true;
 	if (walker((Node *) query->withCheckOptions, context))
 		return true;
+	if (walker((Node *) query->arbiterExpr, context))
+		return true;
+	if (walker(query->arbiterWhere, context))
+		return true;
+	if (walker(query->onConflict, context))
+		return true;
 	if (walker((Node *) query->returningList, context))
 		return true;
 	if (walker((Node *) query->jointree, context))
@@ -2620,6 +2660,16 @@ expression_tree_mutator(Node *node,
 				return (Node *) newnode;
 			}
 			break;
+		case T_ExcludedExpr:
+			{
+				ExcludedExpr *excludedexpr = (ExcludedExpr *) node;
+				ExcludedExpr *newnode;
+
+				FLATCOPY(newnode, excludedexpr, ExcludedExpr);
+				MUTATE(newnode->arg, newnode->arg, Node *);
+				return (Node *) newnode;
+			}
+			break;
 		case T_AppendRelInfo:
 			{
 				AppendRelInfo *appinfo = (AppendRelInfo *) node;
@@ -2699,6 +2749,9 @@ query_tree_mutator(Query *query,
 
 	MUTATE(query->targetList, query->targetList, List *);
 	MUTATE(query->withCheckOptions, query->withCheckOptions, List *);
+	MUTATE(query->arbiterExpr, query->arbiterExpr, List *);
+	MUTATE(query->arbiterWhere, query->arbiterWhere, Node *);
+	MUTATE(query->onConflict, query->onConflict, Node *);
 	MUTATE(query->returningList, query->returningList, List *);
 	MUTATE(query->jointree, query->jointree, FromExpr *);
 	MUTATE(query->setOperations, query->setOperations, Node *);
@@ -2968,6 +3021,8 @@ raw_expression_tree_walker(Node *node,
 					return true;
 				if (walker(stmt->selectStmt, context))
 					return true;
+				if (walker(stmt->confClause, context))
+					return true;
 				if (walker(stmt->returningList, context))
 					return true;
 				if (walker(stmt->withClause, context))
@@ -3207,6 +3262,27 @@
 			break;
 		case T_WithClause:
 			return walker(((WithClause *) node)->ctes, context);
+
+		case T_InferClause:
+			{
+				InferClause *stmt = (InferClause *) node;
+
+				if (walker(stmt->indexElems, context))
+					return true;
+				if (walker(stmt->whereClause, context))
+					return true;
+			}
+			break;
+		case T_ConflictClause:
+			{
+				ConflictClause *stmt = (ConflictClause *) node;
+
+				if (walker(stmt->infer, context))
+					return true;
+				if (walker(stmt->updatequery, context))
+					return true;
+			}
+			break;
 		case T_CommonTableExpr:
 			return walker(((CommonTableExpr *) node)->ctequery, context);
 		default:
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index b4a2667..34e9163 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -330,6 +330,9 @@ _outModifyTable(StringInfo str, const ModifyTable *node)
 	WRITE_NODE_FIELD(resultRelations);
 	WRITE_INT_FIELD(resultRelIndex);
 	WRITE_NODE_FIELD(plans);
+	WRITE_ENUM_FIELD(spec, SpecCmd);
+	WRITE_OID_FIELD(arbiterIndex);
+	WRITE_NODE_FIELD(onConflictPlan);
 	WRITE_NODE_FIELD(withCheckOptionLists);
 	WRITE_NODE_FIELD(returningLists);
 	WRITE_NODE_FIELD(fdwPrivLists);
@@ -1426,6 +1429,14 @@ _outCurrentOfExpr(StringInfo str, const CurrentOfExpr *node)
 }
 
 static void
+_outExcludedExpr(StringInfo str, const ExcludedExpr *node)
+{
+	WRITE_NODE_TYPE("EXCLUDED");
+
+	WRITE_NODE_FIELD(arg);
+}
+
+static void
 _outTargetEntry(StringInfo str, const TargetEntry *node)
 {
 	WRITE_NODE_TYPE("TARGETENTRY");
@@ -2301,6 +2312,10 @@ _outQuery(StringInfo str, const Query *node)
 	WRITE_NODE_FIELD(jointree);
 	WRITE_NODE_FIELD(targetList);
 	WRITE_NODE_FIELD(withCheckOptions);
+	WRITE_ENUM_FIELD(specClause, SpecCmd);
+	WRITE_NODE_FIELD(arbiterExpr);
+	WRITE_NODE_FIELD(arbiterWhere);
+	WRITE_NODE_FIELD(onConflict);
 	WRITE_NODE_FIELD(returningList);
 	WRITE_NODE_FIELD(groupClause);
 	WRITE_NODE_FIELD(havingQual);
@@ -3062,6 +3077,9 @@ _outNode(StringInfo str, const void *obj)
 			case T_CurrentOfExpr:
 				_outCurrentOfExpr(str, obj);
 				break;
+			case T_ExcludedExpr:
+				_outExcludedExpr(str, obj);
+				break;
 			case T_TargetEntry:
 				_outTargetEntry(str, obj);
 				break;
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index dbc162a..48a7206 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -214,6 +214,10 @@ _readQuery(void)
 	READ_NODE_FIELD(jointree);
 	READ_NODE_FIELD(targetList);
 	READ_NODE_FIELD(withCheckOptions);
+	READ_ENUM_FIELD(specClause, SpecCmd);
+	READ_NODE_FIELD(arbiterExpr);
+	READ_NODE_FIELD(arbiterWhere);
+	READ_NODE_FIELD(onConflict);
 	READ_NODE_FIELD(returningList);
 	READ_NODE_FIELD(groupClause);
 	READ_NODE_FIELD(havingQual);
@@ -1128,6 +1132,19 @@ _readCurrentOfExpr(void)
 }
 
 /*
+ * _readExcludedExpr
+ */
+static ExcludedExpr *
+_readExcludedExpr(void)
+{
+	READ_LOCALS(ExcludedExpr);
+
+	READ_NODE_FIELD(arg);
+
+	READ_DONE();
+}
+
+/*
  * _readTargetEntry
  */
 static TargetEntry *
@@ -1392,6 +1409,8 @@ parseNodeString(void)
 		return_value = _readSetToDefault();
 	else if (MATCH("CURRENTOFEXPR", 13))
 		return_value = _readCurrentOfExpr();
+	else if (MATCH("EXCLUDED", 8))
+		return_value = _readExcludedExpr();
 	else if (MATCH("TARGETENTRY", 11))
 		return_value = _readTargetEntry();
 	else if (MATCH("RANGETBLREF", 11))
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index b86a3cd..6f75759 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -4013,3 +4013,60 @@ string_to_const(const char *str, Oid datatype)
 	return makeConst(datatype, -1, collation, constlen,
 					 conval, false, false);
 }
+
+/*
+ * plan_speculative_use_index
+ *		Use the planner to decide speculative insertion arbiter index
+ *
+ * Among indexes on target of INSERT ... ON CONFLICT UPDATE/IGNORE, decide
+ * which index to use to arbitrate taking alternative path.  This should be
+ * called infrequently in practice, because it's unusual for more than one index
+ * to be available that can satisfy a user-specified unique index inference
+ * specification.
+ *
+ * Note: caller had better already hold some type of lock on the table.
+ */
+Oid
+plan_speculative_use_index(PlannerInfo *root, List *indexList)
+{
+	IndexOptInfo *indexInfo;
+	RelOptInfo *rel;
+	IndexPath  *cheapest;
+	IndexPath  *indexScanPath;
+	ListCell   *lc;
+
+	/* Set up RTE/RelOptInfo arrays if needed */
+	if (!root->simple_rel_array)
+		setup_simple_rel_arrays(root);
+
+	/* Build RelOptInfo for the INSERT's result relation */
+	rel = build_simple_rel(root, root->parse->resultRelation, RELOPT_BASEREL);
+
+	/* Find the cheapest scan among the candidate arbiter indexes */
+	cheapest = NULL;
+
+	foreach(lc, rel->indexlist)
+	{
+		indexInfo = (IndexOptInfo *) lfirst(lc);
+
+		if (!list_member_oid(indexList, indexInfo->indexoid))
+			continue;
+
+		/* Cost an unqualified scan of this candidate index for comparison */
+		indexScanPath = create_index_path(root, indexInfo,
+										  NIL, NIL, NIL, NIL, NIL,
+										  ForwardScanDirection, false,
+										  NULL, 1.0);
+
+		if (!cheapest || compare_fractional_path_costs(&cheapest->path,
+													   &indexScanPath->path,
+													   DEFAULT_RANGE_INEQ_SEL) > 0)
+			cheapest = indexScanPath;
+
+	}
+
+	if (cheapest)
+		return cheapest->indexinfo->indexoid;
+
+	return InvalidOid;
+}
diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c
index 1258961..263ff5f 100644
--- a/src/backend/optimizer/path/tidpath.c
+++ b/src/backend/optimizer/path/tidpath.c
@@ -255,13 +255,17 @@ create_tidscan_paths(PlannerInfo *root, RelOptInfo *rel)
 	/*
 	 * We don't support pushing join clauses into the quals of a tidscan, but
 	 * it could still have required parameterization due to LATERAL refs in
-	 * its tlist.
+	 * its tlist.  To be tidy, we disallow TID scans as the unexecuted scan
+	 * node of an ON CONFLICT UPDATE auxiliary query, even though there is no
+	 * reason to think that would be harmful;  the optimizer should always
+	 * prefer a SeqScan or Result node (actually, we assert that it's one of
+	 * those two in several places, so accepting TID scans would break those).
 	 */
 	required_outer = rel->lateral_relids;
 
 	tidquals = TidQualFromRestrictinfo(rel->baserestrictinfo, rel->relid);
 
-	if (tidquals)
+	if (tidquals && root->parse->specClause != SPEC_UPDATE)
 		add_path(rel, (Path *) create_tidscan_path(root, rel, tidquals,
 												   required_outer));
 }
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 655be81..648dbbc 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -4811,7 +4811,8 @@ make_modifytable(PlannerInfo *root,
 				 CmdType operation, bool canSetTag,
 				 List *resultRelations, List *subplans,
 				 List *withCheckOptionLists, List *returningLists,
-				 List *rowMarks, int epqParam)
+				 List *rowMarks, Plan *onConflictPlan, SpecCmd spec,
+				 int epqParam)
 {
 	ModifyTable *node = makeNode(ModifyTable);
 	Plan	   *plan = &node->plan;
@@ -4860,6 +4861,9 @@ make_modifytable(PlannerInfo *root,
 	node->resultRelations = resultRelations;
 	node->resultRelIndex = -1;	/* will be set correctly in setrefs.c */
 	node->plans = subplans;
+	node->spec = spec;
+	node->arbiterIndex = InvalidOid;
+	node->onConflictPlan = onConflictPlan;
 	node->withCheckOptionLists = withCheckOptionLists;
 	node->returningLists = returningLists;
 	node->rowMarks = rowMarks;
@@ -4912,6 +4916,16 @@ make_modifytable(PlannerInfo *root,
 	}
 	node->fdwPrivLists = fdw_private_list;
 
+	/*
+	 * If a set of unique index inference expressions was provided (for
+	 * INSERT...ON CONFLICT UPDATE/IGNORE), then infer the appropriate
+	 * unique index (or throw an error if none is available).  It's
+	 * possible that there will be a costing step in the event of
+	 * having to choose between multiple alternatives.
+	 */
+	if (root->parse->arbiterExpr)
+		node->arbiterIndex = infer_unique_index(root);
+
 	return node;
 }
 
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 9cbbcfb..47d11d2 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -612,7 +612,58 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
 											 withCheckOptionLists,
 											 returningLists,
 											 rowMarks,
+											 NULL,
+											 parse->specClause,
 											 SS_assign_special_param(root));
+
+			if (parse->onConflict)
+			{
+				Query		   *conflictQry = (Query*) parse->onConflict;
+				ModifyTable	   *parent = (ModifyTable *) plan;
+
+				/*
+				 * An ON CONFLICT UPDATE query is a subquery of its parent
+				 * INSERT ModifyTable, but isn't formally a subplan -- it's an
+				 * "auxiliary" plan.
+				 *
+				 * During execution, the auxiliary plan state is used to
+				 * execute the UPDATE query in an ad-hoc manner, driven by the
+				 * parent.  The executor will only ever execute the auxiliary
+				 * plan through its parent.  onConflictPlan is "auxiliary" to
+				 * its parent in the sense that it's strictly encapsulated from
+				 * other code (for example, the executor does not separately
+				 * track it within estate as a plan that needs to have
+				 * execution finished when it appears within a data-modifying
+				 * CTE -- only the parent is specifically tracked for that
+				 * purpose).
+				 *
+				 * There is a fundamental nexus between parent and auxiliary
+				 * plans that makes a fully unified representation seem
+				 * compelling (a "CMD_UPSERT" ModifyTable plan and Query).
+				 * That would obviate the need to specially track auxiliary
+				 * state across all stages of execution just for this case;
+				 * the optimizer would then not have to generate a
+				 * fully-formed, independent UPDATE subquery plan (with a
+				 * scanstate only useful for EvalPlanQual() re-evaluation).
+				 * However, it's convenient to plan each ModifyTable
+				 * separately, as doing so maximizes code reuse.  The
+				 * alternative must be to introduce abstractions that (for
+				 * example) allow a single "CMD_UPSERT" ModifyTable to have two
+				 * distinct types of targetlist (that will need to be processed
+				 * differently during parsing and rewriting anyway).  The
+				 * auxiliary UPDATE plan is a good trade-off between a
+				 * fully-fledged "CMD_UPSERT" representation, and the opposite
+				 * extreme of tracking two separate ModifyTable nodes, joined
+				 * by a contrived join type, with (for example) odd properties
+				 * around tuple visibility not well encapsulated.  A contrived
+				 * join based design would also necessitate teaching
+				 * ModifyTable nodes to support rescan just for the benefit of
+				 * ON CONFLICT UPDATE.
+				 */
+				parent->onConflictPlan = subquery_planner(glob, conflictQry,
+														  root, hasRecursion,
+														  0, NULL);
+			}
 		}
 	}
 
@@ -1056,6 +1107,8 @@ inheritance_planner(PlannerInfo *root)
 									 withCheckOptionLists,
 									 returningLists,
 									 rowMarks,
+									 NULL,
+									 parse->specClause,
 									 SS_assign_special_param(root));
 }
 
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 5d865b0..2f88694 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -779,9 +779,34 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
 				 * global list.
 				 */
 				splan->resultRelIndex = list_length(root->glob->resultRelations);
-				root->glob->resultRelations =
-					list_concat(root->glob->resultRelations,
-								list_copy(splan->resultRelations));
+
+				if (!splan->onConflictPlan)
+				{
+					/*
+					 * Only actually append result relation for non-auxiliary
+					 * ModifyTable plans
+					 */
+					root->glob->resultRelations =
+						list_concat(root->glob->resultRelations,
+									list_copy(splan->resultRelations));
+				}
+				else
+				{
+					/*
+					 * Decrement rtoffset, to compensate for dummy RTE left by
+					 * EXCLUDED.* alias.  Auxiliary plan will have same
+					 * resultRelation from flattened range table as its parent.
+					 */
+					rtoffset -= PRS2_OLD_VARNO;
+					splan->onConflictPlan = (Plan *) set_plan_refs(root,
+											  (Plan *) splan->onConflictPlan,
+											  rtoffset);
+					/*
+					 * Set up the visible plan targetlist as being the same as
+					 * the parent. Again, this is for the use of EXPLAIN only.
+					 */
+					splan->onConflictPlan->targetlist = splan->plan.targetlist;
+				}
 			}
 			break;
 		case T_Append:
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 78fb6b1..f7a0523 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -2345,6 +2345,12 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params,
 													  valid_params,
 													  scan_params));
 				}
+
+				/*
+				 * No need to directly handle onConflictPlan here, since it
+				 * cannot have params (parse analysis enforces restrictions
+				 * that prohibit subqueries there).
+				 */
 			}
 			break;
 
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index fb7db6d..3086ca3 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -31,6 +31,7 @@
 #include "nodes/makefuncs.h"
 #include "optimizer/clauses.h"
 #include "optimizer/cost.h"
+#include "optimizer/paths.h"
 #include "optimizer/plancat.h"
 #include "optimizer/predtest.h"
 #include "optimizer/prep.h"
@@ -125,10 +126,12 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
 
 	/*
 	 * Make list of indexes.  Ignore indexes on system catalogs if told to.
-	 * Don't bother with indexes for an inheritance parent, either.
+	 * Don't bother with indexes for an inheritance parent or speculative
+	 * insertion UPDATE auxiliary queries, either.
 	 */
 	if (inhparent ||
-		(IgnoreSystemIndexes && IsSystemRelation(relation)))
+		(IgnoreSystemIndexes && IsSystemRelation(relation)) ||
+		root->parse->specClause == SPEC_UPDATE)
 		hasindex = false;
 	else
 		hasindex = relation->rd_rel->relhasindex;
@@ -394,6 +397,221 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
 }
 
 /*
+ * infer_unique_index -
+ *	  Retrieves unique index to arbitrate speculative insertion.
+ *
+ * Uses user-supplied inference clause expressions and predicate to match a
+ * unique index from those defined and ready on the heap relation (target).  An
+ * exact match is required on columns/expressions (although they can appear in
+ * any order).  However, the predicate given by the user need only restrict
+ * insertion to a subset of the part of the table covered by some particular
+ * unique index (in particular, a partial unique index) in order to be
+ * inferred.
+ *
+ * The implementation does not consider which B-Tree operator class any
+ * particular available unique index uses.  In particular, there is no system
+ * dependency on the default operator class for the purposes of inference.
+ * This should be okay, since by convention non-default opclasses only
+ * introduce alternative sort orders, not alternative notions of equality
+ * (there are only trivial known exceptions to this convention, where "equals"
+ * operators of a type's opclasses do not match across opclasses, exceptions
+ * that exist precisely to discourage user code from using the divergent
+ * opclass).  Even if we assume that a type could usefully have multiple
+ * alternative concepts of equality, surely the definition actually implied by
+ * the operator class of actually indexed attributes is pertinent.  However,
+ * this is a bit of a wart, because strictly speaking there is leeway for a
+ * query to be interpreted in deference to available unique indexes, and
+ * indexes are traditionally only an implementation detail.  It hardly seems
+ * worth it to waste cycles on this corner case, though.
+ *
+ * This logic somewhat mirrors get_relation_info().  This process is not
+ * deferred to a get_relation_info() call while planning because there may not
+ * be any such call.  In the ON CONFLICT UPDATE case get_relation_info() will
+ * be called, for auxiliary query planning, but even then indexes won't be
+ * examined since they're not generally interesting to that case (building
+ * index paths is explicitly avoided for auxiliary query planning, in fact).
+ */
+Oid
+infer_unique_index(PlannerInfo *root)
+{
+	Query	   *parse = root->parse;
+	Relation	relation;
+	Oid			relationObjectId;
+	Bitmapset  *plainAttrs = NULL;
+	List	   *candidates = NIL;
+	ListCell   *l;
+	List	   *indexList;
+
+	Assert(parse->specClause == SPEC_INSERT ||
+		   parse->specClause == SPEC_IGNORE);
+
+	/*
+	 * We need not lock the relation since it was already locked, either by
+	 * the rewriter or when expand_inherited_rtentry() added it to the query's
+	 * rangetable.
+	 */
+	relationObjectId = rt_fetch(parse->resultRelation, parse->rtable)->relid;
+
+	relation = heap_open(relationObjectId, NoLock);
+
+	/*
+	 * Build a bitmapset of plain-column attribute numbers appearing in
+	 * the inference clause (if any); expressions are matched later
+	 */
+	foreach(l, parse->arbiterExpr)
+	{
+		Expr   *elem;
+		Var	   *var;
+		int		attno;
+
+		elem = (Expr *) lfirst(l);
+
+		/*
+		 * Parse analysis of inference elements performs full parse analysis of
+		 * Vars, even for non-expression indexes (in contrast with utility
+		 * command related use of IndexElem).  However, indexes are cataloged
+		 * with simple attribute numbers for non-expression indexes.
+		 * Therefore, we must build a compatible bms representation here.
+		 */
+		if (!IsA(elem, Var))
+			continue;
+
+		var = (Var*) elem;
+		attno = var->varattno;
+
+		if (attno < 0)
+			ereport(ERROR,
+					(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
+					 errmsg("system columns may not appear in unique index inference specification")));
+		else if (attno == 0)
+			elog(ERROR, "whole row unique index inference specifications are not valid");
+
+		plainAttrs = bms_add_member(plainAttrs, attno);
+	}
+
+	indexList = RelationGetIndexList(relation);
+
+	/*
+	 * Using that representation, iterate through the list of indexes on the
+	 * target relation to try and find a match
+	 */
+	foreach(l, indexList)
+	{
+		Oid				indexoid = lfirst_oid(l);
+		Relation		idxRel;
+		Form_pg_index	idxForm;
+		Bitmapset	   *indexedPlainAttrs = NULL;
+		List		   *idxExprs;
+		List		   *predExprs;
+		List		   *whereExplicit;
+		AttrNumber		natt;
+		ListCell	   *e;
+
+		/*
+		 * Extract info from the relation descriptor for the index.  We know
+		 * that this is a target, so get lock type it is known will ultimately
+		 * be required by the executor.
+		 *
+		 * Let executor complain about !indimmediate case directly.
+		 */
+		idxRel = index_open(indexoid, RowExclusiveLock);
+		idxForm = idxRel->rd_index;
+
+		if (!idxForm->indisunique ||
+			!IndexIsValid(idxForm))
+			goto next;
+
+		/*
+		 * If the index is valid, but cannot yet be used, ignore it. See
+		 * src/backend/access/heap/README.HOT for discussion.
+		 */
+		if (idxForm->indcheckxmin &&
+			!TransactionIdPrecedes(HeapTupleHeaderGetXmin(idxRel->rd_indextuple->t_data),
+								   TransactionXmin))
+			goto next;
+
+		/* Build bitmapset of the index's plain-column attribute numbers */
+		for (natt = 0; natt < idxForm->indnatts; natt++)
+		{
+			int			attno = idxRel->rd_index->indkey.values[natt];
+
+			if (attno < 0)
+				elog(ERROR, "system column in index");
+
+			if (attno != 0)
+				indexedPlainAttrs = bms_add_member(indexedPlainAttrs, attno);
+		}
+
+		/*
+		 * Since expressions were made unique during parse analysis, it's
+		 * evident that we cannot proceed with this index if the number of
+		 * attributes (plain or expression) does not match exactly.  This
+		 * precludes support for unique indexes created with redundantly
+		 * referenced columns (which are not forbidden by CREATE INDEX), but
+		 * this seems inconsequential.
+		 */
+		if (list_length(parse->arbiterExpr) != idxForm->indnatts)
+			goto next;
+
+		idxExprs = RelationGetIndexExpressions(idxRel);
+
+		/*
+		 * Match expressions appearing in clause (if any) with index
+		 * definition
+		 */
+		foreach(e, parse->arbiterExpr)
+		{
+			Expr  *elem = (Expr *) lfirst(e);
+
+			/* Plain Vars were already separately accounted for */
+			if (IsA(elem, Var))
+				continue;
+
+			if (!list_member(idxExprs, elem))
+				goto next;
+		}
+
+		/* Non-expression attributes (if any) must match */
+		if (!bms_equal(indexedPlainAttrs, plainAttrs))
+			goto next;
+
+		/*
+		 * Any user-supplied ON CONFLICT unique index inference WHERE clause
+		 * need only be implied by the cataloged index definitions predicate
+		 */
+		predExprs =	RelationGetIndexPredicate(idxRel);
+		whereExplicit = make_ands_implicit((Expr *) parse->arbiterWhere);
+
+		if (!predicate_implied_by(predExprs, whereExplicit))
+			goto next;
+
+		candidates = lappend_oid(candidates, idxForm->indexrelid);
+next:
+		index_close(idxRel, NoLock);
+	}
+
+	list_free(indexList);
+	heap_close(relation, NoLock);
+
+	/*
+	 * In the common case where there is only a single candidate unique index,
+	 * there is clearly no point in building index paths to determine which is
+	 * cheapest.
+	 */
+	if (list_length(candidates) == 1)
+		return linitial_oid(candidates);
+	else if (candidates == NIL)
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
+				 errmsg("could not infer which unique index to use from expressions/columns and predicate provided for ON CONFLICT")));
+	else
+		/* Otherwise, deduce the least expensive unique index */
+		return plan_speculative_use_index(root, candidates);
+
+	return InvalidOid;		/* keep compiler quiet */
+}
+
+/*
  * estimate_rel_size - estimate # pages and # tuples in a table or index
  *
  * We also estimate the fraction of the pages that are marked all-visible in
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index df89065..6c194f9 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -387,6 +387,8 @@ transformDeleteStmt(ParseState *pstate, DeleteStmt *stmt)
 	/* done building the range table and jointree */
 	qry->rtable = pstate->p_rtable;
 	qry->jointree = makeFromExpr(pstate->p_joinlist, qual);
+	qry->specClause = SPEC_NONE;
+	qry->onConflict = NULL;
 
 	qry->hasSubLinks = pstate->p_hasSubLinks;
 	qry->hasWindowFuncs = pstate->p_hasWindowFuncs;
@@ -408,6 +410,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
 {
 	Query	   *qry = makeNode(Query);
 	SelectStmt *selectStmt = (SelectStmt *) stmt->selectStmt;
+	SpecCmd		spec = stmt->confClause? stmt->confClause->specclause:SPEC_NONE;
 	List	   *exprList = NIL;
 	bool		isGeneralSelect;
 	List	   *sub_rtable;
@@ -425,6 +428,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
 
 	qry->commandType = CMD_INSERT;
 	pstate->p_is_insert = true;
+	pstate->p_is_speculative = spec != SPEC_NONE;
 
 	/* process the WITH clause independently of all else */
 	if (stmt->withClause)
@@ -472,11 +476,16 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
 		sub_namespace = NIL;
 	}
 
+	/* INSERT with an ON CONFLICT clause forces the "target" alias */
+	if (pstate->p_is_speculative)
+		stmt->relation->alias = makeAlias("target", NIL);
+
 	/*
 	 * Must get write lock on INSERT target table before scanning SELECT, else
 	 * we will grab the wrong kind of initial lock if the target table is also
 	 * mentioned in the SELECT part.  Note that the target table is not added
-	 * to the joinlist or namespace.
+	 * to the joinlist or namespace.  Note also that additional requiredPerms
+	 * may be added to the target RTE iff there is an auxiliary UPDATE.
 	 */
 	qry->resultRelation = setTargetTable(pstate, stmt->relation,
 										 false, false, ACL_INSERT);
@@ -741,12 +750,13 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
 	}
 
 	/*
-	 * If we have a RETURNING clause, we need to add the target relation to
-	 * the query namespace before processing it, so that Var references in
-	 * RETURNING will work.  Also, remove any namespace entries added in a
-	 * sub-SELECT or VALUES list.
+	 * If we have a RETURNING clause, or there are attributes used as the
+	 * condition on which to take an alternative ON CONFLICT path, we need to
+	 * add the target relation to the query namespace before processing it, so
+	 * that Var references in RETURNING/the alternative path key will work.
+	 * Also, remove any namespace entries added in a sub-SELECT or VALUES list.
 	 */
-	if (stmt->returningList)
+	if (stmt->returningList || stmt->confClause)
 	{
 		pstate->p_namespace = NIL;
 		addRTEtoQuery(pstate, pstate->p_target_rangetblentry,
@@ -758,8 +768,49 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
 	/* done building the range table and jointree */
 	qry->rtable = pstate->p_rtable;
 	qry->jointree = makeFromExpr(pstate->p_joinlist, NULL);
-
+	qry->specClause = spec;
 	qry->hasSubLinks = pstate->p_hasSubLinks;
+	qry->onConflict = NULL;
+
+	if (stmt->confClause)
+	{
+		/*
+		 * ON CONFLICT UPDATE requires special parse analysis of auxiliary
+		 * update Query
+		 */
+		if (stmt->confClause->updatequery)
+		{
+			ParseState	   *sub_pstate = make_parsestate(pstate);
+			Query		   *uqry;
+
+			/*
+			 * The optimizer is not prepared to accept a subquery RTE for a
+			 * non-CMD_SELECT Query.  The CMD_UPDATE Query is tracked as
+			 * special auxiliary state, while there is more or less analogous
+			 * auxiliary state tracked in later stages of query execution.
+			 *
+			 * Only the parent's canSetTag is ever actually consulted, so no need to set
+			 * that here.
+			 */
+			uqry = transformStmt(sub_pstate, stmt->confClause->updatequery);
+			Assert(uqry->commandType == CMD_UPDATE &&
+				   uqry->specClause == SPEC_UPDATE);
+
+			/* Save auxiliary query */
+			qry->onConflict = (Node *) uqry;
+
+			free_parsestate(sub_pstate);
+		}
+
+		/*
+		 * Perform parse analysis of the inference columns/expressions.  These
+		 * are later used to infer the unique index that arbitrates whether or
+		 * not to take the alternative ON CONFLICT path (i.e. whether to INSERT
+		 * or UPDATE/IGNORE in respect of each slot proposed for insertion).
+		 */
+		transformConflictClause(pstate, stmt->confClause, &qry->arbiterExpr,
+								&qry->arbiterWhere);
+	}
 
 	assign_query_collations(pstate, qry);
 
@@ -1006,6 +1057,8 @@ transformSelectStmt(ParseState *pstate, SelectStmt *stmt)
 
 	qry->rtable = pstate->p_rtable;
 	qry->jointree = makeFromExpr(pstate->p_joinlist, qual);
+	qry->specClause = SPEC_NONE;
+	qry->onConflict = NULL;
 
 	qry->hasSubLinks = pstate->p_hasSubLinks;
 	qry->hasWindowFuncs = pstate->p_hasWindowFuncs;
@@ -1903,10 +1956,14 @@ transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt)
 	Node	   *qual;
 	ListCell   *origTargetList;
 	ListCell   *tl;
+	bool		InhOption;
 
 	qry->commandType = CMD_UPDATE;
 	pstate->p_is_update = true;
 
+	/* auxiliary UPDATEs have no relation; target comes from parent INSERT */
+	pstate->p_is_speculative = (stmt->relation == NULL);
+
 	/* process the WITH clause independently of all else */
 	if (stmt->withClause)
 	{
@@ -1915,8 +1972,20 @@ transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt)
 		qry->hasModifyingCTE = pstate->p_hasModifyingCTE;
 	}
 
+	if (!pstate->p_is_speculative)
+	{
+		InhOption = interpretInhOption(stmt->relation->inhOpt);
+		qry->specClause = SPEC_NONE;
+	}
+	else
+	{
+		/* auxiliary UPDATE does not accept ONLY */
+		InhOption = false;
+		qry->specClause = SPEC_UPDATE;
+	}
+
 	qry->resultRelation = setTargetTable(pstate, stmt->relation,
-								  interpretInhOption(stmt->relation->inhOpt),
+										 InhOption,
 										 true,
 										 ACL_UPDATE);
 
@@ -1947,6 +2016,7 @@ transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt)
 
 	qry->rtable = pstate->p_rtable;
 	qry->jointree = makeFromExpr(pstate->p_joinlist, qual);
+	qry->onConflict = NULL;
 
 	qry->hasSubLinks = pstate->p_hasSubLinks;
 
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 36dac29..f987432 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -215,6 +215,8 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
 	RangeVar			*range;
 	IntoClause			*into;
 	WithClause			*with;
+	InferClause			*infer;
+	ConflictClause			*conf;
 	A_Indices			*aind;
 	ResTarget			*target;
 	struct PrivTarget	*privtarget;
@@ -415,6 +417,8 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
 %type <defelt>	SeqOptElem
 
 %type <istmt>	insert_rest
+%type <infer>	opt_conf_expr
+%type <conf>	opt_on_conflict
 
 %type <vsetstmt> generic_set set_rest set_rest_more generic_reset reset_rest
 				 SetResetClause FunctionSetResetClause
@@ -513,6 +517,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
 %type <list>	cte_list
 
 %type <list>	within_group_clause
+%type <node>	UpdateInsertStmt
 %type <node>	filter_clause
 %type <list>	window_clause window_definition_list opt_partition_clause
 %type <windef>	window_definition over_clause window_specification
@@ -551,8 +556,8 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
 	CACHE CALLED CASCADE CASCADED CASE CAST CATALOG_P CHAIN CHAR_P
 	CHARACTER CHARACTERISTICS CHECK CHECKPOINT CLASS CLOSE
 	CLUSTER COALESCE COLLATE COLLATION COLUMN COMMENT COMMENTS COMMIT
-	COMMITTED CONCURRENTLY CONFIGURATION CONNECTION CONSTRAINT CONSTRAINTS
-	CONTENT_P CONTINUE_P CONVERSION_P COPY COST CREATE
+	COMMITTED CONCURRENTLY CONFIGURATION CONFLICT CONNECTION CONSTRAINT
+	CONSTRAINTS CONTENT_P CONTINUE_P CONVERSION_P COPY COST CREATE
 	CROSS CSV CURRENT_P
 	CURRENT_CATALOG CURRENT_DATE CURRENT_ROLE CURRENT_SCHEMA
 	CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER CURSOR CYCLE
@@ -572,7 +577,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
 
 	HANDLER HAVING HEADER_P HOLD HOUR_P
 
-	IDENTITY_P IF_P ILIKE IMMEDIATE IMMUTABLE IMPLICIT_P IMPORT_P IN_P
+	IDENTITY_P IF_P IGNORE_P ILIKE IMMEDIATE IMMUTABLE IMPLICIT_P IMPORT_P IN_P
 	INCLUDING INCREMENT INDEX INDEXES INHERIT INHERITS INITIALLY INLINE_P
 	INNER_P INOUT INPUT_P INSENSITIVE INSERT INSTEAD INT_P INTEGER
 	INTERSECT INTERVAL INTO INVOKER IS ISNULL ISOLATION
@@ -652,6 +657,8 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
 %nonassoc	OVERLAPS
 %nonassoc	BETWEEN
 %nonassoc	IN_P
+%nonassoc	DISTINCT
+%nonassoc	ON
 %left		POSTFIXOP		/* dummy for postfix Op rules */
 /*
  * To support target_el without AS, we must give IDENT an explicit priority
@@ -9399,10 +9406,12 @@ DeallocateStmt: DEALLOCATE name
  *****************************************************************************/
 
 InsertStmt:
-			opt_with_clause INSERT INTO qualified_name insert_rest returning_clause
+			opt_with_clause INSERT INTO qualified_name insert_rest
+			opt_on_conflict returning_clause
 				{
 					$5->relation = $4;
-					$5->returningList = $6;
+					$5->confClause = $6;
+					$5->returningList = $7;
 					$5->withClause = $1;
 					$$ = (Node *) $5;
 				}
@@ -9447,6 +9456,44 @@ insert_column_item:
 				}
 		;
 
+opt_on_conflict:
+			ON CONFLICT opt_conf_expr UpdateInsertStmt
+				{
+					$$ = makeNode(ConflictClause);
+					$$->specclause = SPEC_INSERT;
+					$$->infer = $3;
+					$$->updatequery = $4;
+					$$->location = @1;
+				}
+			|
+			ON CONFLICT opt_conf_expr IGNORE_P
+				{
+					$$ = makeNode(ConflictClause);
+					$$->specclause = SPEC_IGNORE;
+					$$->infer = $3;
+					$$->updatequery = NULL;
+					$$->location = @1;
+				}
+			| /*EMPTY*/
+				{
+					$$ = NULL;
+				}
+		;
+
+opt_conf_expr:
+			'(' index_params where_clause ')'
+				{
+					$$ = makeNode(InferClause);
+					$$->indexElems = $2;
+					$$->whereClause = $3;
+					$$->location = @1;
+				}
+			| /*EMPTY*/
+				{
+					$$ = NULL;
+				}
+		;
+
 returning_clause:
 			RETURNING target_list		{ $$ = $2; }
 			| /* EMPTY */				{ $$ = NIL; }
@@ -9546,6 +9593,22 @@ UpdateStmt: opt_with_clause UPDATE relation_expr_opt_alias
 				}
 		;
 
+UpdateInsertStmt: UPDATE
+			SET set_clause_list
+			where_clause
+				{
+					UpdateStmt *n = makeNode(UpdateStmt);
+					/* NULL relation conveys auxiliary */
+					n->relation = NULL;
+					n->targetList = $3;
+					n->fromClause = NULL;
+					n->whereClause = $4;
+					n->returningList = NULL;
+					n->withClause = NULL;
+					$$ = (Node *)n;
+				}
+		;
+
 set_clause_list:
 			set_clause							{ $$ = $1; }
 			| set_clause_list ',' set_clause	{ $$ = list_concat($1,$3); }
@@ -13188,6 +13251,7 @@ unreserved_keyword:
 			| COMMIT
 			| COMMITTED
 			| CONFIGURATION
+			| CONFLICT
 			| CONNECTION
 			| CONSTRAINTS
 			| CONTENT_P
@@ -13247,6 +13311,7 @@ unreserved_keyword:
 			| HOUR_P
 			| IDENTITY_P
 			| IF_P
+			| IGNORE_P
 			| IMMEDIATE
 			| IMMUTABLE
 			| IMPLICIT_P
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index 654dce6..0bc45d5 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -75,6 +75,8 @@ static TargetEntry *findTargetlistEntrySQL99(ParseState *pstate, Node *node,
 						 List **tlist, ParseExprKind exprKind);
 static int get_matching_location(int sortgroupref,
 					  List *sortgrouprefs, List *exprs);
+static List* resolve_unique_index_expr(ParseState *pstate, InferClause *infer,
+									   Relation heapRel);
 static List *addTargetToGroupList(ParseState *pstate, TargetEntry *tle,
 					 List *grouplist, List *targetlist, int location,
 					 bool resolveUnknown);
@@ -145,7 +147,9 @@ transformFromClause(ParseState *pstate, List *frmList)
  *	  We also open the target relation and acquire a write lock on it.
  *	  This must be done before processing the FROM list, in case the target
  *	  is also mentioned as a source relation --- we want to be sure to grab
- *	  the write lock before any read lock.
+ *	  the write lock before any read lock.  Note that when called during
+ *	  the parse analysis of an auxiliary UPDATE query, relation may be
+ *	  NULL, and the details are acquired from the parent.
  *
  *	  If alsoSource is true, add the target to the query's joinlist and
  *	  namespace.  For INSERT, we don't want the target to be joined to;
@@ -172,19 +176,79 @@ setTargetTable(ParseState *pstate, RangeVar *relation,
 
 	/*
 	 * Open target rel and grab suitable lock (which we will hold till end of
-	 * transaction).
+	 * transaction), iff this is not an auxiliary ON CONFLICT UPDATE.
 	 *
-	 * free_parsestate() will eventually do the corresponding heap_close(),
-	 * but *not* release the lock.
+	 * free_parsestate() will eventually do the corresponding heap_close(), but
+	 * *not* release the lock (again, iff this is not an auxiliary ON CONFLICT
+	 * UPDATE).
 	 */
-	pstate->p_target_relation = parserOpenTable(pstate, relation,
-												RowExclusiveLock);
+	if (!pstate->p_is_speculative || pstate->p_is_insert)
+	{
+		pstate->p_target_relation = parserOpenTable(pstate, relation,
+													RowExclusiveLock);
+
+		/*
+		 * Now build an RTE.
+		 */
+		rte = addRangeTableEntryForRelation(pstate, pstate->p_target_relation,
+											relation->alias, inh, false);
+
+		/*
+		 * Override addRangeTableEntry's default ACL_SELECT permissions
+		 * check, and instead mark target table as requiring exactly the
+		 * specified permissions.
+		 *
+		 * If we find an explicit reference to the rel later during parse
+		 * analysis, we will add the ACL_SELECT bit back again; see
+		 * markVarForSelectPriv and its callers.
+		 */
+		rte->requiredPerms = requiredPerms;
+	}
+	else
+	{
+		RangeTblEntry  *exclRte;
+
+		/* auxiliary UPDATE (of ON CONFLICT UPDATE) */
+		Assert(pstate->p_is_update);
+		/* target shared with parent */
+		pstate->p_target_relation =
+			pstate->parentParseState->p_target_relation;
+		rte = pstate->parentParseState->p_target_rangetblentry;
+
+		/*
+		 * When called for auxiliary UPDATE, same target RTE is processed here
+		 * for a second time.  Just append requiredPerms.  There is no need to
+		 * override addRangeTableEntry's default ACL_SELECT permissions check
+		 * now.
+		 */
+		rte->requiredPerms |= requiredPerms;
+
+		/*
+		 * Build EXCLUDED alias for target relation.  This can be used to
+		 * reference the tuple originally proposed for insertion from within
+		 * the ON CONFLICT UPDATE auxiliary query.  This is not visible in the
+		 * parent INSERT.
+		 *
+		 * NOTE: 'EXCLUDED' will always have a varno equal to 1 (at least until
+		 * rewriting, where the RTE is effectively discarded -- its Vars are
+		 * replaced with a special-purpose primnode, ExcludedExpr).
+		 */
+		exclRte = addRangeTableEntryForRelation(pstate,
+												pstate->p_target_relation,
+												makeAlias("excluded", NIL),
+												false, false);
+
+		/*
+		 * Add EXCLUDED RTE to namespace.  It does not matter that the RTE is
+		 * not added to the Query joinlist, since its Vars are merely
+		 * placeholders for ExcludedExpr.
+		 */
+		addRTEtoQuery(pstate, exclRte, false, true, true);
+
+		/* Append parent/our target to Query rtable (should be last) */
+		pstate->p_rtable = lappend(pstate->p_rtable, rte);
+	}
 
-	/*
-	 * Now build an RTE.
-	 */
-	rte = addRangeTableEntryForRelation(pstate, pstate->p_target_relation,
-										relation->alias, inh, false);
 	pstate->p_target_rangetblentry = rte;
 
 	/* assume new rte is at end */
@@ -192,17 +256,6 @@ setTargetTable(ParseState *pstate, RangeVar *relation,
 	Assert(rte == rt_fetch(rtindex, pstate->p_rtable));
 
 	/*
-	 * Override addRangeTableEntry's default ACL_SELECT permissions check, and
-	 * instead mark target table as requiring exactly the specified
-	 * permissions.
-	 *
-	 * If we find an explicit reference to the rel later during parse
-	 * analysis, we will add the ACL_SELECT bit back again; see
-	 * markVarForSelectPriv and its callers.
-	 */
-	rte->requiredPerms = requiredPerms;
-
-	/*
 	 * If UPDATE/DELETE, add table to joinlist and namespace.
 	 *
 	 * Note: some callers know that they can find the new ParseNamespaceItem
@@ -2166,6 +2219,167 @@ get_matching_location(int sortgroupref, List *sortgrouprefs, List *exprs)
 }
 
 /*
+ * resolve_unique_index_expr
+ *		Infer a unique index from a list of indexElems, for ON
+ *		CONFLICT UPDATE/IGNORE
+ *
+ * Perform parse analysis of expressions and columns appearing within ON
+ * CONFLICT clause.  During planning, the returned list of expressions is used
+ * to infer which unique index to use.
+ */
+static List *
+resolve_unique_index_expr(ParseState *pstate, InferClause *infer,
+						  Relation heapRel)
+{
+	List		  *clauseexprs = NIL;
+	ListCell	  *l;
+
+	if (heapRel->rd_rel->relkind != RELKIND_RELATION)
+		ereport(ERROR,
+				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+				 errmsg("relation \"%s\" is not an ordinary table",
+						RelationGetRelationName(heapRel)),
+				 errhint("Only ordinary tables are accepted as targets when a unique index is inferred for ON CONFLICT.")));
+
+	if (heapRel->rd_rel->relhassubclass)
+		ereport(ERROR,
+				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+				 errmsg("relation \"%s\" has inheritance children",
+						RelationGetRelationName(heapRel)),
+				 errhint("Only heap relations without inheritance children are accepted as targets when a unique index is inferred for ON CONFLICT.")));
+
+	foreach(l, infer->indexElems)
+	{
+		IndexElem  *ielem = (IndexElem *) lfirst(l);
+		Node	   *trans;
+
+		/*
+		 * Raw grammar re-uses CREATE INDEX infrastructure for unique index
+		 * inference clause, and so will accept opclasses by name and so on.
+		 * Reject these here explicitly.
+		 */
+		if (ielem->ordering != SORTBY_DEFAULT ||
+			ielem->nulls_ordering != SORTBY_NULLS_DEFAULT)
+			ereport(ERROR,
+					(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
+					 errmsg("ON CONFLICT does not accept ordering or NULLS FIRST/LAST specifications"),
+					 errhint("These factors do not affect uniqueness of indexed datums."),
+					 parser_errposition(pstate,
+										exprLocation((Node *) infer))));
+
+		if (ielem->collation != NIL)
+			ereport(ERROR,
+					(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
+					 errmsg("ON CONFLICT collation specification is unnecessary"),
+					 errhint("Collations do not affect uniqueness of collatable datums."),
+					 parser_errposition(pstate,
+										exprLocation((Node *) infer))));
+
+		if (ielem->opclass != NIL)
+			ereport(ERROR,
+					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+					 errmsg("ON CONFLICT cannot accept non-default operator class specifications"),
+					 parser_errposition(pstate,
+										exprLocation((Node *) infer))));
+
+		if (!ielem->expr)
+		{
+			/* Simple index attribute */
+			ColumnRef	   *n;
+
+			/*
+			 * Grammar won't have built raw expression for us in event of plain
+			 * column reference.  Create one directly, and perform expression
+			 * transformation, which seems better principled than simply
+			 * propagating catalog-style simple attribute numbers.  For
+			 * example, it means the Var is marked for SELECT privileges, which
+			 * speculative insertion requires.  Planner expects this, and
+			 * performs its own normalization for the purposes of matching
+			 * against pg_index.
+			 */
+			n = makeNode(ColumnRef);
+			n->fields = list_make1(makeString(ielem->name));
+			/* Location is approximately that of inference specification */
+			n->location = infer->location;
+			trans = (Node *) n;
+		}
+		else
+		{
+			/* Do parse transformation of the raw expression */
+			trans = (Node *) ielem->expr;
+		}
+
+		/*
+		 * transformExpr() should have already rejected subqueries,
+		 * aggregates, and window functions, based on the EXPR_KIND_ for an
+		 * index expression.  Expressions returning sets won't have been
+		 * rejected, but don't bother doing so here; there should be no
+		 * available expression unique index to match any such expression
+		 * against anyway.
+		 */
+		trans = transformExpr(pstate, trans, EXPR_KIND_INDEX_EXPRESSION);
+		/* Save in list of transformed expressions */
+		clauseexprs = list_append_unique(clauseexprs, trans);
+	}
+
+	return clauseexprs;
+}
+
+/*
+ * transformConflictClause -
+ *		transform expressions of ON CONFLICT UPDATE/IGNORE.
+ *
+ * Transformed expressions are used to infer one unique index relation to serve
+ * an ON CONFLICT arbiter.  Partial unique indexes may be inferred using WHERE
+ * clause from inference specification clause.
+ */
+void
+transformConflictClause(ParseState *pstate, ConflictClause *confClause,
+						List **arbiterExpr, Node **arbiterWhere)
+{
+	InferClause	   *infer = confClause->infer;
+
+	if (confClause->specclause == SPEC_INSERT && !infer)
+		ereport(ERROR,
+				(errcode(ERRCODE_SYNTAX_ERROR),
+				 errmsg("ON CONFLICT with UPDATE must contain columns or expressions to infer a unique index from"),
+				 parser_errposition(pstate,
+									exprLocation((Node *) confClause))));
+
+	/* Raw grammar must ensure this invariant holds */
+	Assert(confClause->specclause != SPEC_INSERT ||
+		   confClause->updatequery != NULL);
+
+	/*
+	 * If there is no inference clause, this might be an updatable view, which
+	 * are supported by ON CONFLICT IGNORE (without columns/expressions
+	 * specified to infer a unique index from -- this is mandatory for the
+	 * UPDATE variant).  It might also be a relation with inheritance children,
+	 * which would also make proceeding with inference fail.
+	 */
+	if (infer)
+	{
+		*arbiterExpr = resolve_unique_index_expr(pstate, infer,
+												 pstate->p_target_relation);
+
+		/* Handle the inference WHERE clause (for partial unique index inference) */
+		if (infer->whereClause)
+			*arbiterWhere = transformExpr(pstate, infer->whereClause,
+										  EXPR_KIND_INDEX_PREDICATE);
+	}
+
+	/*
+	 * It's convenient to form a list of expressions based on the
+	 * representation used by CREATE INDEX, since the same restrictions are
+	 * appropriate (on subqueries and so on).  However, from here on, the
+	 * handling of those expressions is identical to ordinary optimizable
+	 * statements.  In particular, assign_query_collations() can be trusted to
+	 * do the right thing with the post parse analysis query tree inference
+	 * clause representation.
+	 */
+}
+
+/*
  * addTargetToSortList
  *		If the given targetlist entry isn't already in the SortGroupClause
  *		list, add it to the end of the list, using the given sort ordering
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index f0f0488..d1583c7 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -1497,7 +1497,8 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
 	/*
 	 * Check to see if the sublink is in an invalid place within the query. We
 	 * allow sublinks everywhere in SELECT/INSERT/UPDATE/DELETE, but generally
-	 * not in utility statements.
+	 * not in utility statements.  They're also disallowed within auxiliary ON
+	 * CONFLICT UPDATE commands, which we check for here.
 	 */
 	err = NULL;
 	switch (pstate->p_expr_kind)
@@ -1564,6 +1565,9 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
 			 * which is sane anyway.
 			 */
 	}
+
+	if (pstate->p_is_speculative && pstate->p_is_update)
+		 err = _("cannot use subquery in ON CONFLICT UPDATE");
 	if (err)
 		ereport(ERROR,
 				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
diff --git a/src/backend/parser/parse_node.c b/src/backend/parser/parse_node.c
index 4130cbf..9a94fa4 100644
--- a/src/backend/parser/parse_node.c
+++ b/src/backend/parser/parse_node.c
@@ -84,7 +84,13 @@ free_parsestate(ParseState *pstate)
 				 errmsg("target lists can have at most %d entries",
 						MaxTupleAttributeNumber)));
 
-	if (pstate->p_target_relation != NULL)
+	/*
+	 * Don't close target relation for auxiliary ON CONFLICT UPDATE, since it
+	 * is managed by parent INSERT directly
+	 */
+	if (pstate->p_target_relation != NULL &&
+		(!pstate->p_is_speculative ||
+		 pstate->p_is_insert))
 		heap_close(pstate->p_target_relation, NoLock);
 
 	pfree(pstate);
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index fab2948..f37760b 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -43,6 +43,12 @@ typedef struct acquireLocksOnSubLinks_context
 	bool		for_execute;	/* AcquireRewriteLocks' forExecute param */
 } acquireLocksOnSubLinks_context;
 
+typedef struct excluded_replace_context
+{
+	int			varno;			/* varno of EXCLUDED.* Vars */
+	int			rvarno;			/* replace varno */
+} excluded_replace_context;
+
 static bool acquireLocksOnSubLinks(Node *node,
 					   acquireLocksOnSubLinks_context *context);
 static Query *rewriteRuleAction(Query *parsetree,
@@ -66,11 +72,15 @@ static void markQueryForLocking(Query *qry, Node *jtnode,
 					LockClauseStrength strength, LockWaitPolicy waitPolicy,
 					bool pushedDown);
 static List *matchLocks(CmdType event, RuleLock *rulelocks,
-		   int varno, Query *parsetree);
+		   int varno, Query *parsetree, bool *hasUpdate);
 static Query *fireRIRrules(Query *parsetree, List *activeRIRs,
 			 bool forUpdatePushedDown);
 static bool view_has_instead_trigger(Relation view, CmdType event);
 static Bitmapset *adjust_view_column_set(Bitmapset *cols, List *targetlist);
+static Node *excluded_replace_vars(Node *expr,
+			 excluded_replace_context *context);
+static Node *excluded_replace_vars_callback(Var *var,
+		   replace_rte_variables_context *context);
 
 
 /*
@@ -1288,7 +1298,8 @@ static List *
 matchLocks(CmdType event,
 		   RuleLock *rulelocks,
 		   int varno,
-		   Query *parsetree)
+		   Query *parsetree,
+		   bool *hasUpdate)
 {
 	List	   *matching_locks = NIL;
 	int			nlocks;
@@ -1309,6 +1320,9 @@ matchLocks(CmdType event,
 	{
 		RewriteRule *oneLock = rulelocks->rules[i];
 
+		if (oneLock->event == CMD_UPDATE)
+			*hasUpdate = true;
+
 		/*
 		 * Suppress ON INSERT/UPDATE/DELETE rules that are disabled or
 		 * configured to not fire during the current sessions replication
@@ -2961,6 +2975,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
 	CmdType		event = parsetree->commandType;
 	bool		instead = false;
 	bool		returning = false;
+	bool		updatableview = false;
 	Query	   *qual_product = NULL;
 	List	   *rewritten = NIL;
 	ListCell   *lc1;
@@ -3043,6 +3058,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
 		Relation	rt_entry_relation;
 		List	   *locks;
 		List	   *product_queries;
+		bool		hasUpdate = false;
 
 		result_relation = parsetree->resultRelation;
 		Assert(result_relation != 0);
@@ -3094,6 +3110,49 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
 				/* Process just the main targetlist */
 				rewriteTargetListIU(parsetree, rt_entry_relation, NULL);
 			}
+
+			if (parsetree->specClause == SPEC_INSERT)
+			{
+				Query						   *qry;
+				excluded_replace_context		context;
+
+				/*
+				 * While user-defined rules will never be applied in the
+				 * auxiliary update query, normalization of tlist is still
+				 * required
+				 */
+				qry = (Query *) parsetree->onConflict;
+				rewriteTargetListIU(qry, rt_entry_relation, NULL);
+
+				/*
+				 * Replace OLD Vars (associated with the EXCLUDED.* alias) with
+				 * first (and only) "real" relation RTE in rtable.  This allows
+				 * the implementation to treat EXCLUDED.* as an alias for the
+				 * target relation, which is useful during parse analysis,
+				 * while ultimately having those references rewritten as
+				 * special ExcludedExpr references to the corresponding Var in
+				 * the target RTE.
+				 *
+				 * This is necessary because while we want a join-like syntax
+				 * for aesthetic reasons, the resemblance is superficial.  In
+				 * fact, execution of the ModifyTable node (and its direct
+				 * child auxiliary query) manages tupleslot state directly, and
+				 * is directly tasked with making available the appropriate
+				 * tupleslot to the expression context.
+				 *
+				 * This is a kludge, but appears necessary, since the slot made
+				 * available for referencing via ExcludedExpr is in fact the
+				 * slot just excluded from insertion by speculative insertion
+				 * (with the effects of BEFORE ROW INSERT triggers carried).
+				 * An ad-hoc method for making the excluded tuple available
+				 * within the auxiliary expression context is appropriate.
+				 */
+				context.varno = PRS2_OLD_VARNO;
+				context.rvarno = PRS2_OLD_VARNO + 1;
+
+				parsetree->onConflict =
+					excluded_replace_vars(parsetree->onConflict, &context);
+			}
 		}
 		else if (event == CMD_UPDATE)
 		{
@@ -3111,7 +3170,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
 		 * Collect and apply the appropriate rules.
 		 */
 		locks = matchLocks(event, rt_entry_relation->rd_rules,
-						   result_relation, parsetree);
+						   result_relation, parsetree, &hasUpdate);
 
 		product_queries = fireRules(parsetree,
 									result_relation,
@@ -3160,6 +3219,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
 			 */
 			instead = true;
 			returning = true;
+			updatableview = true;
 		}
 
 		/*
@@ -3240,6 +3300,18 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
 			}
 		}
 
+		/*
+		 * Updatable views are supported on a limited basis by ON CONFLICT
+		 * IGNORE (if there is no unique index inference required, speculative
+		 * insertion proceeds).
+		 */
+		if (parsetree->specClause != SPEC_NONE &&
+			(product_queries != NIL || hasUpdate) &&
+			!updatableview)
+				ereport(ERROR,
+						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+						 errmsg("INSERT with ON CONFLICT clause may not target relation with INSERT or UPDATE rules")));
+
 		heap_close(rt_entry_relation, NoLock);
 	}
 
@@ -3402,3 +3474,52 @@ QueryRewrite(Query *parsetree)
 
 	return results;
 }
+
+/*
+ * Apply pullup variable replacement throughout an expression tree
+ *
+ * Returns modified tree, with user-specified rvarno replaced with varno.
+ */
+static Node *
+excluded_replace_vars(Node *expr, excluded_replace_context *context)
+{
+	/*
+	 * Don't recurse into subqueries;  they're forbidden in auxiliary ON
+	 * CONFLICT query
+	 */
+	return replace_rte_variables(expr,
+								 context->varno, 0,
+								 excluded_replace_vars_callback,
+								 (void *) context,
+								 NULL);
+}
+
+static Node *
+excluded_replace_vars_callback(Var *var,
+							   replace_rte_variables_context *context)
+{
+	excluded_replace_context *rcon = (excluded_replace_context *) context->callback_arg;
+	ExcludedExpr *n = makeNode(ExcludedExpr);
+
+	/* Replace with an enclosing ExcludedExpr */
+	var->varno = rcon->rvarno;
+	n->arg = (Node *) var;
+
+	/*
+	 * Would have to adjust varlevelsup if referenced item is from higher query
+	 * (should not happen)
+	 */
+	Assert(var->varlevelsup == 0);
+
+	if (var->varattno < 0)
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
+				 errmsg("cannot reference system column using EXCLUDED.* alias")));
+
+	if (var->varattno == 0)
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
+				 errmsg("cannot reference whole-row using EXCLUDED.* alias")));
+
+	return (Node*) n;
+}
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index a1ebc72..a1c5bcb 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -421,6 +421,13 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
 								  latestXid))
 			ShmemVariableCache->latestCompletedXid = latestXid;
 
+		/* Also clear any speculative insertion information */
+		MyProc->specInsertRel.spcNode = InvalidOid;
+		MyProc->specInsertRel.dbNode = InvalidOid;
+		MyProc->specInsertRel.relNode = InvalidOid;
+		ItemPointerSetInvalid(&MyProc->specInsertTid);
+		MyProc->specInsertToken = 0;
+
 		LWLockRelease(ProcArrayLock);
 	}
 	else
@@ -438,6 +445,11 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
 		pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
 		pgxact->delayChkpt = false;		/* be sure this is cleared in abort */
 		proc->recoveryConflictPending = false;
+		MyProc->specInsertRel.spcNode = InvalidOid;
+		MyProc->specInsertRel.dbNode = InvalidOid;
+		MyProc->specInsertRel.relNode = InvalidOid;
+		ItemPointerSetInvalid(&MyProc->specInsertTid);
+		MyProc->specInsertToken = 0;
 
 		Assert(pgxact->nxids == 0);
 		Assert(pgxact->overflowed == false);
@@ -476,6 +488,13 @@ ProcArrayClearTransaction(PGPROC *proc)
 	/* Clear the subtransaction-XID cache too */
 	pgxact->nxids = 0;
 	pgxact->overflowed = false;
+
+	/* these should be clear, but just in case.. */
+	MyProc->specInsertRel.spcNode = InvalidOid;
+	MyProc->specInsertRel.dbNode = InvalidOid;
+	MyProc->specInsertRel.relNode = InvalidOid;
+	ItemPointerSetInvalid(&MyProc->specInsertTid);
+	MyProc->specInsertToken = 0;
 }
 
 /*
@@ -1110,6 +1129,96 @@ TransactionIdIsActive(TransactionId xid)
 
 
 /*
+ * SetSpeculativeInsertionToken -- Set speculative token
+ *
+ * The backend local counter value is set, to allow waiters to differentiate
+ * individual speculative insertions.
+ */
+void
+SetSpeculativeInsertionToken(uint32 token)
+{
+	MyProc->specInsertToken = token;
+}
+
+/*
+ * SetSpeculativeInsertionTid -- Set TID for speculative relfilenode
+ */
+void
+SetSpeculativeInsertionTid(RelFileNode relnode, ItemPointer tid)
+{
+	LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+	MyProc->specInsertRel = relnode;
+	ItemPointerCopy(tid, &MyProc->specInsertTid);
+	LWLockRelease(ProcArrayLock);
+}
+
+/*
+ * ClearSpeculativeInsertionState -- Clear token and TID for ourselves
+ */
+void
+ClearSpeculativeInsertionState(void)
+{
+	LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+	MyProc->specInsertRel.spcNode = InvalidOid;
+	MyProc->specInsertRel.dbNode = InvalidOid;
+	MyProc->specInsertRel.relNode = InvalidOid;
+	ItemPointerSetInvalid(&MyProc->specInsertTid);
+	MyProc->specInsertToken = 0;
+	LWLockRelease(ProcArrayLock);
+}
+
+/*
+ * Returns a speculative insertion token for waiting for the insertion to
+ * finish
+ */
+uint32
+SpeculativeInsertionIsInProgress(TransactionId xid, RelFileNode rel,
+								 ItemPointer tid)
+{
+	ProcArrayStruct	   *arrayP = procArray;
+	int					index;
+	uint32				result = 0;
+
+	if (TransactionIdPrecedes(xid, RecentXmin))
+		return result;
+
+	/*
+	 * Get the top transaction id.
+	 *
+	 * XXX We could search the proc array first, like
+	 * TransactionIdIsInProgress() does, but this isn't performance-critical.
+	 */
+	xid = SubTransGetTopmostTransaction(xid);
+
+	LWLockAcquire(ProcArrayLock, LW_SHARED);
+
+	for (index = 0; index < arrayP->numProcs; index++)
+	{
+		int			pgprocno = arrayP->pgprocnos[index];
+		PGPROC	   *proc = &allProcs[pgprocno];
+		volatile PGXACT *pgxact = &allPgXact[pgprocno];
+
+		if (pgxact->xid == xid)
+		{
+			/*
+			 * Found the backend.  Is it doing a speculative insertion of the
+			 * given tuple?
+			 */
+			if (RelFileNodeEquals(proc->specInsertRel, rel) &&
+				ItemPointerEquals(tid, &proc->specInsertTid))
+				result = proc->specInsertToken;
+
+			break;
+		}
+	}
+
+	LWLockRelease(ProcArrayLock);
+
+	return result;
+}
+
+
+/*
  * GetOldestXmin -- returns oldest transaction that was running
  *					when any current transaction was started.
  *
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index d13a167..7a1df22 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -575,6 +575,69 @@ ConditionalXactLockTableWait(TransactionId xid)
 	return true;
 }
 
+static uint32 speculativeInsertionToken = 0;
+
+/*
+ *		SpeculativeInsertionLockAcquire
+ *
+ * Insert a lock showing that the given transaction ID is inserting a tuple,
+ * but hasn't yet decided whether it's going to keep it. The lock can then be
+ * used to wait for the decision to go ahead with the insertion, or aborting
+ * it.
+ *
+ * The token is used to distinguish multiple insertions by the same
+ * transaction. A counter will do, for example.
+ */
+void
+SpeculativeInsertionLockAcquire(TransactionId xid)
+{
+	LOCKTAG		tag;
+
+	speculativeInsertionToken++;
+	SetSpeculativeInsertionToken(speculativeInsertionToken);
+
+	SET_LOCKTAG_SPECULATIVE_INSERTION(tag, xid, speculativeInsertionToken);
+
+	(void) LockAcquire(&tag, ExclusiveLock, false, false);
+}
+
+/*
+ *		SpeculativeInsertionLockRelease
+ *
+ * Delete the lock showing that the given transaction is speculatively
+ * inserting a tuple.
+ */
+void
+SpeculativeInsertionLockRelease(TransactionId xid)
+{
+	LOCKTAG		tag;
+
+	SET_LOCKTAG_SPECULATIVE_INSERTION(tag, xid, speculativeInsertionToken);
+
+	LockRelease(&tag, ExclusiveLock, false);
+}
+
+/*
+ *		SpeculativeInsertionWait
+ *
+ * Wait for the specified transaction to finish or abort the insertion of a
+ * tuple.
+ */
+void
+SpeculativeInsertionWait(TransactionId xid, uint32 token)
+{
+	LOCKTAG		tag;
+
+	SET_LOCKTAG_SPECULATIVE_INSERTION(tag, xid, token);
+
+	Assert(TransactionIdIsValid(xid));
+	Assert(token != 0);
+
+	(void) LockAcquire(&tag, ShareLock, false, false);
+	LockRelease(&tag, ShareLock, false);
+}
+
+
 /*
  * XactLockTableWaitErrorContextCb
  *		Error context callback for transaction lock waits.
@@ -873,6 +936,11 @@ DescribeLockTag(StringInfo buf, const LOCKTAG *tag)
 							 tag->locktag_field1,
 							 tag->locktag_field2);
 			break;
+		case LOCKTAG_PROMISE_TUPLE_INSERTION:
+			appendStringInfo(buf,
+							 _("tuple insertion by transaction %u"),
+							 tag->locktag_field1);
+			break;
 		case LOCKTAG_OBJECT:
 			appendStringInfo(buf,
 							 _("object %u of class %u of database %u"),
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index 9c14e8a..41c4191 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -189,7 +189,8 @@ ProcessQuery(PlannedStmt *plan,
 	 */
 	if (completionTag)
 	{
-		Oid			lastOid;
+		Oid					lastOid;
+		ModifyTableState   *pstate;
 
 		switch (queryDesc->operation)
 		{
@@ -198,12 +199,16 @@ ProcessQuery(PlannedStmt *plan,
 						 "SELECT %u", queryDesc->estate->es_processed);
 				break;
 			case CMD_INSERT:
+				pstate = (((ModifyTableState *) queryDesc->planstate));
+				Assert(IsA(pstate, ModifyTableState));
+
 				if (queryDesc->estate->es_processed == 1)
 					lastOid = queryDesc->estate->es_lastoid;
 				else
 					lastOid = InvalidOid;
 				snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
-				   "INSERT %u %u", lastOid, queryDesc->estate->es_processed);
+				   "%s %u %u", pstate->spec == SPEC_INSERT? "UPSERT":"INSERT",
+				   lastOid, queryDesc->estate->es_processed);
 				break;
 			case CMD_UPDATE:
 				snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
@@ -1356,7 +1361,10 @@ PortalRunMulti(Portal portal, bool isTopLevel,
 	 * 0" here because technically there is no query of the matching tag type,
 	 * and printing a non-zero count for a different query type seems wrong,
 	 * e.g.  an INSERT that does an UPDATE instead should not print "0 1" if
-	 * one row was updated.  See QueryRewrite(), step 3, for details.
+	 * one row was updated (unless the ON CONFLICT UPDATE, or "UPSERT" variant
+	 * of INSERT was used to update the row, where it's logically a direct
+	 * effect of the top level command).  See QueryRewrite(), step 3, for
+	 * details.
 	 */
 	if (completionTag && completionTag[0] == '\0')
 	{
@@ -1366,6 +1374,8 @@ PortalRunMulti(Portal portal, bool isTopLevel,
 			sprintf(completionTag, "SELECT 0 0");
 		else if (strcmp(completionTag, "INSERT") == 0)
 			strcpy(completionTag, "INSERT 0 0");
+		else if (strcmp(completionTag, "UPSERT") == 0)
+			strcpy(completionTag, "UPSERT 0 0");
 		else if (strcmp(completionTag, "UPDATE") == 0)
 			strcpy(completionTag, "UPDATE 0");
 		else if (strcmp(completionTag, "DELETE") == 0)
diff --git a/src/backend/utils/adt/lockfuncs.c b/src/backend/utils/adt/lockfuncs.c
index a1967b69..95d62cb 100644
--- a/src/backend/utils/adt/lockfuncs.c
+++ b/src/backend/utils/adt/lockfuncs.c
@@ -28,6 +28,7 @@ static const char *const LockTagTypeNames[] = {
 	"tuple",
 	"transactionid",
 	"virtualxid",
+	"inserter transactionid",
 	"object",
 	"userlock",
 	"advisory"
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index c1d860c..04235e2 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -5645,6 +5645,24 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
 
 		return NULL;
 	}
+	else if (var->varno == INNER_VAR)
+	{
+		/* Assume an EXCLUDED variable */
+		rte = rt_fetch(PRS2_OLD_VARNO, dpns->rtable);
+
+		/*
+		 * Sanity check:  EXCLUDED.* Vars should only appear in auxiliary ON
+		 * CONFLICT UPDATE queries.  Assert that rte and planstate are
+		 * consistent with that.
+		 */
+		Assert(rte->rtekind == RTE_RELATION);
+		Assert(IsA(dpns->planstate, SeqScanState) ||
+			   IsA(dpns->planstate, ResultState));
+
+		refname = "excluded";
+		colinfo = deparse_columns_fetch(PRS2_OLD_VARNO, dpns);
+		attnum = var->varattno;
+	}
 	else
 	{
 		elog(ERROR, "bogus varno: %d", var->varno);
@@ -6385,6 +6403,7 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags)
 		case T_CoerceToDomainValue:
 		case T_SetToDefault:
 		case T_CurrentOfExpr:
+		case T_ExcludedExpr:
 			/* single words: always simple */
 			return true;
 
@@ -7610,6 +7629,26 @@ get_rule_expr(Node *node, deparse_context *context,
 			}
 			break;
 
+		case T_ExcludedExpr:
+			{
+				ExcludedExpr	   *excludedexpr = (ExcludedExpr *) node;
+				Var				   *variable = (Var *) excludedexpr->arg;
+				bool				save_varprefix;
+
+				/*
+				 * Force parentheses because our caller probably assumed our
+				 * Var is a simple expression.
+				 */
+				appendStringInfoChar(buf, '(');
+				save_varprefix = context->varprefix;
+				/* Ensure EXCLUDED.* prefix is always visible */
+				context->varprefix = true;
+				get_rule_expr((Node *) variable, context, true);
+				context->varprefix = save_varprefix;
+				appendStringInfoChar(buf, ')');
+			}
+			break;
+
 		case T_List:
 			{
 				char	   *sep;
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index 777f55c..99bb417 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -170,6 +170,13 @@ HeapTupleSatisfiesSelf(HeapTuple htup, Snapshot snapshot, Buffer buffer)
 	Assert(ItemPointerIsValid(&htup->t_self));
 	Assert(htup->t_tableOid != InvalidOid);
 
+	/*
+	 * Never return "super-deleted" tuples
+	 */
+	if (TransactionIdEquals(HeapTupleHeaderGetRawXmin(tuple),
+							InvalidTransactionId))
+		return false;
+
 	if (!HeapTupleHeaderXminCommitted(tuple))
 	{
 		if (HeapTupleHeaderXminInvalid(tuple))
@@ -726,6 +733,17 @@ HeapTupleSatisfiesDirty(HeapTuple htup, Snapshot snapshot,
 	Assert(htup->t_tableOid != InvalidOid);
 
 	snapshot->xmin = snapshot->xmax = InvalidTransactionId;
+	snapshot->speculativeToken = 0;
+
+	/*
+	 * Never return "super-deleted" tuples
+	 *
+	 * XXX:  Comment this code out and you'll get conflicts within
+	 * ExecLockUpdateTuple(), which result in an infinite loop.
+	 */
+	if (TransactionIdEquals(HeapTupleHeaderGetRawXmin(tuple),
+							InvalidTransactionId))
+		return false;
 
 	if (!HeapTupleHeaderXminCommitted(tuple))
 	{
@@ -807,6 +825,26 @@ HeapTupleSatisfiesDirty(HeapTuple htup, Snapshot snapshot,
 		}
 		else if (TransactionIdIsInProgress(HeapTupleHeaderGetRawXmin(tuple)))
 		{
+			RelFileNode		rnode;
+			ForkNumber		forkno;
+			BlockNumber		blockno;
+
+			BufferGetTag(buffer, &rnode, &forkno, &blockno);
+
+			/* tuples can only be in the main fork */
+			Assert(forkno == MAIN_FORKNUM);
+			Assert(blockno == ItemPointerGetBlockNumber(&htup->t_self));
+
+			/*
+			 * Set speculative token.  Caller can worry about xmax, since it
+			 * requires a conclusively locked row version, and a concurrent
+			 * update to this tuple is a conflict of its purposes.
+			 */
+			snapshot->speculativeToken =
+				SpeculativeInsertionIsInProgress(HeapTupleHeaderGetRawXmin(tuple),
+												 rnode,
+												 &htup->t_self);
+
 			snapshot->xmin = HeapTupleHeaderGetRawXmin(tuple);
 			/* XXX shouldn't we fall through to look at xmax? */
 			return true;		/* in insertion by other */
@@ -922,6 +960,13 @@ HeapTupleSatisfiesMVCC(HeapTuple htup, Snapshot snapshot,
 	Assert(ItemPointerIsValid(&htup->t_self));
 	Assert(htup->t_tableOid != InvalidOid);
 
+	/*
+	 * Never return "super-deleted" tuples
+	 */
+	if (TransactionIdEquals(HeapTupleHeaderGetRawXmin(tuple),
+							InvalidTransactionId))
+		return false;
+
 	if (!HeapTupleHeaderXminCommitted(tuple))
 	{
 		if (HeapTupleHeaderXminInvalid(tuple))
@@ -1126,6 +1171,13 @@ HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin,
 	Assert(htup->t_tableOid != InvalidOid);
 
 	/*
+	 * Immediately VACUUM "super-deleted" tuples
+	 */
+	if (TransactionIdEquals(HeapTupleHeaderGetRawXmin(tuple),
+							InvalidTransactionId))
+		return HEAPTUPLE_DEAD;
+
+	/*
 	 * Has inserting transaction committed?
 	 *
 	 * If the inserting transaction aborted, then the tuple was never visible
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index 275bdcc..9302e41 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -894,9 +894,12 @@ PrintQueryResults(PGresult *results)
 				success = StoreQueryTuple(results);
 			else
 				success = PrintQueryTuples(results);
-			/* if it's INSERT/UPDATE/DELETE RETURNING, also print status */
+			/*
+			 * if it's INSERT/UPSERT/UPDATE/DELETE RETURNING, also print status
+			 */
 			cmdstatus = PQcmdStatus(results);
 			if (strncmp(cmdstatus, "INSERT", 6) == 0 ||
+				strncmp(cmdstatus, "UPSERT", 6) == 0 ||
 				strncmp(cmdstatus, "UPDATE", 6) == 0 ||
 				strncmp(cmdstatus, "DELETE", 6) == 0)
 				PrintQueryStatus(results);
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 939d93d..62e760a 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -28,6 +28,7 @@
 #define HEAP_INSERT_SKIP_WAL	0x0001
 #define HEAP_INSERT_SKIP_FSM	0x0002
 #define HEAP_INSERT_FROZEN		0x0004
+#define HEAP_INSERT_SPECULATIVE 0x0008
 
 typedef struct BulkInsertStateData *BulkInsertState;
 
@@ -141,7 +142,7 @@ extern void heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
 				  CommandId cid, int options, BulkInsertState bistate);
 extern HTSU_Result heap_delete(Relation relation, ItemPointer tid,
 			CommandId cid, Snapshot crosscheck, bool wait,
-			HeapUpdateFailureData *hufd);
+			HeapUpdateFailureData *hufd, bool killspeculative);
 extern HTSU_Result heap_update(Relation relation, ItemPointer otid,
 			HeapTuple newtup,
 			CommandId cid, Snapshot crosscheck, bool wait,
diff --git a/src/include/access/heapam_xlog.h b/src/include/access/heapam_xlog.h
index a2ed2a0..870985d 100644
--- a/src/include/access/heapam_xlog.h
+++ b/src/include/access/heapam_xlog.h
@@ -73,6 +73,8 @@
 #define XLOG_HEAP_SUFFIX_FROM_OLD			(1<<6)
 /* last xl_heap_multi_insert record for one heap_multi_insert() call */
 #define XLOG_HEAP_LAST_MULTI_INSERT			(1<<7)
+/* reuse xl_heap_multi_insert-only bit for xl_heap_delete */
+#define XLOG_HEAP_KILLED_SPECULATIVE_TUPLE	XLOG_HEAP_LAST_MULTI_INSERT
 
 /* convenience macro for checking whether any form of old tuple was logged */
 #define XLOG_HEAP_CONTAINS_OLD						\
diff --git a/src/include/catalog/index.h b/src/include/catalog/index.h
index e7cc7a0..42c10d4 100644
--- a/src/include/catalog/index.h
+++ b/src/include/catalog/index.h
@@ -80,6 +80,8 @@ extern void index_drop(Oid indexId, bool concurrent);
 
 extern IndexInfo *BuildIndexInfo(Relation index);
 
+extern void AddUniqueSpeculative(Relation index, IndexInfo *ii);
+
 extern void FormIndexDatum(IndexInfo *indexInfo,
 			   TupleTableSlot *slot,
 			   EState *estate,
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index 40fde83..accdc83 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -352,16 +352,21 @@ extern bool ExecRelationIsTargetRelation(EState *estate, Index scanrelid);
 extern Relation ExecOpenScanRelation(EState *estate, Index scanrelid, int eflags);
 extern void ExecCloseScanRelation(Relation scanrel);
 
-extern void ExecOpenIndices(ResultRelInfo *resultRelInfo);
+extern void ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative);
 extern void ExecCloseIndices(ResultRelInfo *resultRelInfo);
+extern List *ExecLockIndexValues(TupleTableSlot *slot, EState *estate,
+						   SpecCmd specReason);
 extern List *ExecInsertIndexTuples(TupleTableSlot *slot, ItemPointer tupleid,
-					  EState *estate);
-extern bool check_exclusion_constraint(Relation heap, Relation index,
-						   IndexInfo *indexInfo,
-						   ItemPointer tupleid,
-						   Datum *values, bool *isnull,
-						   EState *estate,
-						   bool newIndex, bool errorOK);
+					  EState *estate, bool noDupErr, Oid arbiterIdx);
+extern bool ExecCheckIndexConstraints(TupleTableSlot *slot, EState *estate,
+					  ItemPointer conflictTid, Oid arbiterIdx);
+extern bool check_exclusion_or_unique_constraint(Relation heap, Relation index,
+									   IndexInfo *indexInfo,
+									   ItemPointer tupleid,
+									   Datum *values, bool *isnull,
+									   EState *estate,
+									   bool newIndex, bool errorOK,
+									   bool wait, ItemPointer conflictTid);
 
 extern void RegisterExprContextCallback(ExprContext *econtext,
 							ExprContextCallbackFunction function,
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 41288ed..2e4e168 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -41,6 +41,9 @@
  *		ExclusionOps		Per-column exclusion operators, or NULL if none
  *		ExclusionProcs		Underlying function OIDs for ExclusionOps
  *		ExclusionStrats		Opclass strategy numbers for ExclusionOps
+ *		UniqueOps			These are like Exclusion*, but for unique indexes
+ *		UniqueProcs
+ *		UniqueStrats
  *		Unique				is it a unique index?
  *		ReadyForInserts		is it valid for inserts?
  *		Concurrent			are we doing a concurrent index build?
@@ -62,6 +65,9 @@ typedef struct IndexInfo
 	Oid		   *ii_ExclusionOps;	/* array with one entry per column */
 	Oid		   *ii_ExclusionProcs;		/* array with one entry per column */
 	uint16	   *ii_ExclusionStrats;		/* array with one entry per column */
+	Oid		   *ii_UniqueOps;	/* array with one entry per column */
+	Oid		   *ii_UniqueProcs;		/* array with one entry per column */
+	uint16	   *ii_UniqueStrats;		/* array with one entry per column */
 	bool		ii_Unique;
 	bool		ii_ReadyForInserts;
 	bool		ii_Concurrent;
@@ -967,6 +973,16 @@ typedef struct DomainConstraintState
 	ExprState  *check_expr;		/* for CHECK, a boolean expression */
 } DomainConstraintState;
 
+/* ----------------
+ *		ExcludedExprState node
+ * ----------------
+ */
+typedef struct ExcludedExprState
+{
+	ExprState	xprstate;
+	ExprState  *arg;			/* the argument */
+} ExcludedExprState;
+
 
 /* ----------------------------------------------------------------
  *				 Executor State Trees
@@ -1088,6 +1104,9 @@ typedef struct ModifyTableState
 	int			mt_whichplan;	/* which one is being executed (0..n-1) */
 	ResultRelInfo *resultRelInfo;		/* per-subplan target relations */
 	List	  **mt_arowmarks;	/* per-subplan ExecAuxRowMark lists */
+	SpecCmd		spec;			/* reason for speculative insertion */
+	Oid			arbiterIndex;	/* unique index to arbitrate taking alt path */
+	PlanState  *onConflict; /* associated OnConflict state */
 	EPQState	mt_epqstate;	/* for evaluating EvalPlanQual rechecks */
 	bool		fireBSTriggers; /* do we need to fire stmt triggers? */
 } ModifyTableState;
diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h
index 97ef0fc..8d6fba4 100644
--- a/src/include/nodes/nodes.h
+++ b/src/include/nodes/nodes.h
@@ -168,6 +168,7 @@ typedef enum NodeTag
 	T_CoerceToDomainValue,
 	T_SetToDefault,
 	T_CurrentOfExpr,
+	T_ExcludedExpr,
 	T_TargetEntry,
 	T_RangeTblRef,
 	T_JoinExpr,
@@ -207,6 +208,7 @@ typedef enum NodeTag
 	T_NullTestState,
 	T_CoerceToDomainState,
 	T_DomainConstraintState,
+	T_ExcludedExprState,
 
 	/*
 	 * TAGS FOR PLANNER NODES (relation.h)
@@ -412,6 +414,8 @@ typedef enum NodeTag
 	T_RowMarkClause,
 	T_XmlSerialize,
 	T_WithClause,
+	T_InferClause,
+	T_ConflictClause,
 	T_CommonTableExpr,
 
 	/*
@@ -624,4 +628,18 @@ typedef enum JoinType
 	   (1 << JOIN_RIGHT) | \
 	   (1 << JOIN_ANTI))) != 0)
 
+/*
+ * SpecCmd -
+ *	  "Speculative insertion" clause
+ *
+ * This is needed in both parsenodes.h and plannodes.h, so put it here...
+ */
+typedef enum
+{
+	SPEC_NONE,		/* Not involved in speculative insertion */
+	SPEC_IGNORE,	/* INSERT of "ON CONFLICT IGNORE" */
+	SPEC_INSERT,	/* INSERT of "ON CONFLICT UPDATE" */
+	SPEC_UPDATE		/* UPDATE of "ON CONFLICT UPDATE" */
+} SpecCmd;
+
 #endif   /* NODES_H */
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 86d1c07..c03c9ca 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -132,6 +132,11 @@ typedef struct Query
 
 	List	   *withCheckOptions;		/* a list of WithCheckOption's */
 
+	SpecCmd		specClause;		/* speculative insertion clause */
+	List	   *arbiterExpr;	/* Unique index arbiter exprs */
+	Node	   *arbiterWhere;	/* Unique index arbiter WHERE clause */
+	Node	   *onConflict;		/* ON CONFLICT Query */
+
 	List	   *returningList;	/* return-values list (of TargetEntry) */
 
 	List	   *groupClause;	/* a list of SortGroupClause's */
@@ -564,7 +569,7 @@ typedef enum TableLikeOption
 } TableLikeOption;
 
 /*
- * IndexElem - index parameters (used in CREATE INDEX)
+ * IndexElem - index parameters (used in CREATE INDEX, and in ON CONFLICT)
  *
  * For a plain index attribute, 'name' is the name of the table column to
  * index, and 'expr' is NULL.  For an index expression, 'name' is NULL and
@@ -999,6 +1004,36 @@ typedef struct WithClause
 } WithClause;
 
 /*
+ * InferClause -
+ * 		ON CONFLICT unique index inference clause
+ *
+ * Note: InferClause does not propagate into the Query representation.
+ */
+typedef struct InferClause
+{
+	NodeTag		type;
+	List	   *indexElems;		/* IndexElems to infer unique index */
+	Node	   *whereClause;	/* qualification (partial-index predicate) */
+	int			location;		/* token location, or -1 if unknown */
+} InferClause;
+
+/*
+ * ConflictClause -
+ * 		representation of ON CONFLICT clause
+ *
+ * Note: ConflictClause does not propagate into the Query representation.
+ * However, Query may contain onConflict child Query.
+ */
+typedef struct ConflictClause
+{
+	NodeTag			type;
+	SpecCmd			specclause;		/* Variant specified */
+	InferClause	   *infer;			/* Optional index inference clause */
+	Node		   *updatequery;	/* Update parse stmt */
+	int				location;		/* token location, or -1 if unknown */
+} ConflictClause;
+
+/*
  * CommonTableExpr -
  *	   representation of WITH list element
  *
@@ -1048,6 +1083,7 @@ typedef struct InsertStmt
 	RangeVar   *relation;		/* relation to insert into */
 	List	   *cols;			/* optional: names of the target columns */
 	Node	   *selectStmt;		/* the source SELECT/VALUES, or NULL */
+	ConflictClause  *confClause;	/* ON CONFLICT clause */
 	List	   *returningList;	/* list of expressions to return */
 	WithClause *withClause;		/* WITH clause */
 } InsertStmt;
@@ -1073,7 +1109,7 @@ typedef struct DeleteStmt
 typedef struct UpdateStmt
 {
 	NodeTag		type;
-	RangeVar   *relation;		/* relation to update */
+	RangeVar   *relation;		/* relation to update (NULL for speculative) */
 	List	   *targetList;		/* the target list (of ResTarget) */
 	Node	   *whereClause;	/* qualifications */
 	List	   *fromClause;		/* optional from clause for more tables */
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 316c9ce..7e05cb7 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -177,6 +177,9 @@ typedef struct ModifyTable
 	List	   *resultRelations;	/* integer list of RT indexes */
 	int			resultRelIndex; /* index of first resultRel in plan's list */
 	List	   *plans;			/* plan(s) producing source data */
+	SpecCmd		spec;			/* speculative insertion specification */
+	Oid			arbiterIndex;	/* Oid of ON CONFLICT arbiter index */
+	Plan	   *onConflictPlan;	/* Plan for ON CONFLICT UPDATE auxiliary query */
 	List	   *withCheckOptionLists;	/* per-target-table WCO lists */
 	List	   *returningLists; /* per-target-table RETURNING tlists */
 	List	   *fdwPrivLists;	/* per-target-table FDW private data lists */
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index 1d06f42..21c39dc 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -1147,6 +1147,53 @@ typedef struct CurrentOfExpr
 	int			cursor_param;	/* refcursor parameter number, or 0 */
 } CurrentOfExpr;
 
+/*
+ * ExcludedExpr - an EXCLUDED.* expression
+ *
+ * During parse analysis of ON CONFLICT UPDATE auxiliary queries, a dummy
+ * EXCLUDED range table entry is generated, which is actually just an alias for
+ * the target relation.  This is useful during parse analysis, allowing the
+ * parser to produce simple error messages, for example.  There is the
+ * appearance of a join within the auxiliary ON CONFLICT UPDATE, superficially
+ * similar to a join in an UPDATE ... FROM;  this is a limited, ad-hoc join
+ * though, as the executor needs to tightly control the referenced tuple/slot
+ * through which update evaluation references excluded values originally
+ * proposed for insertion.  Note that EXCLUDED.* values carry forward the
+ * effects of BEFORE ROW INSERT triggers.
+ *
+ * To implement a limited "join" for ON CONFLICT UPDATE auxiliary queries,
+ * during the rewrite stage, Vars referencing the alias EXCLUDED.* RTE are
+ * swapped with ExcludedExprs, which also contain Vars;  their Vars are
+ * equivalent, but reference the target instead.  The ExcludedExpr Var actually
+ * evaluates against varno INNER_VAR during expression evaluation (and not a
+ * varno INDEX_VAR associated with an entry in the flattened range table
+ * representing the target, which is necessarily being scanned whenever an
+ * ExcludedExpr is evaluated) while still being logically associated with the
+ * target.  The Var is only rigged to reference the inner slot during
+ * ExcludedExpr initialization.  The executor closely controls the evaluation
+ * expression, installing the EXCLUDED slot actually excluded from insertion
+ * into the inner slot of the child/auxiliary evaluation context in an ad-hoc
+ * fashion, which, after ExcludedExpr initialization, is expected (i.e. it is
+ * expected during ExcludedExpr evaluation that the parent insert will make
+ * each excluded tuple available in the inner slot in turn).  ExcludedExpr are
+ * only ever evaluated during special speculative insertion related EPQ
+ * expression evaluation, purely for the benefit of auxiliary UPDATE
+ * expressions.
+ *
+ * Aside from representing a logical choke point for this special expression
+ * evaluation, having a dedicated primnode also prevents the optimizer from
+ * considering various optimizations that might otherwise be attempted.
+ * Obviously there is no useful join optimization possible within the auxiliary
+ * query, and an ExcludedExpr based post-rewrite query tree representation is a
+ * convenient way of preventing that, as well as related inapplicable
+ * optimizations concerning the equivalence of Vars.
+ */
+typedef struct ExcludedExpr
+{
+	Expr		xpr;
+	Node	   *arg;			/* argument (Var) */
+} ExcludedExpr;
+
 /*--------------------
  * TargetEntry -
  *	   a target entry (used in query target lists)
diff --git a/src/include/optimizer/paths.h b/src/include/optimizer/paths.h
index 6cad92e..801effe 100644
--- a/src/include/optimizer/paths.h
+++ b/src/include/optimizer/paths.h
@@ -64,6 +64,7 @@ extern Expr *adjust_rowcompare_for_index(RowCompareExpr *clause,
 							int indexcol,
 							List **indexcolnos,
 							bool *var_on_left_p);
+extern Oid plan_speculative_use_index(PlannerInfo *root, List *indexList);
 
 /*
  * tidpath.h
diff --git a/src/include/optimizer/plancat.h b/src/include/optimizer/plancat.h
index 8eb2e57..878adfe 100644
--- a/src/include/optimizer/plancat.h
+++ b/src/include/optimizer/plancat.h
@@ -28,6 +28,8 @@ extern PGDLLIMPORT get_relation_info_hook_type get_relation_info_hook;
 extern void get_relation_info(PlannerInfo *root, Oid relationObjectId,
 				  bool inhparent, RelOptInfo *rel);
 
+extern Oid infer_unique_index(PlannerInfo *root);
+
 extern void estimate_rel_size(Relation rel, int32 *attr_widths,
 				  BlockNumber *pages, double *tuples, double *allvisfrac);
 
diff --git a/src/include/optimizer/planmain.h b/src/include/optimizer/planmain.h
index 082f7d7..d8cde27 100644
--- a/src/include/optimizer/planmain.h
+++ b/src/include/optimizer/planmain.h
@@ -84,7 +84,8 @@ extern ModifyTable *make_modifytable(PlannerInfo *root,
 				 CmdType operation, bool canSetTag,
 				 List *resultRelations, List *subplans,
 				 List *withCheckOptionLists, List *returningLists,
-				 List *rowMarks, int epqParam);
+				 List *rowMarks, Plan *onConflictPlan, SpecCmd spec,
+				 int epqParam);
 extern bool is_projection_capable_plan(Plan *plan);
 
 /*
diff --git a/src/include/parser/kwlist.h b/src/include/parser/kwlist.h
index 7c243ec..cf501e6 100644
--- a/src/include/parser/kwlist.h
+++ b/src/include/parser/kwlist.h
@@ -87,6 +87,7 @@ PG_KEYWORD("commit", COMMIT, UNRESERVED_KEYWORD)
 PG_KEYWORD("committed", COMMITTED, UNRESERVED_KEYWORD)
 PG_KEYWORD("concurrently", CONCURRENTLY, TYPE_FUNC_NAME_KEYWORD)
 PG_KEYWORD("configuration", CONFIGURATION, UNRESERVED_KEYWORD)
+PG_KEYWORD("conflict", CONFLICT, UNRESERVED_KEYWORD)
 PG_KEYWORD("connection", CONNECTION, UNRESERVED_KEYWORD)
 PG_KEYWORD("constraint", CONSTRAINT, RESERVED_KEYWORD)
 PG_KEYWORD("constraints", CONSTRAINTS, UNRESERVED_KEYWORD)
@@ -180,6 +181,7 @@ PG_KEYWORD("hold", HOLD, UNRESERVED_KEYWORD)
 PG_KEYWORD("hour", HOUR_P, UNRESERVED_KEYWORD)
 PG_KEYWORD("identity", IDENTITY_P, UNRESERVED_KEYWORD)
 PG_KEYWORD("if", IF_P, UNRESERVED_KEYWORD)
+PG_KEYWORD("ignore", IGNORE_P, UNRESERVED_KEYWORD)
 PG_KEYWORD("ilike", ILIKE, TYPE_FUNC_NAME_KEYWORD)
 PG_KEYWORD("immediate", IMMEDIATE, UNRESERVED_KEYWORD)
 PG_KEYWORD("immutable", IMMUTABLE, UNRESERVED_KEYWORD)
diff --git a/src/include/parser/parse_clause.h b/src/include/parser/parse_clause.h
index 6a4438f..d1d0d12 100644
--- a/src/include/parser/parse_clause.h
+++ b/src/include/parser/parse_clause.h
@@ -41,6 +41,8 @@ extern List *transformDistinctClause(ParseState *pstate,
 						List **targetlist, List *sortClause, bool is_agg);
 extern List *transformDistinctOnClause(ParseState *pstate, List *distinctlist,
 						  List **targetlist, List *sortClause);
+extern void transformConflictClause(ParseState *pstate, ConflictClause *confClause,
+									List **arbiterExpr, Node **arbiterWhere);
 
 extern List *addTargetToSortList(ParseState *pstate, TargetEntry *tle,
 					List *sortlist, List *targetlist, SortBy *sortby,
diff --git a/src/include/parser/parse_node.h b/src/include/parser/parse_node.h
index 3103b71..2b5804e 100644
--- a/src/include/parser/parse_node.h
+++ b/src/include/parser/parse_node.h
@@ -153,6 +153,7 @@ struct ParseState
 	bool		p_hasModifyingCTE;
 	bool		p_is_insert;
 	bool		p_is_update;
+	bool		p_is_speculative;
 	bool		p_locked_from_parent;
 	Relation	p_target_relation;
 	RangeTblEntry *p_target_rangetblentry;
diff --git a/src/include/storage/lmgr.h b/src/include/storage/lmgr.h
index f5d70e5..6bb95fc 100644
--- a/src/include/storage/lmgr.h
+++ b/src/include/storage/lmgr.h
@@ -76,6 +76,11 @@ extern bool ConditionalXactLockTableWait(TransactionId xid);
 extern void WaitForLockers(LOCKTAG heaplocktag, LOCKMODE lockmode);
 extern void WaitForLockersMultiple(List *locktags, LOCKMODE lockmode);
 
+/* Lock an XID for tuple insertion (used to wait for an insertion to finish) */
+extern void SpeculativeInsertionLockAcquire(TransactionId xid);
+extern void SpeculativeInsertionLockRelease(TransactionId xid);
+extern void SpeculativeInsertionWait(TransactionId xid, uint32 token);
+
 /* Lock a general object (other than a relation) of the current database */
 extern void LockDatabaseObject(Oid classid, Oid objid, uint16 objsubid,
 				   LOCKMODE lockmode);
diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h
index 1100923..9c21810 100644
--- a/src/include/storage/lock.h
+++ b/src/include/storage/lock.h
@@ -176,6 +176,8 @@ typedef enum LockTagType
 	/* ID info for a transaction is its TransactionId */
 	LOCKTAG_VIRTUALTRANSACTION, /* virtual transaction (ditto) */
 	/* ID info for a virtual transaction is its VirtualTransactionId */
+	LOCKTAG_PROMISE_TUPLE_INSERTION, /* tuple insertion, keyed by Xid */
+	/* ID info for a transaction is its TransactionId */
 	LOCKTAG_OBJECT,				/* non-relation database object */
 	/* ID info for an object is DB OID + CLASS OID + OBJECT OID + SUBID */
 
@@ -261,6 +263,14 @@ typedef struct LOCKTAG
 	 (locktag).locktag_type = LOCKTAG_VIRTUALTRANSACTION, \
 	 (locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
 
+#define SET_LOCKTAG_SPECULATIVE_INSERTION(locktag,xid,token) \
+	((locktag).locktag_field1 = (xid), \
+	 (locktag).locktag_field2 = (token),		\
+	 (locktag).locktag_field3 = 0, \
+	 (locktag).locktag_field4 = 0, \
+	 (locktag).locktag_type = LOCKTAG_PROMISE_TUPLE_INSERTION, \
+	 (locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
+
 #define SET_LOCKTAG_OBJECT(locktag,dboid,classoid,objoid,objsubid) \
 	((locktag).locktag_field1 = (dboid), \
 	 (locktag).locktag_field2 = (classoid), \
diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h
index e807a2e..cd15570 100644
--- a/src/include/storage/proc.h
+++ b/src/include/storage/proc.h
@@ -16,9 +16,11 @@
 
 #include "access/xlogdefs.h"
 #include "lib/ilist.h"
+#include "storage/itemptr.h"
 #include "storage/latch.h"
 #include "storage/lock.h"
 #include "storage/pg_sema.h"
+#include "storage/relfilenode.h"
 
 /*
  * Each backend advertises up to PGPROC_MAX_CACHED_SUBXIDS TransactionIds
@@ -132,6 +134,17 @@ struct PGPROC
 	 */
 	SHM_QUEUE	myProcLocks[NUM_LOCK_PARTITIONS];
 
+	/*
+	 * Info to allow us to perform speculative insertion without "unprincipled
+	 * deadlocks". This state allows others to wait on the outcome of an
+	 * optimistically inserted speculative tuple for only the duration of the
+	 * insertion (not to the end of our xact) iff the insertion does not work
+	 * out (due to our detecting a conflict).
+	 */
+	uint32		specInsertToken;
+	RelFileNode	specInsertRel;
+	ItemPointerData specInsertTid;
+
 	struct XidCache subxids;	/* cache for subtransaction XIDs */
 
 	/* Per-backend LWLock.  Protects fields below. */
diff --git a/src/include/storage/procarray.h b/src/include/storage/procarray.h
index 97c6e93..ea2bba9 100644
--- a/src/include/storage/procarray.h
+++ b/src/include/storage/procarray.h
@@ -55,6 +55,13 @@ extern TransactionId GetOldestXmin(Relation rel, bool ignoreVacuum);
 extern TransactionId GetOldestActiveTransactionId(void);
 extern TransactionId GetOldestSafeDecodingTransactionId(void);
 
+extern void SetSpeculativeInsertionToken(uint32 token);
+extern void SetSpeculativeInsertionTid(RelFileNode relnode, ItemPointer tid);
+extern void ClearSpeculativeInsertionState(void);
+extern uint32 SpeculativeInsertionIsInProgress(TransactionId xid,
+											   RelFileNode rel,
+											   ItemPointer tid);
+
 extern VirtualTransactionId *GetVirtualXIDsDelayingChkpt(int *nvxids);
 extern bool HaveVirtualXIDsDelayingChkpt(VirtualTransactionId *vxids, int nvxids);
 
diff --git a/src/include/utils/snapshot.h b/src/include/utils/snapshot.h
index 26fb257..cd5ad76 100644
--- a/src/include/utils/snapshot.h
+++ b/src/include/utils/snapshot.h
@@ -87,6 +87,17 @@ typedef struct SnapshotData
 	bool		copied;			/* false if it's a static snapshot */
 
 	/*
+	 * Snapshot's speculative token is a value set by HeapTupleSatisfiesDirty,
+	 * indicating that the tuple is being inserted speculatively, and may yet
+	 * be "super-deleted" before EOX. The caller may use the value with
+	 * SpeculativeInsertionWait to wait for the inserter to decide. It is only
+	 * set when a valid 'xmin' is set, too.  By convention, when
+	 * speculativeToken is zero, the caller must assume that it should wait on
+	 * a non-speculative tuple (i.e. wait for xmin/xmax to commit).
+	 */
+	uint32		speculativeToken;
+
+	/*
 	 * note: all ids in subxip[] are >= xmin, but we don't bother filtering
 	 * out any that are >= xmax
 	 */
-- 
1.9.1

