diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index bf2c91f..da29dab 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -109,6 +109,7 @@ typedef struct JhashState
 	HTAB	   *hash;
 	char	   *saved_scalar;
 	char	   *save_json_start;
+	JsonTokenType saved_token_type;
 } JHashState;
 
 /* hashtable element */
@@ -116,26 +117,49 @@ typedef struct JsonHashEntry
 {
 	char		fname[NAMEDATALEN];		/* hash key (MUST BE FIRST) */
 	char	   *val;
-	char	   *json;
-	bool		isnull;
+	JsonTokenType type;
 } JsonHashEntry;
 
-/* these two are stolen from hstore / record_out, used in populate_record* */
-typedef struct ColumnIOData
+/* structure to cache type I/O metadata needed for populate_scalar() */
+typedef struct ScalarIOData
 {
-	Oid			column_type;
-	Oid			typiofunc;
 	Oid			typioparam;
-	FmgrInfo	proc;
-} ColumnIOData;
+	FmgrInfo	typiofunc;
+} ScalarIOData;
+
+typedef struct ColumnIOData ColumnIOData;
+typedef struct RecordIOData RecordIOData;
 
-typedef struct RecordIOData
+/* structure to cache metadata needed for populate_composite() */
+typedef struct CompositeIOData
+{
+	/*
+	 * We use a pointer to RecordIOData here because the variable-length
+	 * struct RecordIOData can't be used directly in ColumnIOData.io union
+	 */
+	RecordIOData   *record_io;	/* metadata cache for populate_record() */
+	TupleDesc		tupdesc;	/* cached tuple descriptor */
+} CompositeIOData;
+
+/* these two are stolen from hstore / record_out, used in populate_record* */
+
+/* structure to cache record metadata needed for populate_record_field() */
+struct ColumnIOData
+{
+	Oid			typid;		/* column type id */
+	int32		typmod;		/* column type modifier */
+	ScalarIOData scalar_io;	/* metadata cache for direct conversion
+							 * through input function */
+};
+
+/* structure to cache record metadata needed for populate_record() */
+struct RecordIOData
 {
 	Oid			record_type;
 	int32		record_typmod;
 	int			ncolumns;
 	ColumnIOData columns[FLEXIBLE_ARRAY_MEMBER];
-} RecordIOData;
+};
 
 /* state for populate_recordset */
 typedef struct PopulateRecordsetState
@@ -145,10 +169,11 @@ typedef struct PopulateRecordsetState
 	HTAB	   *json_hash;
 	char	   *saved_scalar;
 	char	   *save_json_start;
+	JsonTokenType saved_token_type;
 	Tuplestorestate *tuple_store;
 	TupleDesc	ret_tdesc;
 	HeapTupleHeader rec;
-	RecordIOData *my_extra;
+	RecordIOData **my_extra;
 	MemoryContext fn_mcxt;		/* used to stash IO funcs */
 } PopulateRecordsetState;
 
@@ -160,6 +185,55 @@ typedef struct StripnullState
 	bool		skip_next_null;
 } StripnullState;
 
+/* structure for generalized json/jsonb value passing */
+typedef struct JsValue
+{
+	bool is_json;				/* json/jsonb */
+	union
+	{
+		struct
+		{
+			char   *str;		/* json string */
+			int		len;		/* json string length or -1 if null-terminated */
+			JsonTokenType type;	/* json type */
+		} json;					/* json value */
+
+		JsonbValue *jsonb;		/* jsonb value */
+	} val;
+} JsValue;
+
+typedef struct JsObject
+{
+	bool		is_json;		/* json/jsonb */
+	union
+	{
+		HTAB		   *json_hash;
+		JsonbContainer *jsonb_cont;
+	} val;
+} JsObject;
+
+#define JsValueIsNull(jsv) \
+	((jsv)->is_json ?  \
+		(!(jsv)->val.json.str || (jsv)->val.json.type == JSON_TOKEN_NULL) : \
+		(!(jsv)->val.jsonb || (jsv)->val.jsonb->type == jbvNull))
+
+#define JsValueIsString(jsv) \
+	((jsv)->is_json ? (jsv)->val.json.type == JSON_TOKEN_STRING \
+		: ((jsv)->val.jsonb && (jsv)->val.jsonb->type == jbvString))
+
+#define JsObjectSize(jso) \
+	((jso)->is_json \
+		? hash_get_num_entries((jso)->val.json_hash) \
+		: !(jso)->val.jsonb_cont || JsonContainerSize((jso)->val.jsonb_cont))
+
+#define JsObjectIsEmpty(jso) (JsObjectSize(jso) == 0)
+
+#define JsObjectFree(jso) do { \
+		if ((jso)->is_json) \
+			hash_destroy((jso)->val.json_hash); \
+	} while (0)
+
+
 /* semantic action functions for json_object_keys */
 static void okeys_object_field_start(void *state, char *fname, bool isnull);
 static void okeys_array_start(void *state);
@@ -211,7 +285,7 @@ static void elements_array_element_end(void *state, bool isnull);
 static void elements_scalar(void *state, char *token, JsonTokenType tokentype);
 
 /* turn a json object into a hash table */
-static HTAB *get_json_object_as_hash(text *json, const char *funcname);
+static HTAB *get_json_object_as_hash(char *json, int len, const char *funcname);
 
 /* common worker for populate_record and to_record */
 static Datum populate_record_worker(FunctionCallInfo fcinfo, const char *funcname,
@@ -241,10 +315,6 @@ static void sn_object_field_start(void *state, char *fname, bool isnull);
 static void sn_array_element_start(void *state, bool isnull);
 static void sn_scalar(void *state, char *token, JsonTokenType tokentype);
 
-/* Turn a jsonb object into a record */
-static void make_row_from_rec_and_jsonb(Jsonb *element,
-							PopulateRecordsetState *state);
-
 /* worker function for populate_recordset and to_recordset */
 static Datum populate_recordset_worker(FunctionCallInfo fcinfo, const char *funcname,
 						  bool have_record_arg);
@@ -271,6 +341,16 @@ static void setPathArray(JsonbIterator **it, Datum *path_elems,
 			 int level, Jsonb *newval, uint32 nelems, int op_type);
 static void addJsonbToParseState(JsonbParseState **jbps, Jsonb *jb);
 
+/* helper functions for populate_record[set] */
+static HeapTupleHeader
+populate_record(TupleDesc tupdesc, RecordIOData  **record_info,
+				HeapTupleHeader template, MemoryContext mcxt,
+				JsObject *obj);
+
+static Datum
+populate_record_field(ColumnIOData *col, Oid typid, int32 typmod,
+					  const char *colname, MemoryContext mcxt,
+					  Datum defaultval, JsValue *jsv, bool *isnull);
 
 /*
  * SQL function json_object_keys
@@ -2099,158 +2179,279 @@ json_to_record(PG_FUNCTION_ARGS)
 	return populate_record_worker(fcinfo, "json_to_record", false);
 }
 
-static Datum
-populate_record_worker(FunctionCallInfo fcinfo, const char *funcname,
-					   bool have_record_arg)
+static void
+JsValueToJsObject(JsValue *jsv, JsObject *jso)
 {
-	int			json_arg_num = have_record_arg ? 1 : 0;
-	Oid			jtype = get_fn_expr_argtype(fcinfo->flinfo, json_arg_num);
-	text	   *json;
-	Jsonb	   *jb = NULL;
-	HTAB	   *json_hash = NULL;
-	HeapTupleHeader rec = NULL;
-	Oid			tupType = InvalidOid;
-	int32		tupTypmod = -1;
-	TupleDesc	tupdesc;
-	HeapTupleData tuple;
-	HeapTuple	rettuple;
-	RecordIOData *my_extra;
-	int			ncolumns;
-	int			i;
-	Datum	   *values;
-	bool	   *nulls;
-
-	Assert(jtype == JSONOID || jtype == JSONBOID);
+	jso->is_json = jsv->is_json;
 
-	if (have_record_arg)
+	if (jsv->is_json)
 	{
-		Oid			argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
+		/* convert plain-text json into a hash table */
+		jso->val.json_hash =
+				get_json_object_as_hash(jsv->val.json.str,
+										jsv->val.json.len >= 0
+											 ? jsv->val.json.len
+											 : strlen(jsv->val.json.str),
+										"populate_composite");
+	}
+	else
+	{
+		JsonbValue *jbv = jsv->val.jsonb;
 
-		if (!type_is_rowtype(argtype))
-			ereport(ERROR,
-					(errcode(ERRCODE_DATATYPE_MISMATCH),
-					 errmsg("first argument of %s must be a row type",
-							funcname)));
+		if (jbv->type == jbvBinary &&
+			JsonContainerIsObject(jbv->val.binary.data))
+			jso->val.jsonb_cont = jbv->val.binary.data;
+		else
+			jso->val.jsonb_cont = NULL;
+	}
+}
 
-		if (PG_ARGISNULL(0))
-		{
-			if (PG_ARGISNULL(1))
-				PG_RETURN_NULL();
+/* populate recursively composite (row type) value from json/jsonb */
+static Datum
+populate_composite(CompositeIOData *io,			/* metadata cache */
+				   Oid				typid,		/* row type id */
+				   int32			typmod,		/* row type modifier */
+				   const char	   *colname,	/* for diagnostics only */
+				   MemoryContext	mcxt,		/* cache memory context */
+				   HeapTupleHeader	defaultval,	/* default row value, if any */
+				   JsValue		   *jsv)		/* json/jsonb object */
+{
+	HeapTupleHeader	tuple;
+	JsObject		jso;
 
-			/*
-			 * have no tuple to look at, so the only source of type info is
-			 * the argtype. The lookup_rowtype_tupdesc call below will error
-			 * out if we don't have a known composite type oid here.
-			 */
-			tupType = argtype;
-			tupTypmod = -1;
-		}
-		else
-		{
-			rec = PG_GETARG_HEAPTUPLEHEADER(0);
+	/* acquire cached tuple descriptor */
+	if (!io->tupdesc ||
+		io->tupdesc->tdtypeid != typid ||
+		io->tupdesc->tdtypmod != typmod)
+	{
+		TupleDesc 		tupdesc = lookup_rowtype_tupdesc(typid, typmod);
+		MemoryContext	oldcxt;
 
-			if (PG_ARGISNULL(1))
-				PG_RETURN_POINTER(rec);
+		if (io->tupdesc)
+			FreeTupleDesc(io->tupdesc);
 
-			/* Extract type info from the tuple itself */
-			tupType = HeapTupleHeaderGetTypeId(rec);
-			tupTypmod = HeapTupleHeaderGetTypMod(rec);
-		}
+		/* copy tuple desc without constraints into cache memory context */
+		oldcxt = MemoryContextSwitchTo(mcxt);
+		io->tupdesc = CreateTupleDescCopy(tupdesc);
+		MemoryContextSwitchTo(oldcxt);
 
-		tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
+		ReleaseTupleDesc(tupdesc);
 	}
-	else
-	{
-		/* json{b}_to_record case */
-		if (PG_ARGISNULL(0))
-			PG_RETURN_NULL();
 
-		if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
-			ereport(ERROR,
-					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-					 errmsg("function returning record called in context "
-							"that cannot accept type record"),
-					 errhint("Try calling the function in the FROM clause "
-							 "using a column definition list.")));
-	}
+	/* prepare input value */
+	JsValueToJsObject(jsv, &jso);
 
-	if (jtype == JSONOID)
-	{
-		/* just get the text */
-		json = PG_GETARG_TEXT_PP(json_arg_num);
+	/* populate resulting record tuple */
+	tuple = populate_record(io->tupdesc, &io->record_io,
+							defaultval, mcxt, &jso);
+
+	JsObjectFree(&jso);
+
+	return HeapTupleHeaderGetDatum(tuple);
+}
 
-		json_hash = get_json_object_as_hash(json, funcname);
+/* populate non-null scalar value from json/jsonb value */
+static Datum
+populate_scalar(ScalarIOData   *io,			/* metadata cache */
+				Oid				typid,		/* scalar type id */
+				int32			typmod,		/* scalar type modifier */
+				JsValue		   *jsv)		/* json/jsonb value to convert */
+{
+	Datum		res;
+	char	   *str;
+	char	   *json = NULL;
 
+	if (!jsv)
+	{
 		/*
-		 * if the input json is empty, we can only skip the rest if we were
-		 * passed in a non-null record, since otherwise there may be issues
-		 * with domain nulls.
+		 * Need InputFunctionCall to happen even for NULLs, so that domain
+		 * checks are done.
 		 */
-		if (hash_get_num_entries(json_hash) == 0 && rec)
+		str = NULL;
+	}
+	else if (jsv->is_json)
+	{
+		/* already done the hard work in the json case */
+		int			len = jsv->val.json.len;
+
+		json = jsv->val.json.str;
+		Assert(json);
+
+		if (len >= 0)
 		{
-			hash_destroy(json_hash);
-			ReleaseTupleDesc(tupdesc);
-			PG_RETURN_POINTER(rec);
+			/* Need to copy non-null-terminated string */
+			str = palloc((len + 1) * sizeof(char));
+			memcpy(str, json, len);
+			str[len] = '\0';
 		}
+		else
+			str = json;		/* null-terminated string */
 	}
 	else
 	{
-		jb = PG_GETARG_JSONB(json_arg_num);
-
-		/* same logic as for json */
-		if (JB_ROOT_COUNT(jb) == 0 && rec)
+		JsonbValue *jbv = jsv->val.jsonb;
+
+		if (jbv->type == jbvString) /* quotes are stripped */
+			str = pnstrdup(jbv->val.string.val, jbv->val.string.len);
+		else if (jbv->type == jbvBool)
+			str = pnstrdup(jbv->val.boolean ? "t" : "f", 1);
+		else if (jbv->type == jbvNumeric)
+			str = DatumGetCString(DirectFunctionCall1(numeric_out,
+										PointerGetDatum(jbv->val.numeric)));
+		else if (jbv->type == jbvBinary)
+			str = JsonbToCString(NULL, jbv->val.binary.data,
+									   jbv->val.binary.len);
+		else
 		{
-			ReleaseTupleDesc(tupdesc);
-			PG_RETURN_POINTER(rec);
+			elog(ERROR, "unrecognized jsonb type: %d", (int) jbv->type);
+			str = NULL;
 		}
 	}
 
-	ncolumns = tupdesc->natts;
+	res = InputFunctionCall(&io->typiofunc, str, io->typioparam, typmod);
+
+	/* free temporary buffer */
+	if (str != json)
+		pfree(str);
+
+	return res;
+}
+
+/* prepare column metadata cache for the given type */
+static void
+prepare_column_cache(ColumnIOData  *column,		/* metadata cache */
+					 Oid			typid,		/* column type id */
+					 int32			typmod,		/* column type modifier */
+					 MemoryContext	mcxt,		/* cache memory context */
+					 bool			json)		/* json/jsonb */
+{
+	Oid				typioproc;
+
+	column->typid = typid;
+	column->typmod = typmod;
+
+	getTypeInputInfo(typid, &typioproc, &column->scalar_io.typioparam);
+	fmgr_info_cxt(typioproc, &column->scalar_io.typiofunc, mcxt);
+}
+
+/* populate recursively a record field or an array element from json/jsonb value */
+static Datum
+populate_record_field(ColumnIOData *col,		/* metadata cache */
+					  Oid			typid,		/* field type id */
+					  int32			typmod,		/* field type modifier */
+					  const char   *colname,	/* for diagnostics only */
+					  MemoryContext	mcxt,		/* cache memory context */
+					  Datum			defaultval,	/* default record value if any */
+					  JsValue	   *jsv,		/* json/jsonb value to convert */
+					  bool		   *isnull)		/* value is NULL */
+{
+	check_stack_depth();
+
+	/* prepare column metadata cache for the given type */
+	if (col->typid != typid || col->typmod != typmod)
+		prepare_column_cache(col, typid, typmod, mcxt, jsv->is_json);
+
+	*isnull = JsValueIsNull(jsv);
+
+	return populate_scalar(&col->scalar_io, typid, typmod,
+						   *isnull ?  NULL : jsv);
+}
+
+static RecordIOData *
+allocate_record_info(MemoryContext mcxt, int ncolumns)
+{
+	RecordIOData *data = (RecordIOData *)
+			MemoryContextAlloc(mcxt,
+								offsetof(RecordIOData, columns) +
+								ncolumns * sizeof(ColumnIOData));
 
-	if (rec)
+	data->record_type = InvalidOid;
+	data->record_typmod = 0;
+	data->ncolumns = ncolumns;
+	MemSet(data->columns, 0, sizeof(ColumnIOData) * ncolumns);
+
+	return data;
+}
+
+static bool
+JsObjectGetField(JsObject *obj, char *field, JsValue *jsv)
+{
+	jsv->is_json = obj->is_json;
+
+	if (jsv->is_json)
 	{
-		/* Build a temporary HeapTuple control structure */
-		tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
-		ItemPointerSetInvalid(&(tuple.t_self));
-		tuple.t_tableOid = InvalidOid;
-		tuple.t_data = rec;
+		JsonHashEntry *hashentry = hash_search(obj->val.json_hash, field,
+											   HASH_FIND, NULL);
+
+		jsv->val.json.type = hashentry ? hashentry->type : JSON_TOKEN_NULL;
+		jsv->val.json.str = jsv->val.json.type == JSON_TOKEN_NULL ? NULL :
+															hashentry->val;
+		jsv->val.json.len = jsv->val.json.str ? -1 : 0; /* null-terminated */
+
+		return hashentry != NULL;
+	}
+	else
+	{
+		jsv->val.jsonb = !obj->val.jsonb_cont ? NULL :
+				findJsonbValueFromContainerLen(obj->val.jsonb_cont, JB_FOBJECT,
+											   field, strlen(field));
+
+		return jsv->val.jsonb != NULL;
 	}
+}
+
+/* populate record tuple from json/jsonb value */
+static HeapTupleHeader
+populate_record(TupleDesc		tupdesc,	/* record tuple descriptor */
+				RecordIOData  **precord,	/* metadata cache location */
+				HeapTupleHeader	defaultval, /* default record value if any */
+				MemoryContext	mcxt,		/* cache memory context */
+				JsObject	   *obj)		/* json/jsonb object */
+{
+	RecordIOData   *record = *precord;
+	Datum		   *values;
+	bool		   *nulls;
+	HeapTuple		res;
+	int				ncolumns = tupdesc->natts;
+	int				i;
 
 	/*
-	 * We arrange to look up the needed I/O info just once per series of
-	 * calls, assuming the record type doesn't change underneath us.
+	 * if the input json is empty, we can only skip the rest if we were
+	 * passed in a non-null record, since otherwise there may be issues
+	 * with domain nulls.
 	 */
-	my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
-	if (my_extra == NULL ||
-		my_extra->ncolumns != ncolumns)
-	{
-		fcinfo->flinfo->fn_extra =
-			MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
-							   offsetof(RecordIOData, columns) +
-							   ncolumns * sizeof(ColumnIOData));
-		my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
-		my_extra->record_type = InvalidOid;
-		my_extra->record_typmod = 0;
-		my_extra->ncolumns = ncolumns;
-		MemSet(my_extra->columns, 0, sizeof(ColumnIOData) * ncolumns);
-	}
+	if (defaultval && JsObjectIsEmpty(obj))
+		return defaultval;
 
-	if (have_record_arg && (my_extra->record_type != tupType ||
-							my_extra->record_typmod != tupTypmod))
+	/* (re)allocate metadata cache */
+	if (record == NULL ||
+		record->ncolumns != ncolumns)
+		*precord = record = allocate_record_info(mcxt, ncolumns);
+
+	/* invalidate metadata cache if the record type has changed */
+	if (record->record_type != tupdesc->tdtypeid ||
+		record->record_typmod != tupdesc->tdtypmod)
 	{
-		MemSet(my_extra, 0,
-			   offsetof(RecordIOData, columns) +
-			   ncolumns * sizeof(ColumnIOData));
-		my_extra->record_type = tupType;
-		my_extra->record_typmod = tupTypmod;
-		my_extra->ncolumns = ncolumns;
+		MemSet(record, 0, offsetof(RecordIOData, columns) +
+							ncolumns * sizeof(ColumnIOData));
+		record->record_type = tupdesc->tdtypeid;
+		record->record_typmod = tupdesc->tdtypmod;
+		record->ncolumns = ncolumns;
 	}
 
 	values = (Datum *) palloc(ncolumns * sizeof(Datum));
 	nulls = (bool *) palloc(ncolumns * sizeof(bool));
 
-	if (rec)
+	if (defaultval)
 	{
+		HeapTupleData tuple;
+
+		/* Build a temporary HeapTuple control structure */
+		tuple.t_len = HeapTupleHeaderGetDatumLength(defaultval);
+		ItemPointerSetInvalid(&(tuple.t_self));
+		tuple.t_tableOid = InvalidOid;
+		tuple.t_data = defaultval;
+
 		/* Break down the tuple into fields */
 		heap_deform_tuple(&tuple, tupdesc, values, nulls);
 	}
@@ -2265,31 +2466,19 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname,
 
 	for (i = 0; i < ncolumns; ++i)
 	{
-		ColumnIOData *column_info = &my_extra->columns[i];
-		Oid			column_type = tupdesc->attrs[i]->atttypid;
-		JsonbValue *v = NULL;
-		JsonHashEntry *hashentry = NULL;
+		Form_pg_attribute	att = tupdesc->attrs[i];
+		char			   *colname = NameStr(att->attname);
+		JsValue				field = { 0 };
+		bool				found;
 
 		/* Ignore dropped columns in datatype */
-		if (tupdesc->attrs[i]->attisdropped)
+		if (att->attisdropped)
 		{
 			nulls[i] = true;
 			continue;
 		}
 
-		if (jtype == JSONOID)
-		{
-			hashentry = hash_search(json_hash,
-									NameStr(tupdesc->attrs[i]->attname),
-									HASH_FIND, NULL);
-		}
-		else
-		{
-			char	   *key = NameStr(tupdesc->attrs[i]->attname);
-
-			v = findJsonbValueFromContainerLen(&jb->root, JB_FOBJECT, key,
-											   strlen(key));
-		}
+		found = JsObjectGetField(obj, colname, &field);
 
 		/*
 		 * we can't just skip here if the key wasn't found since we might have
@@ -2299,73 +2488,158 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname,
 		 * then every field which we don't populate needs to be run through
 		 * the input function just in case it's a domain type.
 		 */
-		if (((jtype == JSONOID && hashentry == NULL) ||
-			 (jtype == JSONBOID && v == NULL)) && rec)
+		if (defaultval && !found)
 			continue;
 
-		/*
-		 * Prepare to convert the column value from text
-		 */
-		if (column_info->column_type != column_type)
+		values[i] = populate_record_field(&record->columns[i],
+										  att->atttypid,
+										  att->atttypmod,
+										  colname,
+										  mcxt,
+										  nulls[i] ? (Datum) 0 : values[i],
+										  &field,
+										  &nulls[i]);
+	}
+
+	res = heap_form_tuple(tupdesc, values, nulls);
+
+	pfree(values);
+	pfree(nulls);
+
+	return res->t_data;
+}
+
+/* structure to cache metadata needed for populate_record_worker() */
+typedef struct PopulateRecordCache
+{
+	Oid				argtype;	/* verified row type of the first argument */
+	CompositeIOData io;			/* metadata cache for populate_composite() */
+} PopulateRecordCache;
+
+static Datum
+populate_record_worker(FunctionCallInfo fcinfo, const char *funcname,
+					   bool have_record_arg)
+{
+	int			json_arg_num = have_record_arg ? 1 : 0;
+	Oid			jtype = get_fn_expr_argtype(fcinfo->flinfo, json_arg_num);
+	JsValue		jsv = { 0 };
+	HeapTupleHeader rec = NULL;
+	Oid			tupType;
+	int32		tupTypmod;
+	TupleDesc	tupdesc = NULL;
+	Datum		rettuple;
+	JsonbValue	jbv;
+	MemoryContext fnmcxt = fcinfo->flinfo->fn_mcxt;
+	PopulateRecordCache *cache = fcinfo->flinfo->fn_extra;
+
+	Assert(jtype == JSONOID || jtype == JSONBOID);
+
+	/*
+	 * We arrange to look up the needed I/O info just once per series of
+	 * calls, assuming the record type doesn't change underneath us.
+	 */
+	if (!cache)
+		fcinfo->flinfo->fn_extra = cache =
+				MemoryContextAllocZero(fnmcxt, sizeof(*cache));
+
+	if (have_record_arg)
+	{
+		Oid			argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
+
+		if (cache->argtype != argtype)
 		{
-			getTypeInputInfo(column_type,
-							 &column_info->typiofunc,
-							 &column_info->typioparam);
-			fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
-						  fcinfo->flinfo->fn_mcxt);
-			column_info->column_type = column_type;
+			if (!type_is_rowtype(argtype))
+				ereport(ERROR,
+						(errcode(ERRCODE_DATATYPE_MISMATCH),
+						 errmsg("first argument of %s must be a row type",
+								funcname)));
+
+			cache->argtype = argtype;
 		}
-		if ((jtype == JSONOID && (hashentry == NULL || hashentry->isnull)) ||
-			(jtype == JSONBOID && (v == NULL || v->type == jbvNull)))
+
+		if (PG_ARGISNULL(0))
 		{
+			if (PG_ARGISNULL(1))
+				PG_RETURN_NULL();
+
 			/*
-			 * need InputFunctionCall to happen even for nulls, so that domain
-			 * checks are done
+			 * have no tuple to look at, so the only source of type info is
+			 * the argtype. The lookup_rowtype_tupdesc call below will error
+			 * out if we don't have a known composite type oid here.
 			 */
-			values[i] = InputFunctionCall(&column_info->proc, NULL,
-										  column_info->typioparam,
-										  tupdesc->attrs[i]->atttypmod);
-			nulls[i] = true;
+			tupType = argtype;
+			tupTypmod = -1;
 		}
 		else
 		{
-			char	   *s = NULL;
+			rec = PG_GETARG_HEAPTUPLEHEADER(0);
 
-			if (jtype == JSONOID)
-			{
-				/* already done the hard work in the json case */
-				s = hashentry->val;
-			}
-			else
-			{
-				if (v->type == jbvString)
-					s = pnstrdup(v->val.string.val, v->val.string.len);
-				else if (v->type == jbvBool)
-					s = pnstrdup((v->val.boolean) ? "t" : "f", 1);
-				else if (v->type == jbvNumeric)
-					s = DatumGetCString(DirectFunctionCall1(numeric_out,
-										   PointerGetDatum(v->val.numeric)));
-				else if (v->type == jbvBinary)
-					s = JsonbToCString(NULL, (JsonbContainer *) v->val.binary.data, v->val.binary.len);
-				else
-					elog(ERROR, "unrecognized jsonb type: %d", (int) v->type);
-			}
+			if (PG_ARGISNULL(1))
+				PG_RETURN_POINTER(rec);
 
-			values[i] = InputFunctionCall(&column_info->proc, s,
-										  column_info->typioparam,
-										  tupdesc->attrs[i]->atttypmod);
-			nulls[i] = false;
+			/* Extract type info from the tuple itself */
+			tupType = HeapTupleHeaderGetTypeId(rec);
+			tupTypmod = HeapTupleHeaderGetTypMod(rec);
 		}
 	}
+	else
+	{
+		/* json{b}_to_record case */
+		if (PG_ARGISNULL(0))
+			PG_RETURN_NULL();
 
-	rettuple = heap_form_tuple(tupdesc, values, nulls);
+		if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+			ereport(ERROR,
+					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+					 errmsg("function returning record called in context "
+							"that cannot accept type record"),
+					 errhint("Try calling the function in the FROM clause "
+							 "using a column definition list.")));
+
+		Assert(tupdesc);
+
+		/*
+		 * Add tupdesc to the cache and set the appropriate values of
+		 * tupType/tupTypmod for proper cache usage in populate_composite().
+		 */
+		cache->io.tupdesc = tupdesc;
+
+		tupType = tupdesc->tdtypeid;
+		tupTypmod = tupdesc->tdtypmod;
+	}
+
+	jsv.is_json = jtype == JSONOID;
+
+	if (jsv.is_json)
+	{
+		text	   *json = PG_GETARG_TEXT_PP(json_arg_num);
+
+		jsv.val.json.str = VARDATA_ANY(json);
+		jsv.val.json.len = VARSIZE_ANY_EXHDR(json);
+		jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in populate_composite() */
+	}
+	else
+	{
+		Jsonb	   *jb = PG_GETARG_JSONB(json_arg_num);
+
+		jsv.val.jsonb = &jbv;
+
+		/* fill binary jsonb value pointing to jb */
+		jbv.type = jbvBinary;
+		jbv.val.binary.data = &jb->root;
+		jbv.val.binary.len = VARSIZE(jb) - VARHDRSZ;
+	}
 
-	ReleaseTupleDesc(tupdesc);
+	rettuple = populate_composite(&cache->io, tupType, tupTypmod,
+								  NULL, fnmcxt, rec, &jsv);
 
-	if (json_hash)
-		hash_destroy(json_hash);
+	if (tupdesc)
+	{
+		cache->io.tupdesc = NULL;
+		ReleaseTupleDesc(tupdesc);
+	}
 
-	PG_RETURN_DATUM(HeapTupleGetDatum(rettuple));
+	PG_RETURN_DATUM(rettuple);
 }
 
 /*
@@ -2374,12 +2648,12 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname,
  * decompose a json object into a hash table.
  */
 static HTAB *
-get_json_object_as_hash(text *json, const char *funcname)
+get_json_object_as_hash(char *json, int len, const char *funcname)
 {
 	HASHCTL		ctl;
 	HTAB	   *tab;
 	JHashState *state;
-	JsonLexContext *lex = makeJsonLexContext(json, true);
+	JsonLexContext *lex = makeJsonLexContextCstringLen(json, len, true);
 	JsonSemAction *sem;
 
 	memset(&ctl, 0, sizeof(ctl));
@@ -2417,6 +2691,9 @@ hash_object_field_start(void *state, char *fname, bool isnull)
 	if (_state->lex->lex_level > 1)
 		return;
 
+	/* remember token type */
+	_state->saved_token_type = _state->lex->token_type;
+
 	if (_state->lex->token_type == JSON_TOKEN_ARRAY_START ||
 		_state->lex->token_type == JSON_TOKEN_OBJECT_START)
 	{
@@ -2460,7 +2737,9 @@ hash_object_field_end(void *state, char *fname, bool isnull)
 	 * that, a later field with the same name overrides the earlier field.
 	 */
 
-	hashentry->isnull = isnull;
+	hashentry->type = _state->saved_token_type;
+	Assert(isnull == (hashentry->type == JSON_TOKEN_NULL));
+
 	if (_state->save_json_start != NULL)
 	{
 		int			len = _state->lex->prev_token_terminator - _state->save_json_start;
@@ -2499,7 +2778,11 @@ hash_scalar(void *state, char *token, JsonTokenType tokentype)
 			   errmsg("cannot call %s on a scalar", _state->function_name)));
 
 	if (_state->lex->lex_level == 1)
+	{
 		_state->saved_scalar = token;
+		/* saved_token_type must already be set in hash_object_field_start() */
+		Assert(_state->saved_token_type == tokentype);
+	}
 }
 
 
@@ -2538,121 +2821,21 @@ json_to_recordset(PG_FUNCTION_ARGS)
 }
 
 static void
-make_row_from_rec_and_jsonb(Jsonb *element, PopulateRecordsetState *state)
+populate_recordset_record(PopulateRecordsetState *state, JsObject *obj)
 {
-	Datum	   *values;
-	bool	   *nulls;
-	int			i;
-	RecordIOData *my_extra = state->my_extra;
-	int			ncolumns = my_extra->ncolumns;
-	TupleDesc	tupdesc = state->ret_tdesc;
-	HeapTupleHeader rec = state->rec;
-	HeapTuple	rettuple;
-
-	values = (Datum *) palloc(ncolumns * sizeof(Datum));
-	nulls = (bool *) palloc(ncolumns * sizeof(bool));
-
-	if (state->rec)
-	{
-		HeapTupleData tuple;
-
-		/* Build a temporary HeapTuple control structure */
-		tuple.t_len = HeapTupleHeaderGetDatumLength(state->rec);
-		ItemPointerSetInvalid(&(tuple.t_self));
-		tuple.t_tableOid = InvalidOid;
-		tuple.t_data = state->rec;
-
-		/* Break down the tuple into fields */
-		heap_deform_tuple(&tuple, tupdesc, values, nulls);
-	}
-	else
-	{
-		for (i = 0; i < ncolumns; ++i)
-		{
-			values[i] = (Datum) 0;
-			nulls[i] = true;
-		}
-	}
-
-	for (i = 0; i < ncolumns; ++i)
-	{
-		ColumnIOData *column_info = &my_extra->columns[i];
-		Oid			column_type = tupdesc->attrs[i]->atttypid;
-		JsonbValue *v = NULL;
-		char	   *key;
-
-		/* Ignore dropped columns in datatype */
-		if (tupdesc->attrs[i]->attisdropped)
-		{
-			nulls[i] = true;
-			continue;
-		}
-
-		key = NameStr(tupdesc->attrs[i]->attname);
-
-		v = findJsonbValueFromContainerLen(&element->root, JB_FOBJECT,
-										   key, strlen(key));
-
-		/*
-		 * We can't just skip here if the key wasn't found since we might have
-		 * a domain to deal with. If we were passed in a non-null record
-		 * datum, we assume that the existing values are valid (if they're
-		 * not, then it's not our fault), but if we were passed in a null,
-		 * then every field which we don't populate needs to be run through
-		 * the input function just in case it's a domain type.
-		 */
-		if (v == NULL && rec)
-			continue;
-
-		/*
-		 * Prepare to convert the column value from text
-		 */
-		if (column_info->column_type != column_type)
-		{
-			getTypeInputInfo(column_type,
-							 &column_info->typiofunc,
-							 &column_info->typioparam);
-			fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
-						  state->fn_mcxt);
-			column_info->column_type = column_type;
-		}
-		if (v == NULL || v->type == jbvNull)
-		{
-			/*
-			 * Need InputFunctionCall to happen even for nulls, so that domain
-			 * checks are done
-			 */
-			values[i] = InputFunctionCall(&column_info->proc, NULL,
-										  column_info->typioparam,
-										  tupdesc->attrs[i]->atttypmod);
-			nulls[i] = true;
-		}
-		else
-		{
-			char	   *s = NULL;
-
-			if (v->type == jbvString)
-				s = pnstrdup(v->val.string.val, v->val.string.len);
-			else if (v->type == jbvBool)
-				s = pnstrdup((v->val.boolean) ? "t" : "f", 1);
-			else if (v->type == jbvNumeric)
-				s = DatumGetCString(DirectFunctionCall1(numeric_out,
-										   PointerGetDatum(v->val.numeric)));
-			else if (v->type == jbvBinary)
-				s = JsonbToCString(NULL, (JsonbContainer *) v->val.binary.data, v->val.binary.len);
-			else
-				elog(ERROR, "unrecognized jsonb type: %d", (int) v->type);
-
-			values[i] = InputFunctionCall(&column_info->proc, s,
-										  column_info->typioparam,
-										  tupdesc->attrs[i]->atttypmod);
-			nulls[i] = false;
-		}
-	}
-
-	rettuple = heap_form_tuple(tupdesc, values, nulls);
-
-	tuplestore_puttuple(state->tuple_store, rettuple);
+	HeapTupleData	tuple;
+	HeapTupleHeader	tuphead = populate_record(state->ret_tdesc,
+											  state->my_extra,
+											  state->rec,
+											  state->fn_mcxt,
+											  obj);
+
+	tuple.t_len = HeapTupleHeaderGetDatumLength(tuphead);
+	ItemPointerSetInvalid(&(tuple.t_self));
+	tuple.t_tableOid = InvalidOid;
+	tuple.t_data = tuphead;
+
+	tuplestore_puttuple(state->tuple_store, &tuple);
 }
 
 /*
@@ -2666,12 +2849,8 @@ populate_recordset_worker(FunctionCallInfo fcinfo, const char *funcname,
 	Oid			jtype = get_fn_expr_argtype(fcinfo->flinfo, json_arg_num);
 	ReturnSetInfo *rsi;
 	MemoryContext old_cxt;
-	Oid			tupType;
-	int32		tupTypmod;
 	HeapTupleHeader rec;
 	TupleDesc	tupdesc;
-	RecordIOData *my_extra;
-	int			ncolumns;
 	PopulateRecordsetState *state;
 
 	if (have_record_arg)
@@ -2717,38 +2896,6 @@ populate_recordset_worker(FunctionCallInfo fcinfo, const char *funcname,
 	else
 		rec = PG_GETARG_HEAPTUPLEHEADER(0);
 
-	tupType = tupdesc->tdtypeid;
-	tupTypmod = tupdesc->tdtypmod;
-	ncolumns = tupdesc->natts;
-
-	/*
-	 * We arrange to look up the needed I/O info just once per series of
-	 * calls, assuming the record type doesn't change underneath us.
-	 */
-	my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
-	if (my_extra == NULL ||
-		my_extra->ncolumns != ncolumns)
-	{
-		fcinfo->flinfo->fn_extra =
-			MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
-							   offsetof(RecordIOData, columns) +
-							   ncolumns * sizeof(ColumnIOData));
-		my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
-		my_extra->record_type = InvalidOid;
-		my_extra->record_typmod = 0;
-	}
-
-	if (my_extra->record_type != tupType ||
-		my_extra->record_typmod != tupTypmod)
-	{
-		MemSet(my_extra, 0,
-			   offsetof(RecordIOData, columns) +
-			   ncolumns * sizeof(ColumnIOData));
-		my_extra->record_type = tupType;
-		my_extra->record_typmod = tupTypmod;
-		my_extra->ncolumns = ncolumns;
-	}
-
 	state = palloc0(sizeof(PopulateRecordsetState));
 
 	/* make these in a sufficiently long-lived memory context */
@@ -2761,7 +2908,7 @@ populate_recordset_worker(FunctionCallInfo fcinfo, const char *funcname,
 	MemoryContextSwitchTo(old_cxt);
 
 	state->function_name = funcname;
-	state->my_extra = my_extra;
+	state->my_extra = (RecordIOData **) &fcinfo->flinfo->fn_extra;
 	state->rec = rec;
 	state->fn_mcxt = fcinfo->flinfo->fn_mcxt;
 
@@ -2812,14 +2959,19 @@ populate_recordset_worker(FunctionCallInfo fcinfo, const char *funcname,
 
 			if (r == WJB_ELEM)
 			{
-				Jsonb	   *element = JsonbValueToJsonb(&v);
+				JsObject	obj;
 
-				if (!JB_ROOT_IS_OBJECT(element))
+				if (v.type != jbvBinary ||
+					!JsonContainerIsObject(v.val.binary.data))
 					ereport(ERROR,
 							(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 						 errmsg("argument of %s must be an array of objects",
 								funcname)));
-				make_row_from_rec_and_jsonb(element, state);
+
+				obj.is_json = false;
+				obj.val.jsonb_cont = v.val.binary.data;
+
+				populate_recordset_record(state, &obj);
 			}
 		}
 	}
@@ -2863,115 +3015,20 @@ static void
 populate_recordset_object_end(void *state)
 {
 	PopulateRecordsetState *_state = (PopulateRecordsetState *) state;
-	HTAB	   *json_hash = _state->json_hash;
-	Datum	   *values;
-	bool	   *nulls;
-	int			i;
-	RecordIOData *my_extra = _state->my_extra;
-	int			ncolumns = my_extra->ncolumns;
-	TupleDesc	tupdesc = _state->ret_tdesc;
-	JsonHashEntry *hashentry;
-	HeapTupleHeader rec = _state->rec;
-	HeapTuple	rettuple;
+	JsObject	obj;
 
 	/* Nested objects require no special processing */
 	if (_state->lex->lex_level > 1)
 		return;
 
-	/* Otherwise, construct and return a tuple based on this level-1 object */
-	values = (Datum *) palloc(ncolumns * sizeof(Datum));
-	nulls = (bool *) palloc(ncolumns * sizeof(bool));
-
-	if (_state->rec)
-	{
-		HeapTupleData tuple;
-
-		/* Build a temporary HeapTuple control structure */
-		tuple.t_len = HeapTupleHeaderGetDatumLength(_state->rec);
-		ItemPointerSetInvalid(&(tuple.t_self));
-		tuple.t_tableOid = InvalidOid;
-		tuple.t_data = _state->rec;
+	obj.is_json = true;
+	obj.val.json_hash = _state->json_hash;
 
-		/* Break down the tuple into fields */
-		heap_deform_tuple(&tuple, tupdesc, values, nulls);
-	}
-	else
-	{
-		for (i = 0; i < ncolumns; ++i)
-		{
-			values[i] = (Datum) 0;
-			nulls[i] = true;
-		}
-	}
-
-	for (i = 0; i < ncolumns; ++i)
-	{
-		ColumnIOData *column_info = &my_extra->columns[i];
-		Oid			column_type = tupdesc->attrs[i]->atttypid;
-		char	   *value;
-
-		/* Ignore dropped columns in datatype */
-		if (tupdesc->attrs[i]->attisdropped)
-		{
-			nulls[i] = true;
-			continue;
-		}
-
-		hashentry = hash_search(json_hash,
-								NameStr(tupdesc->attrs[i]->attname),
-								HASH_FIND, NULL);
-
-		/*
-		 * we can't just skip here if the key wasn't found since we might have
-		 * a domain to deal with. If we were passed in a non-null record
-		 * datum, we assume that the existing values are valid (if they're
-		 * not, then it's not our fault), but if we were passed in a null,
-		 * then every field which we don't populate needs to be run through
-		 * the input function just in case it's a domain type.
-		 */
-		if (hashentry == NULL && rec)
-			continue;
-
-		/*
-		 * Prepare to convert the column value from text
-		 */
-		if (column_info->column_type != column_type)
-		{
-			getTypeInputInfo(column_type,
-							 &column_info->typiofunc,
-							 &column_info->typioparam);
-			fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
-						  _state->fn_mcxt);
-			column_info->column_type = column_type;
-		}
-		if (hashentry == NULL || hashentry->isnull)
-		{
-			/*
-			 * need InputFunctionCall to happen even for nulls, so that domain
-			 * checks are done
-			 */
-			values[i] = InputFunctionCall(&column_info->proc, NULL,
-										  column_info->typioparam,
-										  tupdesc->attrs[i]->atttypmod);
-			nulls[i] = true;
-		}
-		else
-		{
-			value = hashentry->val;
-
-			values[i] = InputFunctionCall(&column_info->proc, value,
-										  column_info->typioparam,
-										  tupdesc->attrs[i]->atttypmod);
-			nulls[i] = false;
-		}
-	}
-
-	rettuple = heap_form_tuple(tupdesc, values, nulls);
-
-	tuplestore_puttuple(_state->tuple_store, rettuple);
+	/* Otherwise, construct and return a tuple based on this level-1 object */
+	populate_recordset_record(_state, &obj);
 
 	/* Done with hash for this object */
-	hash_destroy(json_hash);
+	hash_destroy(_state->json_hash);
 	_state->json_hash = NULL;
 }
 
@@ -3017,6 +3074,8 @@ populate_recordset_object_field_start(void *state, char *fname, bool isnull)
 	if (_state->lex->lex_level > 2)
 		return;
 
+	_state->saved_token_type = _state->lex->token_type;
+
 	if (_state->lex->token_type == JSON_TOKEN_ARRAY_START ||
 		_state->lex->token_type == JSON_TOKEN_OBJECT_START)
 	{
@@ -3058,7 +3117,9 @@ populate_recordset_object_field_end(void *state, char *fname, bool isnull)
 	 * that, a later field with the same name overrides the earlier field.
 	 */
 
-	hashentry->isnull = isnull;
+	hashentry->type = _state->saved_token_type;
+	Assert(isnull == (hashentry->type == JSON_TOKEN_NULL));
+
 	if (_state->save_json_start != NULL)
 	{
 		int			len = _state->lex->prev_token_terminator - _state->save_json_start;
