diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml
index 3adb365..592420a 100644
--- a/doc/src/sgml/func.sgml
+++ b/doc/src/sgml/func.sgml
@@ -10161,6 +10161,17 @@ table2-mapping
 
   <note>
     <para>
+      The text-returning variants of these functions and operators will convert Unicode escapes
+      in the JSON text to the corresponding UTF8 character when the database encoding is UTF8.
+      In other encodings, the escape sequence is preserved unchanged as part of the text value,
+      since the Unicode code point might have no equivalent in the database encoding.
+      In general, it is best to avoid mixing Unicode escapes in JSON with a non-UTF8 database
+      encoding, if possible.
+    </para>
+  </note>
+
+  <note>
+    <para>
       The <xref linkend="hstore"> extension has a cast from <type>hstore</type> to
       <type>json</type>, so that converted <type>hstore</type> values are represented as JSON objects,
       not as string values.
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index d8046c5..bb8aa4f 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -717,7 +717,6 @@ json_lex_string(JsonLexContext *lex)
 				{
 					char		utf8str[5];
 					int			utf8len;
-					char	   *converted;
 
 					if (ch >= 0xd800 && ch <= 0xdbff)
 					{
@@ -749,13 +748,31 @@ json_lex_string(JsonLexContext *lex)
 								 errdetail("low order surrogate must follow a high order surrogate."),
 								 report_json_context(lex)));
 
-					unicode_to_utf8(ch, (unsigned char *) utf8str);
-					utf8len = pg_utf_mblen((unsigned char *) utf8str);
-					utf8str[utf8len] = '\0';
-					converted = pg_any_to_server(utf8str, utf8len, PG_UTF8);
-					appendStringInfoString(lex->strval, converted);
-					if (converted != utf8str)
-						pfree(converted);
+					/*
+					 * For UTF8, replace the escape sequence by the actual utf8
+					 * character in lex->strval. For other encodings, just pass
+					 * the escape sequence through, since the chances are very
+					 * high that the database encoding won't have a matching
+					 * codepoint - that's one of the possible reasons that the
+					 * user used unicode escapes in the first place.
+					 */
+
+					if (GetDatabaseEncoding() == PG_UTF8)
+					{
+						unicode_to_utf8(ch, (unsigned char *) utf8str);
+						utf8len = pg_utf_mblen((unsigned char *) utf8str);
+						appendBinaryStringInfo(lex->strval, utf8str, utf8len);
+					}
+					else if (ch >= 0x10000)
+					{
+						/* must have been a surrogate pair */
+						appendBinaryStringInfo(lex->strval, s-12, 12);
+					}
+					else
+					{
+						/* simple escape - a single \uxxxx */
+						appendBinaryStringInfo(lex->strval, s-6, 6);
+					}
 
 				}
 			}
