From 594edfbbcacc332172db491998117cd1e7781770 Mon Sep 17 00:00:00 2001
From: Marina Polyakova <m.polyakova@postgrespro.ru>
Date: Wed, 4 Apr 2018 16:00:46 +0300
Subject: [PATCH v7] Pgbench errors and serialization/deadlock retries

Client's run is aborted only in case of a serious error, for example, the
connection with the backend was lost. Otherwise if the execution of SQL or meta
command fails, the client's run continues normally until the end of the current
script execution (it is assumed that one transaction script contains only one
transaction).

Transactions with serialization or deadlock failures are rolled back and
repeated until they complete successfully or reach the maximum number of tries
(specified by the --max-tries option) / the maximum time of tries (specified by
the --max-tries-time option). These options can be combined together; but if
none of them are used, failed transactions are not retried at all. If the last
transaction run fails, this transaction will be reported as failed, and the
client variables will be set as they were before the first run of this
transaction.

If there're retries and/or errors their statistics are printed in the progress,
in the transaction / aggregation logs and in the end with other results (all and
for each script). A transaction error is reported here only if the last try of
this transaction fails. Also retries and/or errors are printed per-command with
average latencies if you use the appropriate benchmarking option
(--report-per-command, -r) and the total number of retries and/or errors is not
zero.

If a failed transaction block does not terminate in the current script, the
commands of the following scripts are processed as usual so you can get a lot of
errors of type "in failed SQL transaction" (when the current SQL transaction is
aborted and commands ignored until end of transaction block). In such cases you
can use separate statistics of these errors in all reports.

If you want to distinguish between failures or errors by type (including which
limit for retries was violated and how far it was exceeded for the
serialization/deadlock errors), use the pgbench debugging output created with
the option --debug and with the debugging level "fails" or "all". The first
variant is recommended for this purpose because in the second case the
debugging output can be very large.
---
 doc/src/sgml/ref/pgbench.sgml                      |  332 ++++-
 src/bin/pgbench/pgbench.c                          | 1291 ++++++++++++++++----
 src/bin/pgbench/t/001_pgbench_with_server.pl       |   49 +-
 src/bin/pgbench/t/002_pgbench_no_server.pl         |    6 +-
 .../t/003_serialization_and_deadlock_fails.pl      |  739 +++++++++++
 5 files changed, 2138 insertions(+), 279 deletions(-)
 create mode 100644 src/bin/pgbench/t/003_serialization_and_deadlock_fails.pl

diff --git a/doc/src/sgml/ref/pgbench.sgml b/doc/src/sgml/ref/pgbench.sgml
index 41d9030..6b691fd 100644
--- a/doc/src/sgml/ref/pgbench.sgml
+++ b/doc/src/sgml/ref/pgbench.sgml
@@ -55,16 +55,19 @@ number of clients: 10
 number of threads: 1
 number of transactions per client: 1000
 number of transactions actually processed: 10000/10000
+maximum number of tries: 1
 tps = 85.184871 (including connections establishing)
 tps = 85.296346 (excluding connections establishing)
 </screen>
 
-  The first six lines report some of the most important parameter
-  settings.  The next line reports the number of transactions completed
-  and intended (the latter being just the product of number of clients
+  The first six lines and the eighth line report some of the most important
+  parameter settings.  The seventh line reports the number of transactions
+  completed and intended (the latter being just the product of number of clients
   and number of transactions per client); these will be equal unless the run
-  failed before completion.  (In <option>-T</option> mode, only the actual
-  number of transactions is printed.)
+  failed before completion or some SQL/meta command(s) failed.  (In
+  <option>-T</option> mode, only the actual number of transactions is printed.)
+  (see <xref linkend="errors-and-retries" endterm="errors-and-retries-title"/>
+  for more information)
   The last two lines report the number of transactions per second,
   figured with and without counting the time to start database sessions.
  </para>
@@ -380,11 +383,28 @@ pgbench <optional> <replaceable>options</replaceable> </optional> <replaceable>d
      </varlistentry>
 
      <varlistentry>
-      <term><option>-d</option></term>
-      <term><option>--debug</option></term>
+      <term><option>-d</option> <replaceable>debug_level</replaceable></term>
+      <term><option>--debug=</option><replaceable>debug_level</replaceable></term>
       <listitem>
        <para>
-        Print debugging output.
+        Print debugging output. You can use the following debugging levels:
+          <itemizedlist>
+           <listitem>
+            <para><literal>no</literal>: no debugging output (except built-in
+            function <function>debug</function>, see <xref
+            linkend="pgbench-functions"/>).</para>
+           </listitem>
+           <listitem>
+            <para><literal>fails</literal>: print only failure messages, errors
+            and retries (see <xref linkend="errors-and-retries"
+            endterm="errors-and-retries-title"/> for more information).</para>
+           </listitem>
+           <listitem>
+            <para><literal>all</literal>: print all debugging output
+            (throttling, executed/sent/received commands etc.).</para>
+           </listitem>
+          </itemizedlist>
+        The default is no debugging output.
        </para>
       </listitem>
      </varlistentry>
@@ -513,22 +533,38 @@ pgbench <optional> <replaceable>options</replaceable> </optional> <replaceable>d
         Show progress report every <replaceable>sec</replaceable> seconds.  The report
         includes the time since the beginning of the run, the tps since the
         last report, and the transaction latency average and standard
-        deviation since the last report.  Under throttling (<option>-R</option>),
-        the latency is computed with respect to the transaction scheduled
-        start time, not the actual transaction beginning time, thus it also
-        includes the average schedule lag time.
+        deviation since the last report.  If any transactions ended with a
+        failed SQL or meta command since the last report, they are also reported
+        as failed.  If any transactions ended with an error "in failed SQL
+        transaction block", they are reported separately as <literal>in failed
+        tx</literal> (see <xref linkend="errors-and-retries"
+        endterm="errors-and-retries-title"/> for more information).  Under
+        throttling (<option>-R</option>), the latency is computed with respect
+        to the transaction scheduled start time, not the actual transaction
+        beginning time, thus it also includes the average schedule lag time.  If
+        any transactions have been rolled back and retried after a
+        serialization/deadlock failure since the last report, the report
+        includes the number of such transactions and the sum of all retries. Use
+        the options <option>--max-tries</option> and/or
+        <option>--max-tries-time</option> to enable transaction retries after
+        serialization/deadlock failures.
        </para>
       </listitem>
      </varlistentry>
 
      <varlistentry>
       <term><option>-r</option></term>
-      <term><option>--report-latencies</option></term>
+      <term><option>--report-per-command</option></term>
       <listitem>
        <para>
-        Report the average per-statement latency (execution time from the
-        perspective of the client) of each command after the benchmark
-        finishes.  See below for details.
+        Report the following statistics for each command after the benchmark
+        finishes: the average per-statement latency (execution time from the
+        perspective of the client), the number of all errors, the number of
+        errors "in failed SQL transaction block", and the number of retries
+        after serialization or deadlock failures.  The report displays the
+        columns with statistics on errors and retries only if the current
+        <application>pgbench</application> run has an error of the corresponding
+        type or retry, respectively. See below for details.
        </para>
       </listitem>
      </varlistentry>
@@ -667,6 +703,42 @@ pgbench <optional> <replaceable>options</replaceable> </optional> <replaceable>d
      </varlistentry>
 
      <varlistentry>
+      <term><option>--max-tries=<replaceable>number_of_tries</replaceable></option></term>
+      <listitem>
+       <para>
+        Set the maximum number of tries for transactions with
+        serialization/deadlock failures.
+       </para>
+       <para>
+        This option can be combined with the option
+        <option>--max-tries-time</option>. But if none of them are used, failed
+        transactions are not retried at all. See
+        <xref linkend="errors-and-retries" endterm="errors-and-retries-title"/>
+        for more information about retrying failed transactions.
+       </para>
+      </listitem>
+     </varlistentry>
+
+     <varlistentry>
+      <term><option>--max-tries-time=<replaceable>time_of_tries</replaceable></option></term>
+      <listitem>
+       <para>
+        Set the maximum time (in milliseconds) of tries for transactions with
+        serialization/deadlock failures. The transaction with serialization or
+        deadlock failure can be retried if the total time of all its tries is
+        less than <replaceable>time_of_tries</replaceable> ms.
+       </para>
+       <para>
+        This option can be combined with the option
+        <option>--max-tries</option>. But if none of them are used, failed
+        transactions are not retried at all. See
+        <xref linkend="errors-and-retries" endterm="errors-and-retries-title"/>
+        for more information about retrying failed transactions.
+       </para>
+      </listitem>
+     </varlistentry>
+
+     <varlistentry>
       <term><option>--progress-timestamp</option></term>
       <listitem>
        <para>
@@ -807,8 +879,8 @@ pgbench <optional> <replaceable>options</replaceable> </optional> <replaceable>d
  <refsect1>
   <title>Notes</title>
 
- <refsect2>
-  <title>What is the <quote>Transaction</quote> Actually Performed in <application>pgbench</application>?</title>
+ <refsect2 id="transactions-and-scripts">
+  <title id="transactions-and-scripts-title">What is the <quote>Transaction</quote> Actually Performed in <application>pgbench</application>?</title>
 
   <para>
    <application>pgbench</application> executes test scripts chosen randomly
@@ -1583,7 +1655,7 @@ END;
    The format of the log is:
 
 <synopsis>
-<replaceable>client_id</replaceable> <replaceable>transaction_no</replaceable> <replaceable>time</replaceable> <replaceable>script_no</replaceable> <replaceable>time_epoch</replaceable> <replaceable>time_us</replaceable> <optional> <replaceable>schedule_lag</replaceable> </optional>
+<replaceable>client_id</replaceable> <replaceable>transaction_no</replaceable> <replaceable>time</replaceable> <replaceable>script_no</replaceable> <replaceable>time_epoch</replaceable> <replaceable>time_us</replaceable> <optional> <replaceable>schedule_lag</replaceable> </optional> <optional> <replaceable>retries</replaceable> </optional>
 </synopsis>
 
    where
@@ -1604,6 +1676,17 @@ END;
    When both <option>--rate</option> and <option>--latency-limit</option> are used,
    the <replaceable>time</replaceable> for a skipped transaction will be reported as
    <literal>skipped</literal>.
+   <replaceable>retries</replaceable> is the sum of all the retries after the
+   serialization or deadlock failures during the current script execution. It is
+   only present when the maximum number of tries for transactions is more than 1
+   (<option>--max-tries</option>) and/or the maximum time of tries for
+   transactions is used (<option>--max-tries-time</option>). If the transaction
+   ended with an error "in failed SQL transaction", its
+   <replaceable>time</replaceable> will be reported as
+   <literal>in_failed_tx</literal>. If the transaction ended with another error,
+   its <replaceable>time</replaceable> will be reported as
+   <literal>failed</literal> (see <xref linkend="errors-and-retries"
+   endterm="errors-and-retries-title"/> for more information).
   </para>
 
   <para>
@@ -1633,6 +1716,24 @@ END;
   </para>
 
   <para>
+   The following example shows a snippet of a log file with errors and retries,
+   with the maximum number of tries set to 10 (note the additional
+   <replaceable>retries</replaceable> column):
+<screen>
+3 0 47423 0 1499414498 34501 4
+3 1 8333 0 1499414498 42848 1
+3 2 8358 0 1499414498 51219 1
+4 0 72345 0 1499414498 59433 7
+1 3 41718 0 1499414498 67879 5
+1 4 8416 0 1499414498 76311 1
+3 3 33235 0 1499414498 84469 4
+0 0 failed 0 1499414498 84905 10
+2 0 failed 0 1499414498 86248 10
+3 4 8307 0 1499414498 92788 1
+</screen>
+  </para>
+
+  <para>
    When running a long test on hardware that can handle a lot of transactions,
    the log files can become very large.  The <option>--sampling-rate</option> option
    can be used to log only a random sample of transactions.
@@ -1647,7 +1748,7 @@ END;
    format is used for the log files:
 
 <synopsis>
-<replaceable>interval_start</replaceable> <replaceable>num_transactions</replaceable> <replaceable>sum_latency</replaceable> <replaceable>sum_latency_2</replaceable> <replaceable>min_latency</replaceable> <replaceable>max_latency</replaceable> <optional> <replaceable>sum_lag</replaceable> <replaceable>sum_lag_2</replaceable> <replaceable>min_lag</replaceable> <replaceable>max_lag</replaceable> <optional> <replaceable>skipped</replaceable> </optional> </optional>
+<replaceable>interval_start</replaceable> <replaceable>num_transactions</replaceable> <replaceable>sum_latency</replaceable> <replaceable>sum_latency_2</replaceable> <replaceable>min_latency</replaceable> <replaceable>max_latency</replaceable> <replaceable>failed_tx</replaceable> <replaceable>in_failed_tx</replaceable> <optional> <replaceable>sum_lag</replaceable> <replaceable>sum_lag_2</replaceable> <replaceable>min_lag</replaceable> <replaceable>max_lag</replaceable> <optional> <replaceable>skipped</replaceable> </optional> </optional> <optional> <replaceable>retried_tx</replaceable> <replaceable>retries</replaceable> </optional>
 </synopsis>
 
    where
@@ -1661,7 +1762,13 @@ END;
    transaction latencies within the interval,
    <replaceable>min_latency</replaceable> is the minimum latency within the interval,
    and
-   <replaceable>max_latency</replaceable> is the maximum latency within the interval.
+   <replaceable>max_latency</replaceable> is the maximum latency within the interval,
+   <replaceable>failed_tx</replaceable> is the number of transactions that ended
+   with a failed SQL or meta command within the interval,
+   <replaceable>in_failed_tx</replaceable> is the number of transactions that
+   ended with an error "in failed SQL transaction block" (see
+   <xref linkend="errors-and-retries" endterm="errors-and-retries-title"/>
+   for more information).
    The next fields,
    <replaceable>sum_lag</replaceable>, <replaceable>sum_lag_2</replaceable>, <replaceable>min_lag</replaceable>,
    and <replaceable>max_lag</replaceable>, are only present if the <option>--rate</option>
@@ -1669,21 +1776,28 @@ END;
    They provide statistics about the time each transaction had to wait for the
    previous one to finish, i.e. the difference between each transaction's
    scheduled start time and the time it actually started.
-   The very last field, <replaceable>skipped</replaceable>,
+   The next field, <replaceable>skipped</replaceable>,
    is only present if the <option>--latency-limit</option> option is used, too.
    It counts the number of transactions skipped because they would have
    started too late.
+   The <replaceable>retried_tx</replaceable> and
+   <replaceable>retries</replaceable> fields are only present if the maximum
+   number of tries for transactions is more than 1
+   (<option>--max-tries</option>) and/or the maximum time of tries for
+   transactions is used (<option>--max-tries-time</option>). They report the
+   number of retried transactions and the sum of all the retries after
+   serialization or deadlock failures within the interval.
    Each transaction is counted in the interval when it was committed.
   </para>
 
   <para>
    Here is some example output:
 <screen>
-1345828501 5601 1542744 483552416 61 2573
-1345828503 7884 1979812 565806736 60 1479
-1345828505 7208 1979422 567277552 59 1391
-1345828507 7685 1980268 569784714 60 1398
-1345828509 7073 1979779 573489941 236 1411
+1345828501 5601 1542744 483552416 61 2573 0 0
+1345828503 7884 1979812 565806736 60 1479 0 0
+1345828505 7208 1979422 567277552 59 1391 0 0
+1345828507 7685 1980268 569784714 60 1398 0 0
+1345828509 7073 1979779 573489941 236 1411 0 0
 </screen></para>
 
   <para>
@@ -1695,16 +1809,55 @@ END;
  </refsect2>
 
  <refsect2>
-  <title>Per-Statement Latencies</title>
+  <title>Per-Statement Report</title>
 
   <para>
-   With the <option>-r</option> option, <application>pgbench</application> collects
-   the elapsed transaction time of each statement executed by every
-   client.  It then reports an average of those values, referred to
-   as the latency for each statement, after the benchmark has finished.
+   With the <option>-r</option> option, <application>pgbench</application>
+   collects the following statistics for each statement:
+   <itemizedlist>
+     <listitem>
+       <para>
+         <literal>latency</literal> &mdash; elapsed transaction time for each
+         statement. <application>pgbench</application> reports an average value
+         of all successful runs of the statement.
+       </para>
+     </listitem>
+     <listitem>
+       <para>
+         The number of errors in this statement. See
+         <xref linkend="errors-and-retries" endterm="errors-and-retries-title"/>
+         for more information.
+       </para>
+     </listitem>
+     <listitem>
+       <para>
+         The number of errors "in failed SQL transaction" in this statement. See
+         <xref linkend="errors-and-retries" endterm="errors-and-retries-title"/>
+         for more information.
+       </para>
+     </listitem>
+     <listitem>
+       <para>
+         The number of retries after a serialization or a deadlock failure in
+         this statement. See <xref linkend="errors-and-retries"
+         endterm="errors-and-retries-title"/> for more information.
+       </para>
+     </listitem>
+   </itemizedlist>
   </para>
 
   <para>
+   The report displays the columns with statistics on errors and retries only if
+   the current <application>pgbench</application> run has an error or retry,
+   respectively.
+  </para>
+
+   <para>
+   All values are computed for each statement executed by every client and are
+   reported after the benchmark has finished.
+   </para>
+
+  <para>
    For the default script, the output will look similar to this:
 <screen>
 starting vacuum...end.
@@ -1715,6 +1868,7 @@ number of clients: 10
 number of threads: 1
 number of transactions per client: 1000
 number of transactions actually processed: 10000/10000
+maximum number of tries: 1
 latency average = 15.844 ms
 latency stddev = 2.715 ms
 tps = 618.764555 (including connections establishing)
@@ -1732,10 +1886,50 @@ statement latencies in milliseconds:
         0.371  INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);
         1.212  END;
 </screen>
+
+   Another example of output for the default script using serializable default
+   transaction isolation level (<command>PGOPTIONS='-c
+   default_transaction_isolation=serializable' pgbench ...</command>):
+<screen>
+starting vacuum...end.
+transaction type: &lt;builtin: TPC-B (sort of)&gt;
+scaling factor: 1
+query mode: simple
+number of clients: 10
+number of threads: 1
+number of transactions per client: 1000
+number of transactions actually processed: 5293/10000
+number of errors: 4707 (47.070%)
+number of retried: 7164 (71.640%)
+number of retries: 255928
+maximum number of tries: 100
+maximum time of tries: 100.0 ms
+latency average = 34.817 ms
+latency stddev = 37.347 ms
+tps = 71.083700 (including connections establishing)
+tps = 71.088507 (excluding connections establishing)
+statement latencies in milliseconds, errors and retries:
+  0.003     0       0  \set aid random(1, 100000 * :scale)
+  0.000     0       0  \set bid random(1, 1 * :scale)
+  0.000     0       0  \set tid random(1, 10 * :scale)
+  0.000     0       0  \set delta random(-5000, 5000)
+  0.186     0       0  BEGIN;
+  0.337     0       0  UPDATE pgbench_accounts
+                       SET abalance = abalance + :delta WHERE aid = :aid;
+  0.295     0       0  SELECT abalance FROM pgbench_accounts WHERE aid = :aid;
+  0.349  4168  247084  UPDATE pgbench_tellers
+                       SET tbalance = tbalance + :delta WHERE tid = :tid;
+  0.277   539    8839  UPDATE pgbench_branches
+                       SET bbalance = bbalance + :delta WHERE bid = :bid;
+  0.264     0       0  INSERT INTO pgbench_history
+                              (tid, bid, aid, delta, mtime)
+                       VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);
+  0.444     0       5  END;
+</screen>
   </para>
 
   <para>
-   If multiple script files are specified, the averages are reported
+   If multiple script files are specified, all statistics are reported
    separately for each script file.
   </para>
 
@@ -1749,6 +1943,78 @@ statement latencies in milliseconds:
   </para>
  </refsect2>
 
+ <refsect2 id="errors-and-retries">
+  <title id="errors-and-retries-title">Errors and Serialization/Deadlock Retries</title>
+
+  <para>
+   Client's run is aborted only in case of a serious error, for example, the
+   connection with the backend was lost. Otherwise if the execution of SQL or
+   meta command fails, the client's run continues normally until the end of the
+   current script execution (it is assumed that one transaction script contains
+   only one transaction; see <xref linkend="transactions-and-scripts"
+   endterm="transactions-and-scripts-title"/> for more information).
+   Transactions with serialization or deadlock failures are rolled back and
+   repeated until they complete successfully or reach the maximum number of
+   tries (specified by the <option>--max-tries</option> option) / the maximum
+   time of tries (specified by the <option>--max-tries-time</option> option). If
+   the last transaction run fails, this transaction will be reported as failed,
+   and the client variables will be set as they were before the first run of
+   this transaction.
+  </para>
+
+  <note>
+   <para>
+    Be careful when repeating scripts that contain multiple transactions: the
+    script is always retried completely, so the successful transactions can be
+    performed several times.
+   </para>
+   <para>
+    Be careful when repeating transactions with shell commands. Unlike the
+    results of SQL commands, the results of shell commands are not rolled back,
+    except for the variable value of the <command>\setshell</command> command.
+   </para>
+   <para>
+    If a failed transaction block does not terminate in the current script, the
+    commands of the following scripts are processed as usual so you can get a
+    lot of errors of type "in failed SQL transaction" (when the current SQL
+    transaction is aborted and commands ignored until end of transaction block).
+    In such cases you can use separate statistics of these errors in all
+    reports.
+   </para>
+  </note>
+
+  <para>
+   The latency of a successful transaction includes the entire time of
+   transaction execution with rollbacks and retries. The latency for failed
+   transactions and commands is not computed separately.
+  </para>
+
+  <para>
+   The main report contains the number of failed transactions if it is non-zero.
+   If the total number of transactions ended with an error "in failed SQL
+   transaction block" is non-zero, the main report also contains it. If the
+   total number of retried transactions is non-zero, the main report also
+   contains the statistics related to retries: the total number of retried
+   transactions and total number of retries (use the options
+   <option>--max-tries</option> and/or <option>--max-tries-time</option> to make
+   it possible). The per-statement report inherits all columns from the main
+   report. Note that if a failure/error occurs, the following failures/errors in
+   the current script execution are not shown in the reports. The retry is only
+   reported for the first command where the failure occurred during the current
+   script execution.
+  </para>
+
+  <para>
+   If you want to distinguish between failures or errors by type (including
+   which limit for retries was violated and how far it was exceeded for the
+   serialization/deadlock errors), use the <application>pgbench</application>
+   debugging output created with the option <option>--debug</option> and with
+   the debugging level <literal>fails</literal> or <literal>all</literal>. The
+   first variant is recommended for this purpose because in the second case
+   the debugging output can be very large.
+  </para>
+ </refsect2>
+
  <refsect2>
   <title>Good Practices</title>
 
diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index fd18568..d35cc1d 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -59,6 +59,9 @@
 
 #include "pgbench.h"
 
+#define ERRCODE_IN_FAILED_SQL_TRANSACTION  "25P02"
+#define ERRCODE_T_R_SERIALIZATION_FAILURE  "40001"
+#define ERRCODE_T_R_DEADLOCK_DETECTED  "40P01"
 #define ERRCODE_UNDEFINED_TABLE  "42P01"
 
 /*
@@ -186,9 +189,26 @@ bool		progress_timestamp = false; /* progress report with Unix time */
 int			nclients = 1;		/* number of clients */
 int			nthreads = 1;		/* number of threads */
 bool		is_connect;			/* establish connection for each transaction */
-bool		is_latencies;		/* report per-command latencies */
+bool		report_per_command = false;	/* report per-command latencies, retries
+										 * after the failures and errors
+										 * (failures without retrying) */
 int			main_pid;			/* main process id used in log filename */
 
+/*
+ * There're different types of restrictions for deciding that the current failed
+ * transaction can no longer be retried and should be reported as failed. They
+ * can be combined together, and you need to use at least one of them to retry
+ * the failed transactions. By default, failed transactions are not retried at
+ * all.
+ */
+uint32		max_tries = 0;		/* we cannot retry a failed transaction if its
+								 * number of tries reaches this maximum; if its
+								 * value is zero, it is not used */
+uint64		max_tries_time = 0;	/* we cannot retry a failed transaction if we
+								 * spent more time on it than indicated in this
+								 * limit (in usec); if its value is zero, it is
+								 * not used */
+
 char	   *pghost = "";
 char	   *pgport = "";
 char	   *login = NULL;
@@ -242,14 +262,73 @@ typedef struct SimpleStats
 typedef struct StatsData
 {
 	time_t		start_time;		/* interval start time, for aggregates */
-	int64		cnt;			/* number of transactions, including skipped */
+	int64		cnt;			/* number of successful transactions, including
+								 * skipped */
 	int64		skipped;		/* number of transactions skipped under --rate
 								 * and --latency-limit */
+	int64		retries;
+	int64		retried;		/* number of transactions that were retried
+								 * after a serialization or a deadlock
+								 * failure */
+	int64		errors;			/* number of transactions that were not retried
+								 * after a serialization or a deadlock
+								 * failure or had another error (including meta
+								 * commands errors) */
+	int64		errors_in_failed_tx;	/* number of transactions that failed in
+										 * an error
+										 * ERRCODE_IN_FAILED_SQL_TRANSACTION */
 	SimpleStats latency;
 	SimpleStats lag;
 } StatsData;
 
 /*
+ * Data structure for client variables.
+ */
+typedef struct Variables
+{
+	Variable   *array;			/* array of variable definitions */
+	int			nvariables;		/* number of variables */
+	bool		vars_sorted;	/* are variables sorted by name? */
+} Variables;
+
+/*
+ * Data structure for thread/client random seed.
+ */
+typedef struct RandomState
+{
+	unsigned short data[3];
+} RandomState;
+
+/*
+ * Data structure for repeating a transaction from the beginning with the same
+ * parameters.
+ */
+typedef struct RetryState
+{
+	RandomState random_state;	/* random seed */
+	Variables   variables;		/* client variables */
+} RetryState;
+
+/*
+ * For the failures during script execution.
+ */
+typedef enum FailureStatus
+{
+	NO_FAILURE = 0,
+	SERIALIZATION_FAILURE,
+	DEADLOCK_FAILURE,
+	IN_FAILED_SQL_TRANSACTION,
+	ANOTHER_FAILURE
+} FailureStatus;
+
+typedef struct Failure
+{
+	FailureStatus status;		/* type of the failure */
+	int			command;		/* command number in script where the failure
+								 * occurred */
+} Failure;
+
+/*
  * Connection state machine states.
  */
 typedef enum
@@ -304,6 +383,22 @@ typedef enum
 	CSTATE_END_COMMAND,
 
 	/*
+	 * States for transactions with serialization or deadlock failures.
+	 *
+	 * First, remember the failure in CSTATE_FAILURE. Then process other
+	 * commands of the failed transaction if any and go to CSTATE_RETRY. If we
+	 * can re-execute the transaction from the very beginning, report this as a
+	 * failure, set the same parameters for the transaction execution as in the
+	 * previous tries and process the first transaction command in
+	 * CSTATE_START_COMMAND. Otherwise, report this as an error, set the
+	 * parameters for the transaction execution as they were before the first
+	 * run of this transaction (except for a random state) and go to
+	 * CSTATE_END_TX to complete this transaction.
+	 */
+	CSTATE_FAILURE,
+	CSTATE_RETRY,
+
+	/*
 	 * CSTATE_END_TX performs end-of-transaction processing.  Calculates
 	 * latency, and logs the transaction.  In --connect mode, closes the
 	 * current connection.  Chooses the next script to execute and starts over
@@ -329,14 +424,13 @@ typedef struct
 	int			id;				/* client No. */
 	ConnectionStateEnum state;	/* state machine's current state. */
 	ConditionalStack cstack;	/* enclosing conditionals state */
+	RandomState random_state;	/* separate randomness for each client */
 
 	int			use_file;		/* index in sql_script for this client */
 	int			command;		/* command number in script */
 
 	/* client variables */
-	Variable   *variables;		/* array of variable definitions */
-	int			nvariables;		/* number of variables */
-	bool		vars_sorted;	/* are variables sorted by name? */
+	Variables   variables;
 
 	/* various times about current transaction */
 	int64		txn_scheduled;	/* scheduled start time of transaction (usec) */
@@ -346,6 +440,18 @@ typedef struct
 
 	bool		prepared[MAX_SCRIPTS];	/* whether client prepared the script */
 
+	/*
+	 * For processing errors and repeating transactions with serialization or
+	 * deadlock failures:
+	 */
+	Failure		first_failure;	/* status and command number of the first
+								 * failure in the current transaction execution;
+								 * status NO_FAILURE if there were no failures
+								 * or errors */
+	RetryState  retry_state;
+	uint32			retries;	/* how many times have we already retried the
+								 * current transaction? */
+
 	/* per client collected stats */
 	int64		cnt;			/* client transaction count, for -t */
 	int			ecnt;			/* error count */
@@ -389,7 +495,7 @@ typedef struct
 	pthread_t	thread;			/* thread handle */
 	CState	   *state;			/* array of CState */
 	int			nstate;			/* length of state[] */
-	unsigned short random_state[3]; /* separate randomness for each thread */
+	RandomState random_state; 	/* separate randomness for each thread */
 	int64		throttle_trigger;	/* previous/next throttling (us) */
 	FILE	   *logfile;		/* where to log, or NULL */
 	ZipfCache	zipf_cache;		/* for thread-safe  zipfian random number
@@ -445,6 +551,10 @@ typedef struct
 	char	   *argv[MAX_ARGS]; /* command word list */
 	PgBenchExpr *expr;			/* parsed expression, if needed */
 	SimpleStats stats;			/* time spent in this command */
+	int64		retries;
+	int64		errors;			/* number of failures that were not retried */
+	int64		errors_in_failed_tx;	/* number of errors
+										 * ERRCODE_IN_FAILED_SQL_TRANSACTION */
 } Command;
 
 typedef struct ParsedScript
@@ -460,7 +570,18 @@ static int	num_scripts;		/* number of scripts in sql_script[] */
 static int	num_commands = 0;	/* total number of Command structs */
 static int64 total_weight = 0;
 
-static int	debug = 0;			/* debug flag */
+typedef enum Debuglevel
+{
+	NO_DEBUG = 0,				/* no debugging output (except PGBENCH_DEBUG) */
+	DEBUG_FAILS,				/* print only failure messages, errors and
+								 * retries */
+	DEBUG_ALL,					/* print all debugging output (throttling,
+								 * executed/sent/received commands etc.) */
+	NUM_DEBUGLEVEL				/* number of debug levels; keep this entry last */
+} Debuglevel;
+
+static Debuglevel debug_level = NO_DEBUG;	/* debug flag */
+static const char *DEBUGLEVEl[] = {"no", "fails", "all"};	/* XXX(review): "DEBUGLEVEl" looks like a typo for DEBUGLEVEL -- rename here and at every use site */
 
 /* Builtin test scripts */
 typedef struct BuiltinScript
@@ -572,7 +693,7 @@ usage(void)
 		   "                           protocol for submitting queries (default: simple)\n"
 		   "  -n, --no-vacuum          do not run VACUUM before tests\n"
 		   "  -P, --progress=NUM       show thread progress report every NUM seconds\n"
-		   "  -r, --report-latencies   report average latency per command\n"
+		   "  -r, --report-per-command report latencies, errors and retries per command\n"
 		   "  -R, --rate=NUM           target rate in transactions per second\n"
 		   "  -s, --scale=NUM          report this scale factor in output\n"
 		   "  -t, --transactions=NUM   number of transactions each client runs (default: 10)\n"
@@ -581,11 +702,13 @@ usage(void)
 		   "  --aggregate-interval=NUM aggregate data over NUM seconds\n"
 		   "  --log-prefix=PREFIX      prefix for transaction time log file\n"
 		   "                           (default: \"pgbench_log\")\n"
+		   "  --max-tries=NUM          max number of tries to run transaction\n"
+		   "  --max-tries-time=NUM     max time (in ms) of tries to run transaction\n"
 		   "  --progress-timestamp     use Unix epoch timestamps for progress\n"
 		   "  --random-seed=SEED       set random seed (\"time\", \"rand\", integer)\n"
 		   "  --sampling-rate=NUM      fraction of transactions to log (e.g., 0.01 for 1%%)\n"
 		   "\nCommon options:\n"
-		   "  -d, --debug              print debugging output\n"
+		   "  -d, --debug=no|fails|all print debugging output (default: no)\n"
 		   "  -h, --host=HOSTNAME      database server host or socket directory\n"
 		   "  -p, --port=PORT          database server port number\n"
 		   "  -U, --username=USERNAME  connect as specified database user\n"
@@ -693,7 +816,7 @@ gotdigits:
 
 /* random number generator: uniform distribution from min to max inclusive */
 static int64
-getrand(TState *thread, int64 min, int64 max)
+getrand(RandomState *random_state, int64 min, int64 max)
 {
 	/*
 	 * Odd coding is so that min and max have approximately the same chance of
@@ -704,7 +827,7 @@ getrand(TState *thread, int64 min, int64 max)
 	 * protected by a mutex, and therefore a bottleneck on machines with many
 	 * CPUs.
 	 */
-	return min + (int64) ((max - min + 1) * pg_erand48(thread->random_state));
+	return min + (int64) ((max - min + 1) * pg_erand48(random_state->data));
 }
 
 /*
@@ -713,7 +836,8 @@ getrand(TState *thread, int64 min, int64 max)
  * value is exp(-parameter).
  */
 static int64
-getExponentialRand(TState *thread, int64 min, int64 max, double parameter)
+getExponentialRand(RandomState *random_state, int64 min, int64 max,
+				   double parameter)
 {
 	double		cut,
 				uniform,
@@ -723,7 +847,7 @@ getExponentialRand(TState *thread, int64 min, int64 max, double parameter)
 	Assert(parameter > 0.0);
 	cut = exp(-parameter);
 	/* erand in [0, 1), uniform in (0, 1] */
-	uniform = 1.0 - pg_erand48(thread->random_state);
+	uniform = 1.0 - pg_erand48(random_state->data);
 
 	/*
 	 * inner expression in (cut, 1] (if parameter > 0), rand in [0, 1)
@@ -736,7 +860,8 @@ getExponentialRand(TState *thread, int64 min, int64 max, double parameter)
 
 /* random number generator: gaussian distribution from min to max inclusive */
 static int64
-getGaussianRand(TState *thread, int64 min, int64 max, double parameter)
+getGaussianRand(RandomState *random_state, int64 min, int64 max,
+				double parameter)
 {
 	double		stdev;
 	double		rand;
@@ -764,8 +889,8 @@ getGaussianRand(TState *thread, int64 min, int64 max, double parameter)
 		 * are expected in (0, 1] (see
 		 * http://en.wikipedia.org/wiki/Box_muller)
 		 */
-		double		rand1 = 1.0 - pg_erand48(thread->random_state);
-		double		rand2 = 1.0 - pg_erand48(thread->random_state);
+		double		rand1 = 1.0 - pg_erand48(random_state->data);
+		double		rand2 = 1.0 - pg_erand48(random_state->data);
 
 		/* Box-Muller basic form transform */
 		double		var_sqrt = sqrt(-2.0 * log(rand1));
@@ -792,7 +917,7 @@ getGaussianRand(TState *thread, int64 min, int64 max, double parameter)
  * will approximate a Poisson distribution centered on the given value.
  */
 static int64
-getPoissonRand(TState *thread, int64 center)
+getPoissonRand(RandomState *random_state, int64 center)
 {
 	/*
 	 * Use inverse transform sampling to generate a value > 0, such that the
@@ -801,7 +926,7 @@ getPoissonRand(TState *thread, int64 center)
 	double		uniform;
 
 	/* erand in [0, 1), uniform in (0, 1] */
-	uniform = 1.0 - pg_erand48(thread->random_state);
+	uniform = 1.0 - pg_erand48(random_state->data);
 
 	return (int64) (-log(uniform) * ((double) center) + 0.5);
 }
@@ -879,7 +1004,7 @@ zipfFindOrCreateCacheCell(ZipfCache * cache, int64 n, double s)
  * Luc Devroye, p. 550-551, Springer 1986.
  */
 static int64
-computeIterativeZipfian(TState *thread, int64 n, double s)
+computeIterativeZipfian(RandomState *random_state, int64 n, double s)
 {
 	double		b = pow(2.0, s - 1.0);
 	double		x,
@@ -890,8 +1015,8 @@ computeIterativeZipfian(TState *thread, int64 n, double s)
 	while (true)
 	{
 		/* random variates */
-		u = pg_erand48(thread->random_state);
-		v = pg_erand48(thread->random_state);
+		u = pg_erand48(random_state->data);
+		v = pg_erand48(random_state->data);
 
 		x = floor(pow(u, -1.0 / (s - 1.0)));
 
@@ -909,10 +1034,11 @@ computeIterativeZipfian(TState *thread, int64 n, double s)
  * Jim Gray et al, SIGMOD 1994
  */
 static int64
-computeHarmonicZipfian(TState *thread, int64 n, double s)
+computeHarmonicZipfian(TState *thread, RandomState *random_state, int64 n,
+					   double s)
 {
 	ZipfCell   *cell = zipfFindOrCreateCacheCell(&thread->zipf_cache, n, s);
-	double		uniform = pg_erand48(thread->random_state);
+	double		uniform = pg_erand48(random_state->data);
 	double		uz = uniform * cell->harmonicn;
 
 	if (uz < 1.0)
@@ -924,7 +1050,8 @@ computeHarmonicZipfian(TState *thread, int64 n, double s)
 
 /* random number generator: zipfian distribution from min to max inclusive */
 static int64
-getZipfianRand(TState *thread, int64 min, int64 max, double s)
+getZipfianRand(TState *thread, RandomState *random_state, int64 min,
+			   int64 max, double s)
 {
 	int64		n = max - min + 1;
 
@@ -933,8 +1060,8 @@ getZipfianRand(TState *thread, int64 min, int64 max, double s)
 
 
 	return min - 1 + ((s > 1)
-					  ? computeIterativeZipfian(thread, n, s)
-					  : computeHarmonicZipfian(thread, n, s));
+					? computeIterativeZipfian(random_state, n, s)
+					: computeHarmonicZipfian(thread, random_state, n, s));
 }
 
 /*
@@ -1034,6 +1161,10 @@ initStats(StatsData *sd, time_t start_time)
 	sd->start_time = start_time;
 	sd->cnt = 0;
 	sd->skipped = 0;
+	sd->retries = 0;
+	sd->retried = 0;
+	sd->errors = 0;
+	sd->errors_in_failed_tx = 0;
 	initSimpleStats(&sd->latency);
 	initSimpleStats(&sd->lag);
 }
@@ -1042,8 +1173,30 @@ initStats(StatsData *sd, time_t start_time)
  * Accumulate one additional item into the given stats object.
  */
 static void
-accumStats(StatsData *stats, bool skipped, double lat, double lag)
+accumStats(StatsData *stats, bool skipped, double lat, double lag,
+		   FailureStatus first_error, int64 retries)
 {
+	/*
+	 * Record the number of retries regardless of whether the transaction was
+	 * successful or failed.
+	 */
+	stats->retries += retries;
+	if (retries > 0)
+		stats->retried++;
+
+	/* Record the failed transaction */
+	if (first_error != NO_FAILURE)
+	{
+		stats->errors++;
+
+		if (first_error == IN_FAILED_SQL_TRANSACTION)
+			stats->errors_in_failed_tx++;
+
+		return;
+	}
+
+	/* Record the successful transaction */
+
 	stats->cnt++;
 
 	if (skipped)
@@ -1184,39 +1337,39 @@ compareVariableNames(const void *v1, const void *v2)
 
 /* Locate a variable by name; returns NULL if unknown */
 static Variable *
-lookupVariable(CState *st, char *name)
+lookupVariable(Variables *variables, char *name)	/* search the client's variable set by name */
 {
 	Variable	key;
 
 	/* On some versions of Solaris, bsearch of zero items dumps core */
-	if (st->nvariables <= 0)
+	if (variables->nvariables <= 0)
 		return NULL;
 
 	/* Sort if we have to */
-	if (!st->vars_sorted)
+	if (!variables->vars_sorted)
 	{
-		qsort((void *) st->variables, st->nvariables, sizeof(Variable),
-			  compareVariableNames);
-		st->vars_sorted = true;
+		qsort((void *) variables->array, variables->nvariables,
+			  sizeof(Variable), compareVariableNames);
+		variables->vars_sorted = true;	/* stays sorted until the next insertion clears the flag */
 	}
 
 	/* Now we can search */
 	key.name = name;
 	return (Variable *) bsearch((void *) &key,
-								(void *) st->variables,
-								st->nvariables,
+								(void *) variables->array,
+								variables->nvariables,
 								sizeof(Variable),
 								compareVariableNames);
 }
 
 /* Get the value of a variable, in string form; returns NULL if unknown */
 static char *
-getVariable(CState *st, char *name)
+getVariable(Variables *variables, char *name)
 {
 	Variable   *var;
 	char		stringform[64];
 
-	var = lookupVariable(st, name);
+	var = lookupVariable(variables, name);
 	if (var == NULL)
 		return NULL;			/* not found */
 
@@ -1290,9 +1443,12 @@ makeVariableValue(Variable *var)
 
 		if (sscanf(var->svalue, "%lf%c", &dv, &xs) != 1)
 		{
-			fprintf(stderr,
-					"malformed variable \"%s\" value: \"%s\"\n",
-					var->name, var->svalue);
+			if (debug_level >= DEBUG_FAILS)
+			{
+				fprintf(stderr,
+						"malformed variable \"%s\" value: \"%s\"\n",
+						var->name, var->svalue);
+			}
 			return false;
 		}
 		setDoubleValue(&var->value, dv);
@@ -1340,11 +1496,12 @@ valid_variable_name(const char *name)
  * Returns NULL on failure (bad name).
  */
 static Variable *
-lookupCreateVariable(CState *st, const char *context, char *name)
+lookupCreateVariable(Variables *variables, const char *context, char *name,
+					 bool aborted)
 {
 	Variable   *var;
 
-	var = lookupVariable(st, name);
+	var = lookupVariable(variables, name);
 	if (var == NULL)
 	{
 		Variable   *newvars;
@@ -1355,29 +1512,32 @@ lookupCreateVariable(CState *st, const char *context, char *name)
 		 */
 		if (!valid_variable_name(name))
 		{
-			fprintf(stderr, "%s: invalid variable name: \"%s\"\n",
-					context, name);
+			if (aborted || debug_level >= DEBUG_FAILS)
+			{
+				fprintf(stderr, "%s: invalid variable name: \"%s\"\n",
+						context, name);
+			}
 			return NULL;
 		}
 
 		/* Create variable at the end of the array */
-		if (st->variables)
-			newvars = (Variable *) pg_realloc(st->variables,
-											  (st->nvariables + 1) * sizeof(Variable));
+		if (variables->array)
+			newvars = (Variable *) pg_realloc(variables->array,
+								(variables->nvariables + 1) * sizeof(Variable));
 		else
 			newvars = (Variable *) pg_malloc(sizeof(Variable));
 
-		st->variables = newvars;
+		variables->array = newvars;
 
-		var = &newvars[st->nvariables];
+		var = &newvars[variables->nvariables];
 
 		var->name = pg_strdup(name);
 		var->svalue = NULL;
 		/* caller is expected to initialize remaining fields */
 
-		st->nvariables++;
+		variables->nvariables++;
 		/* we don't re-sort the array till we have to */
-		st->vars_sorted = false;
+		variables->vars_sorted = false;
 	}
 
 	return var;
@@ -1386,12 +1546,13 @@ lookupCreateVariable(CState *st, const char *context, char *name)
 /* Assign a string value to a variable, creating it if need be */
 /* Returns false on failure (bad name) */
 static bool
-putVariable(CState *st, const char *context, char *name, const char *value)
+putVariable(Variables *variables, const char *context, char *name,
+			const char *value)
 {
 	Variable   *var;
 	char	   *val;
 
-	var = lookupCreateVariable(st, context, name);
+	var = lookupCreateVariable(variables, context, name, true);
 	if (!var)
 		return false;
 
@@ -1409,12 +1570,12 @@ putVariable(CState *st, const char *context, char *name, const char *value)
 /* Assign a value to a variable, creating it if need be */
 /* Returns false on failure (bad name) */
 static bool
-putVariableValue(CState *st, const char *context, char *name,
-				  const PgBenchValue *value)
+putVariableValue(Variables *variables, const char *context, char *name,
+				  const PgBenchValue *value, bool aborted)
 {
 	Variable   *var;
 
-	var = lookupCreateVariable(st, context, name);
+	var = lookupCreateVariable(variables, context, name, aborted);
 	if (!var)
 		return false;
 
@@ -1429,12 +1590,13 @@ putVariableValue(CState *st, const char *context, char *name,
 /* Assign an integer value to a variable, creating it if need be */
 /* Returns false on failure (bad name) */
 static bool
-putVariableInt(CState *st, const char *context, char *name, int64 value)
+putVariableInt(Variables *variables, const char *context, char *name,
+			   int64 value, bool aborted)	/* aborted: print bad-name errors unconditionally, else only at DEBUG_FAILS */
 {
 	PgBenchValue val;
 
 	setIntValue(&val, value);
-	return putVariableValue(st, context, name, &val);
+	return putVariableValue(variables, context, name, &val, aborted);
 }
 
 /*
@@ -1489,7 +1651,7 @@ replaceVariable(char **sql, char *param, int len, char *value)
 }
 
 static char *
-assignVariables(CState *st, char *sql)
+assignVariables(Variables *variables, char *sql)
 {
 	char	   *p,
 			   *name,
@@ -1510,7 +1672,7 @@ assignVariables(CState *st, char *sql)
 			continue;
 		}
 
-		val = getVariable(st, name);
+		val = getVariable(variables, name);
 		free(name);
 		if (val == NULL)
 		{
@@ -1525,12 +1687,13 @@ assignVariables(CState *st, char *sql)
 }
 
 static void
-getQueryParams(CState *st, const Command *command, const char **params)
+getQueryParams(Variables *variables, const Command *command,
+			   const char **params)
 {
 	int			i;
 
 	for (i = 0; i < command->argc - 1; i++)
-		params[i] = getVariable(st, command->argv[i + 1]);
+		params[i] = getVariable(variables, command->argv[i + 1]);
 }
 
 static char *
@@ -1565,7 +1728,11 @@ coerceToBool(PgBenchValue *pval, bool *bval)
 	}
 	else /* NULL, INT or DOUBLE */
 	{
-		fprintf(stderr, "cannot coerce %s to boolean\n", valueTypeName(pval));
+		if (debug_level >= DEBUG_FAILS)
+		{
+			fprintf(stderr, "cannot coerce %s to boolean\n",
+					valueTypeName(pval));
+		}
 		*bval = false;			/* suppress uninitialized-variable warnings */
 		return false;
 	}
@@ -1610,7 +1777,8 @@ coerceToInt(PgBenchValue *pval, int64 *ival)
 
 		if (dval < PG_INT64_MIN || PG_INT64_MAX < dval)
 		{
-			fprintf(stderr, "double to int overflow for %f\n", dval);
+			if (debug_level >= DEBUG_FAILS)
+				fprintf(stderr, "double to int overflow for %f\n", dval);
 			return false;
 		}
 		*ival = (int64) dval;
@@ -1618,7 +1786,8 @@ coerceToInt(PgBenchValue *pval, int64 *ival)
 	}
 	else /* BOOLEAN or NULL */
 	{
-		fprintf(stderr, "cannot coerce %s to int\n", valueTypeName(pval));
+		if (debug_level >= DEBUG_FAILS)
+			fprintf(stderr, "cannot coerce %s to int\n", valueTypeName(pval));
 		return false;
 	}
 }
@@ -1639,7 +1808,9 @@ coerceToDouble(PgBenchValue *pval, double *dval)
 	}
 	else /* BOOLEAN or NULL */
 	{
-		fprintf(stderr, "cannot coerce %s to double\n", valueTypeName(pval));
+		if (debug_level >= DEBUG_FAILS)
+			fprintf(stderr, "cannot coerce %s to double\n",
+					valueTypeName(pval));
 		return false;
 	}
 }
@@ -1817,8 +1988,11 @@ evalStandardFunc(TState *thread, CState *st,
 
 	if (l != NULL)
 	{
-		fprintf(stderr,
-				"too many function arguments, maximum is %d\n", MAX_FARGS);
+		if (debug_level >= DEBUG_FAILS)
+		{
+			fprintf(stderr,
+					"too many function arguments, maximum is %d\n", MAX_FARGS);
+		}
 		return false;
 	}
 
@@ -1941,7 +2115,8 @@ evalStandardFunc(TState *thread, CState *st,
 						case PGBENCH_MOD:
 							if (ri == 0)
 							{
-								fprintf(stderr, "division by zero\n");
+								if (debug_level >= DEBUG_FAILS)
+									fprintf(stderr, "division by zero\n");
 								return false;
 							}
 							/* special handling of -1 divisor */
@@ -1952,7 +2127,11 @@ evalStandardFunc(TState *thread, CState *st,
 									/* overflow check (needed for INT64_MIN) */
 									if (li == PG_INT64_MIN)
 									{
-										fprintf(stderr, "bigint out of range\n");
+										if (debug_level >= DEBUG_FAILS)
+										{
+											fprintf(stderr,
+													"bigint out of range\n");
+										}
 										return false;
 									}
 									else
@@ -2187,20 +2366,22 @@ evalStandardFunc(TState *thread, CState *st,
 				/* check random range */
 				if (imin > imax)
 				{
-					fprintf(stderr, "empty range given to random\n");
+					if (debug_level >= DEBUG_FAILS)
+						fprintf(stderr, "empty range given to random\n");
 					return false;
 				}
 				else if (imax - imin < 0 || (imax - imin) + 1 < 0)
 				{
 					/* prevent int overflows in random functions */
-					fprintf(stderr, "random range is too large\n");
+					if (debug_level >= DEBUG_FAILS)
+						fprintf(stderr, "random range is too large\n");
 					return false;
 				}
 
 				if (func == PGBENCH_RANDOM)
 				{
 					Assert(nargs == 2);
-					setIntValue(retval, getrand(thread, imin, imax));
+					setIntValue(retval, getrand(&st->random_state, imin, imax));
 				}
 				else			/* gaussian & exponential */
 				{
@@ -2215,39 +2396,51 @@ evalStandardFunc(TState *thread, CState *st,
 					{
 						if (param < MIN_GAUSSIAN_PARAM)
 						{
-							fprintf(stderr,
-									"gaussian parameter must be at least %f "
-									"(not %f)\n", MIN_GAUSSIAN_PARAM, param);
+							if (debug_level >= DEBUG_FAILS)
+							{
+								fprintf(stderr,
+										"gaussian parameter must be at least %f (not %f)\n",
+										MIN_GAUSSIAN_PARAM, param);
+							}
 							return false;
 						}
 
 						setIntValue(retval,
-									getGaussianRand(thread, imin, imax, param));
+									getGaussianRand(&st->random_state, imin,
+													imax, param));
 					}
 					else if (func == PGBENCH_RANDOM_ZIPFIAN)
 					{
 						if (param <= 0.0 || param == 1.0 || param > MAX_ZIPFIAN_PARAM)
 						{
-							fprintf(stderr,
-									"zipfian parameter must be in range (0, 1) U (1, %d]"
-									" (got %f)\n", MAX_ZIPFIAN_PARAM, param);
+							if (debug_level >= DEBUG_FAILS)
+							{
+								fprintf(stderr,
+										"zipfian parameter must be in range (0, 1) U (1, %d] (got %f)\n",
+										MAX_ZIPFIAN_PARAM, param);
+							}
 							return false;
 						}
 						setIntValue(retval,
-									getZipfianRand(thread, imin, imax, param));
+									getZipfianRand(thread, &st->random_state,
+												   imin, imax, param));
 					}
 					else		/* exponential */
 					{
 						if (param <= 0.0)
 						{
-							fprintf(stderr,
-									"exponential parameter must be greater than zero"
-									" (got %f)\n", param);
+							if (debug_level >= DEBUG_FAILS)
+							{
+								fprintf(stderr,
+										"exponential parameter must be greater than zero (got %f)\n",
+										param);
+							}
 							return false;
 						}
 
 						setIntValue(retval,
-									getExponentialRand(thread, imin, imax, param));
+									getExponentialRand(&st->random_state, imin,
+													   imax, param));
 					}
 				}
 
@@ -2346,10 +2539,13 @@ evaluateExpr(TState *thread, CState *st, PgBenchExpr *expr, PgBenchValue *retval
 			{
 				Variable   *var;
 
-				if ((var = lookupVariable(st, expr->u.variable.varname)) == NULL)
+				if ((var = lookupVariable(&st->variables, expr->u.variable.varname)) == NULL)
 				{
-					fprintf(stderr, "undefined variable \"%s\"\n",
-							expr->u.variable.varname);
+					if (debug_level >= DEBUG_FAILS)
+					{
+						fprintf(stderr, "undefined variable \"%s\"\n",
+								expr->u.variable.varname);
+					}
 					return false;
 				}
 
@@ -2410,7 +2606,7 @@ getMetaCommand(const char *cmd)
  * Return true if succeeded, or false on error.
  */
 static bool
-runShellCommand(CState *st, char *variable, char **argv, int argc)
+runShellCommand(Variables *variables, char *variable, char **argv, int argc)
 {
 	char		command[SHELL_COMMAND_SIZE];
 	int			i,
@@ -2441,17 +2637,21 @@ runShellCommand(CState *st, char *variable, char **argv, int argc)
 		{
 			arg = argv[i] + 1;	/* a string literal starting with colons */
 		}
-		else if ((arg = getVariable(st, argv[i] + 1)) == NULL)
+		else if ((arg = getVariable(variables, argv[i] + 1)) == NULL)
 		{
-			fprintf(stderr, "%s: undefined variable \"%s\"\n",
-					argv[0], argv[i]);
+			if (debug_level >= DEBUG_FAILS)
+			{
+				fprintf(stderr, "%s: undefined variable \"%s\"\n",
+						argv[0], argv[i]);
+			}
 			return false;
 		}
 
 		arglen = strlen(arg);
 		if (len + arglen + (i > 0 ? 1 : 0) >= SHELL_COMMAND_SIZE - 1)
 		{
-			fprintf(stderr, "%s: shell command is too long\n", argv[0]);
+			if (debug_level >= DEBUG_FAILS)
+				fprintf(stderr, "%s: shell command is too long\n", argv[0]);
 			return false;
 		}
 
@@ -2468,7 +2668,7 @@ runShellCommand(CState *st, char *variable, char **argv, int argc)
 	{
 		if (system(command))
 		{
-			if (!timer_exceeded)
+			if (!timer_exceeded && debug_level >= DEBUG_FAILS)
 				fprintf(stderr, "%s: could not launch shell command\n", argv[0]);
 			return false;
 		}
@@ -2478,19 +2678,21 @@ runShellCommand(CState *st, char *variable, char **argv, int argc)
 	/* Execute the command with pipe and read the standard output. */
 	if ((fp = popen(command, "r")) == NULL)
 	{
-		fprintf(stderr, "%s: could not launch shell command\n", argv[0]);
+		if (debug_level >= DEBUG_FAILS)
+			fprintf(stderr, "%s: could not launch shell command\n", argv[0]);
 		return false;
 	}
 	if (fgets(res, sizeof(res), fp) == NULL)
 	{
-		if (!timer_exceeded)
+		if (!timer_exceeded && debug_level >= DEBUG_FAILS)
 			fprintf(stderr, "%s: could not read result of shell command\n", argv[0]);
 		(void) pclose(fp);
 		return false;
 	}
 	if (pclose(fp) < 0)
 	{
-		fprintf(stderr, "%s: could not close shell command\n", argv[0]);
+		if (debug_level >= DEBUG_FAILS)
+			fprintf(stderr, "%s: could not close shell command\n", argv[0]);
 		return false;
 	}
 
@@ -2500,11 +2702,14 @@ runShellCommand(CState *st, char *variable, char **argv, int argc)
 		endptr++;
 	if (*res == '\0' || *endptr != '\0')
 	{
-		fprintf(stderr, "%s: shell command must return an integer (not \"%s\")\n",
-				argv[0], res);
+		if (debug_level >= DEBUG_FAILS)
+		{
+			fprintf(stderr, "%s: shell command must return an integer (not \"%s\")\n",
+					argv[0], res);
+		}
 		return false;
 	}
-	if (!putVariableInt(st, "setshell", variable, retval))
+	if (!putVariableInt(variables, "setshell", variable, retval, false))
 		return false;
 
 #ifdef DEBUG
@@ -2521,11 +2726,45 @@ preparedStatementName(char *buffer, int file, int state)
 }
 
 static void
-commandFailed(CState *st, const char *cmd, const char *message)
+commandFailed(CState *st, const char *cmd, const char *message, bool aborted)	/* aborted: serious error, always report; otherwise report only at DEBUG_FAILS */
 {
+	/*
+	 * Always print an error message if the client is aborted...
+	 */
+	if (aborted)
+	{
+		fprintf(stderr,
+				"client %d aborted in command %d (%s) of script %d; %s\n",
+				st->id, st->command, cmd, st->use_file, message);
+		return;
+	}
+
+	/*
+	 * ... otherwise print an error message only if there's at least the
+	 * debugging mode for fails.
+	 */
+	if (debug_level < DEBUG_FAILS)
+		return;
+
+	if (st->first_failure.status == NO_FAILURE)
+	{
+		/*
+		 * This is the first failure during the execution of the current script.
+		 */
+		fprintf(stderr,
+				"client %d got a failure in command %d (%s) of script %d; %s\n",
+				st->id, st->command, cmd, st->use_file, message);
+	}
+	else
+	{
+		/*
+		 * This is not the first failure during the execution of the current
+		 * script.
+		 */
+		fprintf(stderr,
+				"client %d continues a failed transaction in command %d (%s) of script %d; %s\n",
+				st->id, st->command, cmd, st->use_file, message);
+	}
 }
 
 /* return a script number with a weighted choice. */
@@ -2538,7 +2777,7 @@ chooseScript(TState *thread)
 	if (num_scripts == 1)
 		return 0;
 
-	w = getrand(thread, 0, total_weight - 1);
+	w = getrand(&thread->random_state, 0, total_weight - 1);
 	do
 	{
 		w -= sql_script[i++].weight;
@@ -2558,9 +2797,9 @@ sendCommand(CState *st, Command *command)
 		char	   *sql;
 
 		sql = pg_strdup(command->argv[0]);
-		sql = assignVariables(st, sql);
+		sql = assignVariables(&st->variables, sql);
 
-		if (debug)
+		if (debug_level >= DEBUG_ALL)
 			fprintf(stderr, "client %d sending %s\n", st->id, sql);
 		r = PQsendQuery(st->con, sql);
 		free(sql);
@@ -2570,9 +2809,9 @@ sendCommand(CState *st, Command *command)
 		const char *sql = command->argv[0];
 		const char *params[MAX_ARGS];
 
-		getQueryParams(st, command, params);
+		getQueryParams(&st->variables, command, params);
 
-		if (debug)
+		if (debug_level >= DEBUG_ALL)
 			fprintf(stderr, "client %d sending %s\n", st->id, sql);
 		r = PQsendQueryParams(st->con, sql, command->argc - 1,
 							  NULL, params, NULL, NULL, 0);
@@ -2604,10 +2843,10 @@ sendCommand(CState *st, Command *command)
 			st->prepared[st->use_file] = true;
 		}
 
-		getQueryParams(st, command, params);
+		getQueryParams(&st->variables, command, params);
 		preparedStatementName(name, st->use_file, st->command);
 
-		if (debug)
+		if (debug_level >= DEBUG_ALL)
 			fprintf(stderr, "client %d sending %s\n", st->id, name);
 		r = PQsendQueryPrepared(st->con, name, command->argc - 1,
 								params, NULL, NULL, 0);
@@ -2617,10 +2856,9 @@ sendCommand(CState *st, Command *command)
 
 	if (r == 0)
 	{
-		if (debug)
+		if (debug_level >= DEBUG_ALL)
 			fprintf(stderr, "client %d could not send %s\n",
 					st->id, command->argv[0]);
-		st->ecnt++;
 		return false;
 	}
 	else
@@ -2632,17 +2870,20 @@ sendCommand(CState *st, Command *command)
  * of delay, in microseconds.  Returns true on success, false on error.
  */
 static bool
-evaluateSleep(CState *st, int argc, char **argv, int *usecs)
+evaluateSleep(Variables *variables, int argc, char **argv, int *usecs)
 {
 	char	   *var;
 	int			usec;
 
 	if (*argv[1] == ':')
 	{
-		if ((var = getVariable(st, argv[1] + 1)) == NULL)
+		if ((var = getVariable(variables, argv[1] + 1)) == NULL)
 		{
-			fprintf(stderr, "%s: undefined variable \"%s\"\n",
-					argv[0], argv[1]);
+			if (debug_level >= DEBUG_FAILS)
+			{
+				fprintf(stderr, "%s: undefined variable \"%s\"\n",
+						argv[0], argv[1]);
+			}
 			return false;
 		}
 		usec = atoi(var);
@@ -2665,6 +2906,169 @@ evaluateSleep(CState *st, int argc, char **argv, int *usecs)
 }
 
 /*
+ * Get the number of all processed transactions including skipped ones and
+ * errors.
+ */
+static int64
+getTotalCnt(const CState *st)
+{
+	return st->cnt + st->ecnt;	/* successful transactions (cnt) plus failed ones (ecnt) */
+}
+
+/*
+ * Copy an array of random state.
+ */
+static void
+copyRandomState(RandomState *destination, const RandomState *source)
+{
+	memcpy(destination->data, source->data, sizeof(unsigned short) * 3);	/* data is the 48-bit pg_erand48 state: unsigned short[3] */
+}
+
+/*
+ * Make a deep copy of variables array.
+ */
+static void
+copyVariables(Variables *destination_vars, const Variables *source_vars)
+{
+	Variable   *destination;
+	Variable   *current_destination;
+	const Variable *source;
+	const Variable *current_source;
+	int			nvariables;
+
+	if (!destination_vars || !source_vars)	/* nothing to copy if either side is missing */
+		return;
+
+	destination = destination_vars->array;
+	source = source_vars->array;
+	nvariables = source_vars->nvariables;
+
+	for (current_destination = destination;
+		 current_destination - destination < destination_vars->nvariables;
+		 ++current_destination)
+	{
+		pg_free(current_destination->name);	/* release strings of the variables being overwritten */
+		pg_free(current_destination->svalue);	/* svalue may be NULL; assumes pg_free(NULL) is a no-op -- TODO confirm */
+	}
+
+	destination_vars->array = pg_realloc(destination_vars->array,
+										 sizeof(Variable) * nvariables);
+	destination = destination_vars->array;	/* re-read: pg_realloc may have moved the array */
+
+	for (current_source = source, current_destination = destination;
+		 current_source - source < nvariables;
+		 ++current_source, ++current_destination)
+	{
+		current_destination->name = pg_strdup(current_source->name);	/* deep copy: strings are duplicated, not aliased */
+		if (current_source->svalue)
+			current_destination->svalue = pg_strdup(current_source->svalue);
+		else
+			current_destination->svalue = NULL;
+		current_destination->value = current_source->value;
+	}
+
+	destination_vars->nvariables = nvariables;
+	destination_vars->vars_sorted = source_vars->vars_sorted;
+}
+
+/*
+ * Returns true if this type of failure can be retried.
+ */
+static bool
+canRetryFailure(FailureStatus failure_status)
+{
+	return (failure_status == SERIALIZATION_FAILURE ||
+			failure_status == DEADLOCK_FAILURE);	/* only these two failure types are treated as transient */
+}
+
+/*
+ * Returns true if the failure can be retried.
+ */
+static bool
+canRetry(CState *st, instr_time *now)
+{
+	FailureStatus failure_status = st->first_failure.status;
+
+	Assert(failure_status != NO_FAILURE);
+
+	/* We can only retry serialization or deadlock failures. */
+	if (!canRetryFailure(failure_status))
+		return false;
+
+	/*
+	 * We must have at least one option to limit the retrying of failed
+	 * transactions.
+	 */
+	Assert(max_tries || max_tries_time);
+
+	/*
+	 * We cannot retry the failure if we have reached the maximum number of
+	 * tries.
+	 */
+	if (max_tries && st->retries + 1 >= max_tries)	/* retries counts prior attempts, so retries + 1 = tries already used */
+		return false;
+
+	/*
+	 * We cannot retry the failure if we spent too much time on this
+	 * transaction.
+	 */
+	if (max_tries_time)
+	{
+		if (INSTR_TIME_IS_ZERO(*now))	/* fetch the timestamp lazily, only when actually needed */
+			INSTR_TIME_SET_CURRENT(*now);
+
+		if (INSTR_TIME_GET_MICROSEC(*now) - st->txn_scheduled >= max_tries_time)
+			return false;
+	}
+
+	/* OK */
+	return true;
+}
+
+/*
+ * Process the conditional stack depending on the condition value; is used for
+ * the meta commands \if and \elif.
+ */
+static void
+executeCondition(CState *st, bool condition)
+{
+	Command    *command = sql_script[st->use_file].commands[st->command];	/* the \if or \elif command currently being executed */
+
+	/* execute or not depending on evaluated condition */
+	if (command->meta == META_IF)
+	{
+		conditional_stack_push(st->cstack,
+							   condition ? IFSTATE_TRUE : IFSTATE_FALSE);	/* open a new nesting level */
+	}
+	else if (command->meta == META_ELIF)
+	{
+		/* we should get here only if the "elif" needed evaluation */
+		Assert(conditional_stack_peek(st->cstack) == IFSTATE_FALSE);
+		conditional_stack_poke(st->cstack,
+							   condition ? IFSTATE_TRUE : IFSTATE_FALSE);	/* replace the state of the current level */
+	}
+}
+
+/*
+ * Get the failure status from the error code.
+ */
+static FailureStatus
+getFailureStatus(char *sqlState)
+{
+	if (sqlState)
+	{
+		if (strcmp(sqlState, ERRCODE_T_R_SERIALIZATION_FAILURE) == 0)
+			return SERIALIZATION_FAILURE;
+		else if (strcmp(sqlState, ERRCODE_T_R_DEADLOCK_DETECTED) == 0)
+			return DEADLOCK_FAILURE;
+		else if (strcmp(sqlState, ERRCODE_IN_FAILED_SQL_TRANSACTION) == 0)
+			return IN_FAILED_SQL_TRANSACTION;
+	}
+
+	return ANOTHER_FAILURE;	/* unrecognized error code, or NULL sqlState */
+}
+
+/*
  * Advance the state machine of a connection, if possible.
  */
 static void
@@ -2675,6 +3079,7 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 	instr_time	now;
 	bool		end_tx_processed = false;
 	int64		wait;
+	FailureStatus failure_status = NO_FAILURE;
 
 	/*
 	 * gettimeofday() isn't free, so we get the current timestamp lazily the
@@ -2705,7 +3110,7 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 
 				st->use_file = chooseScript(thread);
 
-				if (debug)
+				if (debug_level >= DEBUG_ALL)
 					fprintf(stderr, "client %d executing script \"%s\"\n", st->id,
 							sql_script[st->use_file].desc);
 
@@ -2715,6 +3120,11 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 					st->state = CSTATE_START_TX;
 				/* check consistency */
 				Assert(conditional_stack_empty(st->cstack));
+
+				/* reset transaction variables to default values */
+				st->first_failure.status = NO_FAILURE;
+				st->retries = 0;
+
 				break;
 
 				/*
@@ -2732,7 +3142,7 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 				 * away.
 				 */
 				Assert(throttle_delay > 0);
-				wait = getPoissonRand(thread, throttle_delay);
+				wait = getPoissonRand(&thread->random_state, throttle_delay);
 
 				thread->throttle_trigger += wait;
 				st->txn_scheduled = thread->throttle_trigger;
@@ -2762,16 +3172,17 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 						INSTR_TIME_SET_CURRENT(now);
 					now_us = INSTR_TIME_GET_MICROSEC(now);
 					while (thread->throttle_trigger < now_us - latency_limit &&
-						   (nxacts <= 0 || st->cnt < nxacts))
+						   (nxacts <= 0 || getTotalCnt(st) < nxacts))
 					{
 						processXactStats(thread, st, &now, true, agg);
 						/* next rendez-vous */
-						wait = getPoissonRand(thread, throttle_delay);
+						wait = getPoissonRand(&thread->random_state,
+											  throttle_delay);
 						thread->throttle_trigger += wait;
 						st->txn_scheduled = thread->throttle_trigger;
 					}
 					/* stop client if -t exceeded */
-					if (nxacts > 0 && st->cnt >= nxacts)
+					if (nxacts > 0 && getTotalCnt(st) >= nxacts)
 					{
 						st->state = CSTATE_FINISHED;
 						break;
@@ -2779,7 +3190,7 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 				}
 
 				st->state = CSTATE_THROTTLE;
-				if (debug)
+				if (debug_level >= DEBUG_ALL)
 					fprintf(stderr, "client %d throttling " INT64_FORMAT " us\n",
 							st->id, wait);
 				break;
@@ -2826,11 +3237,20 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 				}
 
 				/*
-				 * Record transaction start time under logging, progress or
-				 * throttling.
+				 * This is the first try to run this transaction. Remember its
+				 * parameters in case it fails or we need to repeat it in the
+				 * future.
+				 */
+				copyRandomState(&st->retry_state.random_state,
+								&st->random_state);
+				copyVariables(&st->retry_state.variables, &st->variables);
+
+				/*
+				 * Record transaction start time under logging, progress,
+				 * throttling, or if we have the maximum time of tries.
 				 */
 				if (use_log || progress || throttle_delay || latency_limit ||
-					per_script_stats)
+					per_script_stats || max_tries_time)
 				{
 					if (INSTR_TIME_IS_ZERO(now))
 						INSTR_TIME_SET_CURRENT(now);
@@ -2861,7 +3281,15 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 				 */
 				if (command == NULL)
 				{
-					st->state = CSTATE_END_TX;
+					if (st->first_failure.status == NO_FAILURE)
+					{
+						st->state = CSTATE_END_TX;
+					}
+					else
+					{
+						/* check if we can retry the failure */
+						st->state = CSTATE_RETRY;
+					}
 					break;
 				}
 
@@ -2869,7 +3297,7 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 				 * Record statement start time if per-command latencies are
 				 * requested
 				 */
-				if (is_latencies)
+				if (report_per_command)
 				{
 					if (INSTR_TIME_IS_ZERO(now))
 						INSTR_TIME_SET_CURRENT(now);
@@ -2880,7 +3308,8 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 				{
 					if (!sendCommand(st, command))
 					{
-						commandFailed(st, "SQL", "SQL command send failed");
+						commandFailed(st, "SQL", "SQL command send failed",
+									  true);
 						st->state = CSTATE_ABORTED;
 					}
 					else
@@ -2892,7 +3321,7 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 								i;
 					char	  **argv = command->argv;
 
-					if (debug)
+					if (debug_level >= DEBUG_ALL)
 					{
 						fprintf(stderr, "client %d executing \\%s", st->id, argv[0]);
 						for (i = 1; i < argc; i++)
@@ -2900,6 +3329,9 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 						fprintf(stderr, "\n");
 					}
 
+					/* assume success; set below if the meta command fails */
+					failure_status = NO_FAILURE;
+
 					if (command->meta == META_SLEEP)
 					{
 						/*
@@ -2911,10 +3343,13 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 						 */
 						int			usec;
 
-						if (!evaluateSleep(st, argc, argv, &usec))
+						if (!evaluateSleep(&st->variables, argc, argv, &usec))
 						{
-							commandFailed(st, "sleep", "execution of meta-command failed");
-							st->state = CSTATE_ABORTED;
+							commandFailed(st, "sleep",
+										  "execution of meta-command failed",
+										  false);
+							failure_status = ANOTHER_FAILURE;
+							st->state = CSTATE_FAILURE;
 							break;
 						}
 
@@ -2942,35 +3377,37 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 
 						if (!evaluateExpr(thread, st, expr, &result))
 						{
-							commandFailed(st, argv[0], "evaluation of meta-command failed");
-							st->state = CSTATE_ABORTED;
+							commandFailed(st, argv[0],
+										  "evaluation of meta-command failed",
+										  false);
+
+							/*
+							 * Do not ruin the following conditional commands,
+							 * if any.
+							 */
+							executeCondition(st, false);
+
+							failure_status = ANOTHER_FAILURE;
+							st->state = CSTATE_FAILURE;
 							break;
 						}
 
 						if (command->meta == META_SET)
 						{
-							if (!putVariableValue(st, argv[0], argv[1], &result))
+							if (!putVariableValue(&st->variables, argv[0],
+												  argv[1], &result, false))
 							{
-								commandFailed(st, "set", "assignment of meta-command failed");
-								st->state = CSTATE_ABORTED;
+								commandFailed(st, "set",
+											  "assignment of meta-command failed",
+											  false);
+								failure_status = ANOTHER_FAILURE;
+								st->state = CSTATE_FAILURE;
 								break;
 							}
 						}
 						else /* if and elif evaluated cases */
 						{
-							bool cond = valueTruth(&result);
-
-							/* execute or not depending on evaluated condition */
-							if (command->meta == META_IF)
-							{
-								conditional_stack_push(st->cstack, cond ? IFSTATE_TRUE : IFSTATE_FALSE);
-							}
-							else /* elif */
-							{
-								/* we should get here only if the "elif" needed evaluation */
-								Assert(conditional_stack_peek(st->cstack) == IFSTATE_FALSE);
-								conditional_stack_poke(st->cstack, cond ? IFSTATE_TRUE : IFSTATE_FALSE);
-							}
+							executeCondition(st, valueTruth(&result));
 						}
 					}
 					else if (command->meta == META_ELSE)
@@ -2999,7 +3436,9 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 					}
 					else if (command->meta == META_SETSHELL)
 					{
-						bool		ret = runShellCommand(st, argv[1], argv + 2, argc - 2);
+						bool		ret = runShellCommand(&st->variables,
+														  argv[1], argv + 2,
+														  argc - 2);
 
 						if (timer_exceeded) /* timeout */
 						{
@@ -3008,8 +3447,11 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 						}
 						else if (!ret)	/* on error */
 						{
-							commandFailed(st, "setshell", "execution of meta-command failed");
-							st->state = CSTATE_ABORTED;
+							commandFailed(st, "setshell",
+										  "execution of meta-command failed",
+										  false);
+							failure_status = ANOTHER_FAILURE;
+							st->state = CSTATE_FAILURE;
 							break;
 						}
 						else
@@ -3019,7 +3461,8 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 					}
 					else if (command->meta == META_SHELL)
 					{
-						bool		ret = runShellCommand(st, NULL, argv + 1, argc - 1);
+						bool		ret = runShellCommand(&st->variables, NULL,
+														  argv + 1, argc - 1);
 
 						if (timer_exceeded) /* timeout */
 						{
@@ -3028,8 +3471,11 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 						}
 						else if (!ret)	/* on error */
 						{
-							commandFailed(st, "shell", "execution of meta-command failed");
-							st->state = CSTATE_ABORTED;
+							commandFailed(st, "shell",
+										  "execution of meta-command failed",
+										  false);
+							failure_status = ANOTHER_FAILURE;
+							st->state = CSTATE_FAILURE;
 							break;
 						}
 						else
@@ -3134,37 +3580,55 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 				 * Wait for the current SQL command to complete
 				 */
 			case CSTATE_WAIT_RESULT:
-				command = sql_script[st->use_file].commands[st->command];
-				if (debug)
-					fprintf(stderr, "client %d receiving\n", st->id);
-				if (!PQconsumeInput(st->con))
-				{				/* there's something wrong */
-					commandFailed(st, "SQL", "perhaps the backend died while processing");
-					st->state = CSTATE_ABORTED;
-					break;
-				}
-				if (PQisBusy(st->con))
-					return;		/* don't have the whole result yet */
-
-				/*
-				 * Read and discard the query result;
-				 */
-				res = PQgetResult(st->con);
-				switch (PQresultStatus(res))
 				{
-					case PGRES_COMMAND_OK:
-					case PGRES_TUPLES_OK:
-					case PGRES_EMPTY_QUERY:
-						/* OK */
-						PQclear(res);
-						discard_response(st);
-						st->state = CSTATE_END_COMMAND;
-						break;
-					default:
-						commandFailed(st, "SQL", PQerrorMessage(st->con));
-						PQclear(res);
+					char	   *sqlState;
+
+					command = sql_script[st->use_file].commands[st->command];
+					if (debug_level >= DEBUG_ALL)
+						fprintf(stderr, "client %d receiving\n", st->id);
+					if (!PQconsumeInput(st->con))
+					{				/* there's something wrong */
+						commandFailed(st, "SQL",
+									  "perhaps the backend died while processing",
+									  true);
 						st->state = CSTATE_ABORTED;
 						break;
+					}
+					if (PQisBusy(st->con))
+						return;		/* don't have the whole result yet */
+
+					/*
+					 * Read and discard the query result;
+					 */
+					res = PQgetResult(st->con);
+					sqlState = PQresultErrorField(res, PG_DIAG_SQLSTATE);
+					switch (PQresultStatus(res))
+					{
+						case PGRES_COMMAND_OK:
+						case PGRES_TUPLES_OK:
+						case PGRES_EMPTY_QUERY:
+							/* OK */
+							PQclear(res);
+							discard_response(st);
+							failure_status = NO_FAILURE;
+							st->state = CSTATE_END_COMMAND;
+							break;
+						case PGRES_NONFATAL_ERROR:
+						case PGRES_FATAL_ERROR:
+							failure_status = getFailureStatus(sqlState);
+							commandFailed(st, "SQL", PQerrorMessage(st->con),
+										  false);
+							PQclear(res);
+							discard_response(st);
+							st->state = CSTATE_FAILURE;
+							break;
+						default:
+							commandFailed(st, "SQL", PQerrorMessage(st->con),
+										  true);
+							PQclear(res);
+							st->state = CSTATE_ABORTED;
+							break;
+					}
 				}
 				break;
 
@@ -3193,7 +3657,7 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 				 * in thread-local data structure, if per-command latencies
 				 * are requested.
 				 */
-				if (is_latencies)
+				if (report_per_command)
 				{
 					if (INSTR_TIME_IS_ZERO(now))
 						INSTR_TIME_SET_CURRENT(now);
@@ -3212,6 +3676,139 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 				break;
 
 				/*
+				 * Remember the failure and go ahead with the next command.
+				 */
+			case CSTATE_FAILURE:
+
+				Assert(failure_status != NO_FAILURE);
+
+				/*
+				 * All subsequent failures are reported as "retried" or "failed"
+				 * depending on whether the first failure of this transaction
+				 * can be retried, so remember only the first failure.
+				 */
+				if (st->first_failure.status == NO_FAILURE)
+				{
+					st->first_failure.status = failure_status;
+					st->first_failure.command = st->command;
+				}
+
+				/* Go ahead with the next command, to be executed or skipped */
+				st->command++;
+				st->state = conditional_active(st->cstack) ?
+					CSTATE_START_COMMAND : CSTATE_SKIP_COMMAND;
+				break;
+
+			/*
+			 * Retry the failed transaction if possible.
+			 */
+			case CSTATE_RETRY:
+				{
+					double		used_time = 0;
+
+					command = sql_script[st->use_file].commands[st->first_failure.command];
+
+					if (max_tries_time)
+					{
+						if (INSTR_TIME_IS_ZERO(now))
+							INSTR_TIME_SET_CURRENT(now);
+
+						used_time = (100.0 * (INSTR_TIME_GET_MICROSEC(now) -
+							st->txn_scheduled) / max_tries_time);
+					}
+
+					if (canRetry(st, &now))
+					{
+						/*
+						 * The failed transaction will be retried. So accumulate
+						 * the retry.
+						 */
+						st->retries++;
+						command->retries++;
+
+						if (debug_level >= DEBUG_FAILS)
+						{
+							fprintf(stderr,
+									"client %d repeats the failed transaction (try %d",
+									st->id, st->retries + 1);
+
+							if (max_tries)
+								fprintf(stderr, "/%d", max_tries);
+
+							if (max_tries_time)
+							{
+								fprintf(stderr,
+										", %.3f%% of the maximum time of tries was used",
+										used_time);
+							}
+
+							fprintf(stderr, ")\n");
+						}
+
+						/*
+						 * Reset the execution parameters as they were at the
+						 * beginning of the transaction.
+						 */
+						copyRandomState(&st->random_state,
+										&st->retry_state.random_state);
+						copyVariables(&st->variables, &st->retry_state.variables);
+
+						/* Process the first transaction command */
+						st->command = 0;
+						st->first_failure.status = NO_FAILURE;
+						st->state = CSTATE_START_COMMAND;
+					}
+					else
+					{
+						/*
+						 * We will not be able to retry this failed transaction.
+						 * So accumulate the error.
+						 */
+						command->errors++;
+						if (st->first_failure.status == IN_FAILED_SQL_TRANSACTION)
+							command->errors_in_failed_tx++;
+
+						if (debug_level >= DEBUG_FAILS)
+						{
+							fprintf(stderr,
+									"client %d ends the failed transaction (try %d",
+									st->id, st->retries + 1);
+
+							/*
+							 * Report the actual number and/or time of
+							 * tries. We do not need this information if this
+							 * type of failure can never be retried.
+							 */
+							if (canRetryFailure(st->first_failure.status))
+							{
+								if (max_tries)
+									fprintf(stderr, "/%d", max_tries);
+
+								if (max_tries_time)
+								{
+									fprintf(stderr,
+											", %.3f%% of the maximum time of tries was used",
+											used_time);
+								}
+							}
+
+							fprintf(stderr, ")\n");
+						}
+
+						/*
+						 * Reset the execution parameters as they were at the
+						 * beginning of the transaction except for a random
+						 * state.
+						 */
+						copyVariables(&st->variables, &st->retry_state.variables);
+
+						/* End the failed transaction */
+						st->state = CSTATE_END_TX;
+					}
+				}
+				break;
+
+				/*
 				 * End of transaction.
 				 */
 			case CSTATE_END_TX:
@@ -3232,7 +3829,8 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 					INSTR_TIME_SET_ZERO(now);
 				}
 
-				if ((st->cnt >= nxacts && duration <= 0) || timer_exceeded)
+				if ((getTotalCnt(st) >= nxacts && duration <= 0) ||
+					timer_exceeded)
 				{
 					/* exit success */
 					st->state = CSTATE_FINISHED;
@@ -3292,7 +3890,7 @@ doLog(TState *thread, CState *st,
 	 * to the random sample.
 	 */
 	if (sample_rate != 0.0 &&
-		pg_erand48(thread->random_state) > sample_rate)
+		pg_erand48(thread->random_state.data) > sample_rate)
 		return;
 
 	/* should we aggregate the results or not? */
@@ -3308,13 +3906,15 @@ doLog(TState *thread, CState *st,
 		while (agg->start_time + agg_interval <= now)
 		{
 			/* print aggregated report to logfile */
-			fprintf(logfile, "%ld " INT64_FORMAT " %.0f %.0f %.0f %.0f",
+			fprintf(logfile, "%ld " INT64_FORMAT " %.0f %.0f %.0f %.0f " INT64_FORMAT " " INT64_FORMAT,
 					(long) agg->start_time,
 					agg->cnt,
 					agg->latency.sum,
 					agg->latency.sum2,
 					agg->latency.min,
-					agg->latency.max);
+					agg->latency.max,
+					agg->errors,
+					agg->errors_in_failed_tx);
 			if (throttle_delay)
 			{
 				fprintf(logfile, " %.0f %.0f %.0f %.0f",
@@ -3325,6 +3925,10 @@ doLog(TState *thread, CState *st,
 				if (latency_limit)
 					fprintf(logfile, " " INT64_FORMAT, agg->skipped);
 			}
+			if (max_tries > 1 || max_tries_time)
+				fprintf(logfile, " " INT64_FORMAT " " INT64_FORMAT,
+						agg->retried,
+						agg->retries);
 			fputc('\n', logfile);
 
 			/* reset data and move to next interval */
@@ -3332,7 +3936,8 @@ doLog(TState *thread, CState *st,
 		}
 
 		/* accumulate the current transaction */
-		accumStats(agg, skipped, latency, lag);
+		accumStats(agg, skipped, latency, lag, st->first_failure.status,
+				   st->retries);
 	}
 	else
 	{
@@ -3342,14 +3947,25 @@ doLog(TState *thread, CState *st,
 		gettimeofday(&tv, NULL);
 		if (skipped)
 			fprintf(logfile, "%d " INT64_FORMAT " skipped %d %ld %ld",
-					st->id, st->cnt, st->use_file,
+					st->id, getTotalCnt(st), st->use_file,
 					(long) tv.tv_sec, (long) tv.tv_usec);
-		else
+		else if (st->first_failure.status == NO_FAILURE)
 			fprintf(logfile, "%d " INT64_FORMAT " %.0f %d %ld %ld",
-					st->id, st->cnt, latency, st->use_file,
+					st->id, getTotalCnt(st), latency, st->use_file,
+					(long) tv.tv_sec, (long) tv.tv_usec);
+		else if (st->first_failure.status == IN_FAILED_SQL_TRANSACTION)
+			fprintf(logfile, "%d " INT64_FORMAT " in_failed_tx %d %ld %ld",
+					st->id, getTotalCnt(st), st->use_file,
 					(long) tv.tv_sec, (long) tv.tv_usec);
+		else
+			fprintf(logfile, "%d " INT64_FORMAT " failed %d %ld %ld",
+					st->id, getTotalCnt(st), st->use_file,
+					(long) tv.tv_sec, (long) tv.tv_usec);
+
 		if (throttle_delay)
 			fprintf(logfile, " %.0f", lag);
+		if (max_tries > 1 || max_tries_time)
+			fprintf(logfile, " %d", st->retries);
 		fputc('\n', logfile);
 	}
 }
@@ -3369,7 +3985,7 @@ processXactStats(TState *thread, CState *st, instr_time *now,
 	bool		thread_details = progress || throttle_delay || latency_limit,
 				detailed = thread_details || use_log || per_script_stats;
 
-	if (detailed && !skipped)
+	if (detailed && !skipped && st->first_failure.status == NO_FAILURE)
 	{
 		if (INSTR_TIME_IS_ZERO(*now))
 			INSTR_TIME_SET_CURRENT(*now);
@@ -3382,7 +3998,8 @@ processXactStats(TState *thread, CState *st, instr_time *now,
 	if (thread_details)
 	{
 		/* keep detailed thread stats */
-		accumStats(&thread->stats, skipped, latency, lag);
+		accumStats(&thread->stats, skipped, latency, lag,
+				   st->first_failure.status, st->retries);
 
 		/* count transactions over the latency limit, if needed */
 		if (latency_limit && latency > latency_limit)
@@ -3390,19 +4007,24 @@ processXactStats(TState *thread, CState *st, instr_time *now,
 	}
 	else
 	{
-		/* no detailed stats, just count */
-		thread->stats.cnt++;
+		/* no detailed stats */
+		accumStats(&thread->stats, skipped, 0, 0, st->first_failure.status,
+				   st->retries);
 	}
 
 	/* client stat is just counting */
-	st->cnt++;
+	if (st->first_failure.status == NO_FAILURE)
+		st->cnt++;
+	else
+		st->ecnt++;
 
 	if (use_log)
 		doLog(thread, st, agg, skipped, latency, lag);
 
 	/* XXX could use a mutex here, but we choose not to */
 	if (per_script_stats)
-		accumStats(&sql_script[st->use_file].stats, skipped, latency, lag);
+		accumStats(&sql_script[st->use_file].stats, skipped, latency, lag,
+				   st->first_failure.status, st->retries);
 }
 
 
@@ -4535,7 +5157,8 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 	double		time_include,
 				tps_include,
 				tps_exclude;
-	int64		ntx = total->cnt - total->skipped;
+	int64		ntx = total->cnt - total->skipped,
+				total_ntx = total->cnt + total->errors;
 	int			i,
 				totalCacheOverflows = 0;
 
@@ -4556,8 +5179,8 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 	if (duration <= 0)
 	{
 		printf("number of transactions per client: %d\n", nxacts);
-		printf("number of transactions actually processed: " INT64_FORMAT "/%d\n",
-			   ntx, nxacts * nclients);
+		printf("number of transactions actually processed: " INT64_FORMAT "/" INT64_FORMAT "\n",
+			   ntx, total_ntx);
 	}
 	else
 	{
@@ -4565,6 +5188,32 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 		printf("number of transactions actually processed: " INT64_FORMAT "\n",
 			   ntx);
 	}
+
+	if (total->errors > 0)
+		printf("number of errors: " INT64_FORMAT " (%.3f%%)\n",
+			   total->errors, 100.0 * total->errors / total_ntx);
+
+	if (total->errors_in_failed_tx > 0)
+		printf("number of errors \"in failed SQL transaction\": " INT64_FORMAT " (%.3f%%)\n",
+			   total->errors_in_failed_tx,
+			   100.0 * total->errors_in_failed_tx / total_ntx);
+
+	/*
+	 * It can be non-zero only if max_tries is greater than one or
+	 * max_tries_time is used.
+	 */
+	if (total->retried > 0)
+	{
+		printf("number of retried: " INT64_FORMAT " (%.3f%%)\n",
+			   total->retried, 100.0 * total->retried / total_ntx);
+		printf("number of retries: " INT64_FORMAT "\n", total->retries);
+	}
+
+	if (max_tries)
+		printf("maximum number of tries: %d\n", max_tries);
+	if (max_tries_time)
+		printf("maximum time of tries: %.1f ms\n", max_tries_time / 1000.0);
+
 	/* Report zipfian cache overflow */
 	for (i = 0; i < nthreads; i++)
 	{
@@ -4594,8 +5243,14 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 	else
 	{
 		/* no measurement, show average latency computed from run time */
-		printf("latency average = %.3f ms\n",
-			   1000.0 * time_include * nclients / total->cnt);
+		printf("latency average = %.3f ms",
+			   1000.0 * time_include * nclients / total_ntx);
+
+		/* this statistics includes both successful and failed transactions */
+		if (total->errors > 0)
+			printf(" (including errors)");
+
+		printf("\n");
 	}
 
 	if (throttle_delay)
@@ -4614,7 +5269,7 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 	printf("tps = %f (excluding connections establishing)\n", tps_exclude);
 
 	/* Report per-script/command statistics */
-	if (per_script_stats || is_latencies)
+	if (per_script_stats || report_per_command)
 	{
 		int			i;
 
@@ -4623,6 +5278,7 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 			if (per_script_stats)
 			{
 				StatsData  *sstats = &sql_script[i].stats;
+				int64		script_total_ntx = sstats->cnt + sstats->errors;
 
 				printf("SQL script %d: %s\n"
 					   " - weight: %d (targets %.1f%% of total)\n"
@@ -4631,9 +5287,33 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 					   sql_script[i].weight,
 					   100.0 * sql_script[i].weight / total_weight,
 					   sstats->cnt,
-					   100.0 * sstats->cnt / total->cnt,
+					   100.0 * sstats->cnt / script_total_ntx,
 					   (sstats->cnt - sstats->skipped) / time_include);
 
+				if (total->errors > 0)
+					printf(" - number of errors: " INT64_FORMAT " (%.3f%%)\n",
+						   sstats->errors,
+						   100.0 * sstats->errors / script_total_ntx);
+
+				if (total->errors_in_failed_tx > 0)
+					printf(" - number of errors \"in failed SQL transaction\": " INT64_FORMAT " (%.3f%%)\n",
+						   sstats->errors_in_failed_tx,
+						   (100.0 * sstats->errors_in_failed_tx /
+							script_total_ntx));
+
+				/*
+				 * It can be non-zero only if max_tries is greater than one or
+				 * max_tries_time is used.
+				 */
+				if (total->retried > 0)
+				{
+					printf(" - number of retried: " INT64_FORMAT " (%.3f%%)\n",
+						   sstats->retried,
+						   100.0 * sstats->retried / script_total_ntx);
+					printf(" - number of retries: " INT64_FORMAT "\n",
+						   sstats->retries);
+				}
+
 				if (throttle_delay && latency_limit && sstats->cnt > 0)
 					printf(" - number of transactions skipped: " INT64_FORMAT " (%.3f%%)\n",
 						   sstats->skipped,
@@ -4642,15 +5322,33 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 				printSimpleStats(" - latency", &sstats->latency);
 			}
 
-			/* Report per-command latencies */
-			if (is_latencies)
+			/* Report per-command latencies and errors */
+			if (report_per_command)
 			{
 				Command   **commands;
 
 				if (per_script_stats)
-					printf(" - statement latencies in milliseconds:\n");
+					printf(" - statement latencies in milliseconds");
 				else
-					printf("statement latencies in milliseconds:\n");
+					printf("statement latencies in milliseconds");
+
+				if (total->errors > 0)
+				{
+					printf("%s errors",
+						   ((total->errors_in_failed_tx == 0 &&
+							total->retried == 0) ?
+							" and" : ","));
+				}
+				if (total->errors_in_failed_tx > 0)
+				{
+					printf("%s errors \"in failed SQL transaction\"",
+						   total->retried == 0 ? " and" : ",");
+				}
+				if (total->retried > 0)
+				{
+					printf(" and retries");
+				}
+				printf(":\n");
 
 				for (commands = sql_script[i].commands;
 					 *commands != NULL;
@@ -4658,10 +5356,25 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 				{
 					SimpleStats *cstats = &(*commands)->stats;
 
-					printf("   %11.3f  %s\n",
+					printf("   %11.3f",
 						   (cstats->count > 0) ?
-						   1000.0 * cstats->sum / cstats->count : 0.0,
-						   (*commands)->line);
+						   1000.0 * cstats->sum / cstats->count : 0.0);
+					if (total->errors > 0)
+					{
+						printf("  %20" INT64_MODIFIER "d",
+							   (*commands)->errors);
+					}
+					if (total->errors_in_failed_tx > 0)
+					{
+						printf("  %20" INT64_MODIFIER "d",
+							   (*commands)->errors_in_failed_tx);
+					}
+					if (total->retried > 0)
+					{
+						printf("  %20" INT64_MODIFIER "d",
+							   (*commands)->retries);
+					}
+					printf("  %s\n", (*commands)->line);
 				}
 			}
 		}
@@ -4716,6 +5429,17 @@ set_random_seed(const char *seed)
 	return true;
 }
 
+/*
+ * Initialize the random state of the client/thread.
+ */
+static void
+initRandomState(RandomState *random_state)
+{
+	random_state->data[0] = random();
+	random_state->data[1] = random();
+	random_state->data[2] = random();
+}
+
 
 int
 main(int argc, char **argv)
@@ -4725,7 +5449,7 @@ main(int argc, char **argv)
 		{"builtin", required_argument, NULL, 'b'},
 		{"client", required_argument, NULL, 'c'},
 		{"connect", no_argument, NULL, 'C'},
-		{"debug", no_argument, NULL, 'd'},
+		{"debug", required_argument, NULL, 'd'},
 		{"define", required_argument, NULL, 'D'},
 		{"file", required_argument, NULL, 'f'},
 		{"fillfactor", required_argument, NULL, 'F'},
@@ -4740,7 +5464,7 @@ main(int argc, char **argv)
 		{"progress", required_argument, NULL, 'P'},
 		{"protocol", required_argument, NULL, 'M'},
 		{"quiet", no_argument, NULL, 'q'},
-		{"report-latencies", no_argument, NULL, 'r'},
+		{"report-per-command", no_argument, NULL, 'r'},
 		{"rate", required_argument, NULL, 'R'},
 		{"scale", required_argument, NULL, 's'},
 		{"select-only", no_argument, NULL, 'S'},
@@ -4759,6 +5483,8 @@ main(int argc, char **argv)
 		{"log-prefix", required_argument, NULL, 7},
 		{"foreign-keys", no_argument, NULL, 8},
 		{"random-seed", required_argument, NULL, 9},
+		{"max-tries", required_argument, NULL, 10},
+		{"max-tries-time", required_argument, NULL, 11},
 		{NULL, 0, NULL, 0}
 	};
 
@@ -4834,7 +5560,7 @@ main(int argc, char **argv)
 		exit(1);
 	}
 
-	while ((c = getopt_long(argc, argv, "iI:h:nvp:dqb:SNc:j:Crs:t:T:U:lf:D:F:M:P:R:L:", long_options, &optindex)) != -1)
+	while ((c = getopt_long(argc, argv, "iI:h:nvp:d:qb:SNc:j:Crs:t:T:U:lf:D:F:M:P:R:L:", long_options, &optindex)) != -1)
 	{
 		char	   *script;
 
@@ -4864,8 +5590,22 @@ main(int argc, char **argv)
 				pgport = pg_strdup(optarg);
 				break;
 			case 'd':
-				debug++;
-				break;
+				{
+					for (debug_level = 0;
+						 debug_level < NUM_DEBUGLEVEL;
+						 debug_level++)
+					{
+						if (strcmp(optarg, DEBUGLEVEl[debug_level]) == 0)
+							break;
+					}
+					if (debug_level >= NUM_DEBUGLEVEL)
+					{
+						fprintf(stderr, "invalid debug level (-d): \"%s\"\n",
+								optarg);
+						exit(1);
+					}
+					break;
+				}
 			case 'c':
 				benchmarking_option_set = true;
 				nclients = atoi(optarg);
@@ -4917,7 +5657,7 @@ main(int argc, char **argv)
 				break;
 			case 'r':
 				benchmarking_option_set = true;
-				is_latencies = true;
+				report_per_command = true;
 				break;
 			case 's':
 				scale_given = true;
@@ -4998,7 +5738,7 @@ main(int argc, char **argv)
 					}
 
 					*p++ = '\0';
-					if (!putVariable(&state[0], "option", optarg, p))
+					if (!putVariable(&state[0].variables, "option", optarg, p))
 						exit(1);
 				}
 				break;
@@ -5114,6 +5854,34 @@ main(int argc, char **argv)
 					exit(1);
 				}
 				break;
+			case 10:			/* max-tries */
+				{
+					int32		max_tries_arg = atoi(optarg);
+
+					if (max_tries_arg <= 0)
+					{
+						fprintf(stderr, "invalid number of maximum tries: \"%s\"\n",
+								optarg);
+						exit(1);
+					}
+					benchmarking_option_set = true;
+					max_tries = (uint32) max_tries_arg;
+				}
+				break;
+			case 11:			/* max-tries-time */
+				{
+					double		max_tries_time_ms = atof(optarg);
+
+					if (max_tries_time_ms <= 0.0)
+					{
+						fprintf(stderr, "invalid maximum time of tries: \"%s\"\n",
+								optarg);
+						exit(1);
+					}
+					benchmarking_option_set = true;
+					max_tries_time = (uint64) (max_tries_time_ms * 1000);
+				}
+				break;
 			default:
 				fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
 				exit(1);
@@ -5283,6 +6051,10 @@ main(int argc, char **argv)
 		exit(1);
 	}
 
+	/* If neither limit was given, default to a single try (no retries) */
+	if (!max_tries && !max_tries_time)
+		max_tries = 1;
+
 	/*
 	 * save main process id in the global variable because process id will be
 	 * changed after fork.
@@ -5300,19 +6072,19 @@ main(int argc, char **argv)
 			int			j;
 
 			state[i].id = i;
-			for (j = 0; j < state[0].nvariables; j++)
+			for (j = 0; j < state[0].variables.nvariables; j++)
 			{
-				Variable   *var = &state[0].variables[j];
+				Variable   *var = &state[0].variables.array[j];
 
 				if (var->value.type != PGBT_NO_VALUE)
 				{
-					if (!putVariableValue(&state[i], "startup",
-										   var->name, &var->value))
+					if (!putVariableValue(&state[i].variables, "startup",
+										   var->name, &var->value, true))
 						exit(1);
 				}
 				else
 				{
-					if (!putVariable(&state[i], "startup",
+					if (!putVariable(&state[i].variables, "startup",
 									 var->name, var->svalue))
 						exit(1);
 				}
@@ -5324,9 +6096,10 @@ main(int argc, char **argv)
 	for (i = 0; i < nclients; i++)
 	{
 		state[i].cstack = conditional_stack_create();
+		initRandomState(&state[i].random_state);
 	}
 
-	if (debug)
+	if (debug_level >= DEBUG_ALL)
 	{
 		if (duration <= 0)
 			printf("pghost: %s pgport: %s nclients: %d nxacts: %d dbName: %s\n",
@@ -5387,11 +6160,12 @@ main(int argc, char **argv)
 	 * :scale variables normally get -s or database scale, but don't override
 	 * an explicit -D switch
 	 */
-	if (lookupVariable(&state[0], "scale") == NULL)
+	if (lookupVariable(&state[0].variables, "scale") == NULL)
 	{
 		for (i = 0; i < nclients; i++)
 		{
-			if (!putVariableInt(&state[i], "startup", "scale", scale))
+			if (!putVariableInt(&state[i].variables, "startup", "scale", scale,
+								true))
 				exit(1);
 		}
 	}
@@ -5400,15 +6174,18 @@ main(int argc, char **argv)
 	 * Define a :client_id variable that is unique per connection. But don't
 	 * override an explicit -D switch.
 	 */
-	if (lookupVariable(&state[0], "client_id") == NULL)
+	if (lookupVariable(&state[0].variables, "client_id") == NULL)
 	{
 		for (i = 0; i < nclients; i++)
-			if (!putVariableInt(&state[i], "startup", "client_id", i))
+		{
+			if (!putVariableInt(&state[i].variables, "startup", "client_id", i,
+								true))
 				exit(1);
+		}
 	}
 
 	/* set default seed for hash functions */
-	if (lookupVariable(&state[0], "default_seed") == NULL)
+	if (lookupVariable(&state[0].variables, "default_seed") == NULL)
 	{
 		uint64	seed = ((uint64) (random() & 0xFFFF) << 48) |
 					   ((uint64) (random() & 0xFFFF) << 32) |
@@ -5416,15 +6193,17 @@ main(int argc, char **argv)
 					   (uint64) (random() & 0xFFFF);
 
 		for (i = 0; i < nclients; i++)
-			if (!putVariableInt(&state[i], "startup", "default_seed", (int64) seed))
+			if (!putVariableInt(&state[i].variables, "startup", "default_seed",
+								(int64) seed, true))
 				exit(1);
 	}
 
 	/* set random seed unless overwritten */
-	if (lookupVariable(&state[0], "random_seed") == NULL)
+	if (lookupVariable(&state[0].variables, "random_seed") == NULL)
 	{
 		for (i = 0; i < nclients; i++)
-			if (!putVariableInt(&state[i], "startup", "random_seed", random_seed))
+			if (!putVariableInt(&state[i].variables, "startup", "random_seed",
+								random_seed, true))
 				exit(1);
 	}
 
@@ -5457,9 +6236,7 @@ main(int argc, char **argv)
 		thread->state = &state[nclients_dealt];
 		thread->nstate =
 			(nclients - nclients_dealt + nthreads - i - 1) / (nthreads - i);
-		thread->random_state[0] = random();
-		thread->random_state[1] = random();
-		thread->random_state[2] = random();
+		initRandomState(&thread->random_state);
 		thread->logfile = NULL; /* filled in later */
 		thread->latency_late = 0;
 		thread->zipf_cache.nb_cells = 0;
@@ -5541,6 +6318,10 @@ main(int argc, char **argv)
 		mergeSimpleStats(&stats.lag, &thread->stats.lag);
 		stats.cnt += thread->stats.cnt;
 		stats.skipped += thread->stats.skipped;
+		stats.retries += thread->stats.retries;
+		stats.retried += thread->stats.retried;
+		stats.errors += thread->stats.errors;
+		stats.errors_in_failed_tx += thread->stats.errors_in_failed_tx;
 		latency_late += thread->latency_late;
 		INSTR_TIME_ADD(conn_total_time, thread->conn_time);
 	}
@@ -5825,7 +6606,11 @@ threadRun(void *arg)
 				/* generate and show report */
 				StatsData	cur;
 				int64		run = now - last_report,
-							ntx;
+							ntx,
+							retries,
+							retried,
+							errors,
+							errors_in_failed_tx;
 				double		tps,
 							total_run,
 							latency,
@@ -5852,6 +6637,11 @@ threadRun(void *arg)
 					mergeSimpleStats(&cur.lag, &thread[i].stats.lag);
 					cur.cnt += thread[i].stats.cnt;
 					cur.skipped += thread[i].stats.skipped;
+					cur.retries += thread[i].stats.retries;
+					cur.retried += thread[i].stats.retried;
+					cur.errors += thread[i].stats.errors;
+					cur.errors_in_failed_tx +=
+						thread[i].stats.errors_in_failed_tx;
 				}
 
 				/* we count only actually executed transactions */
@@ -5869,6 +6659,11 @@ threadRun(void *arg)
 				{
 					latency = sqlat = stdev = lag = 0;
 				}
+				retries = cur.retries - last.retries;
+				retried = cur.retried - last.retried;
+				errors = cur.errors - last.errors;
+				errors_in_failed_tx = cur.errors_in_failed_tx -
+					last.errors_in_failed_tx;
 
 				if (progress_timestamp)
 				{
@@ -5894,6 +6689,14 @@ threadRun(void *arg)
 						"progress: %s, %.1f tps, lat %.3f ms stddev %.3f",
 						tbuf, tps, latency, stdev);
 
+				if (errors > 0)
+				{
+					fprintf(stderr, ", " INT64_FORMAT " failed" , errors);
+					if (errors_in_failed_tx > 0)
+						fprintf(stderr, " (" INT64_FORMAT " in failed tx)",
+								errors_in_failed_tx);
+				}
+
 				if (throttle_delay)
 				{
 					fprintf(stderr, ", lag %.3f ms", lag);
@@ -5901,6 +6704,16 @@ threadRun(void *arg)
 						fprintf(stderr, ", " INT64_FORMAT " skipped",
 								cur.skipped - last.skipped);
 				}
+
+				/*
+				 * It can be non-zero only if max_tries is greater than one or
+				 * max_tries_time is used.
+				 */
+				if (retried > 0)
+				{
+					fprintf(stderr, ", " INT64_FORMAT " retried, " INT64_FORMAT " retries",
+							retried, retries);
+				}
 				fprintf(stderr, "\n");
 
 				last = cur;
diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl
index be08b20..96b3876 100644
--- a/src/bin/pgbench/t/001_pgbench_with_server.pl
+++ b/src/bin/pgbench/t/001_pgbench_with_server.pl
@@ -118,23 +118,28 @@ pgbench(
 	[   qr{builtin: TPC-B},
 		qr{clients: 2\b},
 		qr{processed: 10/10},
-		qr{mode: simple} ],
+		qr{mode: simple},
+		qr{maximum number of tries: 1},
+		qr{^((?!maximum time of tries)(.|\n))*$} ],
 	[qr{^$}],
 	'pgbench tpcb-like');
 
 pgbench(
-'--transactions=20 --client=5 -M extended --builtin=si -C --no-vacuum -s 1',
+'--transactions=20 --client=5 -M extended --builtin=si -C --no-vacuum -s 1'
+	  . ' --max-tries-time 1',    # no-op, just for testing
 	0,
 	[   qr{builtin: simple update},
 		qr{clients: 5\b},
 		qr{threads: 1\b},
 		qr{processed: 100/100},
-		qr{mode: extended} ],
+		qr{mode: extended},
+		qr{maximum time of tries: 1},
+		qr{^((?!maximum number of tries)(.|\n))*$} ],
 	[qr{scale option ignored}],
 	'pgbench simple update');
 
 pgbench(
-	'-t 100 -c 7 -M prepared -b se --debug',
+	'-t 100 -c 7 -M prepared -b se --debug all',
 	0,
 	[   qr{builtin: select only},
 		qr{clients: 7\b},
@@ -491,6 +496,10 @@ my @errors = (
 \set i 0
 SELECT LEAST(:i, :i, :i, :i, :i, :i, :i, :i, :i, :i, :i);
 } ],
+	[   'sql division by zero', 0, [qr{ERROR:  division by zero}],
+		q{-- SQL division by zero
+	SELECT 1 / 0;
+} ],
 
 	# SHELL
 	[   'shell bad command',               0,
@@ -621,6 +630,16 @@ SELECT LEAST(:i, :i, :i, :i, :i, :i, :i, :i, :i, :i, :i);
 	[   'sleep unknown unit',         1,
 		[qr{unrecognized time unit}], q{\sleep 1 week} ],
 
+	# CONDITIONAL BLOCKS
+	[   'if elif failed conditions', 0,
+		[qr{division by zero}],
+		q{-- failed conditions
+\if 1 / 0
+\elif 1 / 0
+\else
+\endif
+} ],
+
 	# MISC
 	[   'misc invalid backslash command',         1,
 		[qr{invalid command .* "nosuchcommand"}], q{\nosuchcommand} ],
@@ -635,14 +654,32 @@ for my $e (@errors)
 	my $n = '001_pgbench_error_' . $name;
 	$n =~ s/ /_/g;
 	pgbench(
-		'-n -t 1 -Dfoo=bla -Dnull=null -Dtrue=true -Done=1 -Dzero=0.0 -Dbadtrue=trueXXX -M prepared',
+		'-n -t 1 -Dfoo=bla -Dnull=null -Dtrue=true -Done=1 -Dzero=0.0 -Dbadtrue=trueXXX -M prepared -d fails',
 		$status,
-		[ $status ? qr{^$} : qr{processed: 0/1} ],
+		($status ?
+		 [ qr{^$} ] :
+		 [ qr{processed: 0/1}, qr{number of errors: 1 \(100.000%\)},
+		   qr{^((?!number of retried)(.|\n))*$} ]),
 		$re,
 		'pgbench script error: ' . $name,
 		{ $n => $script });
 }
 
+# reset client variables in case of failure
+pgbench(
+	'-n -t 2 -d fails', 0,
+	[ qr{processed: 0/2}, qr{number of errors: 2 \(100.000%\)},
+	  qr{^((?!number of retried)(.|\n))*$} ],
+	[ qr{(client 0 got a failure in command 1 \(SQL\) of script 0; ERROR:  syntax error at or near ":"(.|\n)*){2}} ],
+	'pgbench reset client variables in case of failure',
+	{	'001_pgbench_reset_client_variables' => q{
+BEGIN;
+-- select an unassigned variable
+SELECT :unassigned_var;
+\set unassigned_var 1
+END;
+} });
+
 # zipfian cache array overflow
 pgbench(
 	'-t 1', 0,
diff --git a/src/bin/pgbench/t/002_pgbench_no_server.pl b/src/bin/pgbench/t/002_pgbench_no_server.pl
index af21f04..e6886a7 100644
--- a/src/bin/pgbench/t/002_pgbench_no_server.pl
+++ b/src/bin/pgbench/t/002_pgbench_no_server.pl
@@ -57,7 +57,7 @@ my @options = (
 
 	# name, options, stderr checks
 	[   'bad option',
-		'-h home -p 5432 -U calvin -d --bad-option',
+		'-h home -p 5432 -U calvin -d all --bad-option',
 		[ qr{(unrecognized|illegal) option}, qr{--help.*more information} ] ],
 	[   'no file',
 		'-f no-such-file',
@@ -113,6 +113,10 @@ my @options = (
 	[ 'bad random seed', '--random-seed=one',
 		[qr{unrecognized random seed option "one": expecting an unsigned integer, "time" or "rand"},
 		 qr{error while setting random seed from --random-seed option} ] ],
+	[ 'bad maximum number of tries', '--max-tries -10',
+		[qr{invalid number of maximum tries: "-10"} ] ],
+	[ 'bad maximum time of tries', '--max-tries-time -10',
+		[qr{invalid maximum time of tries: "-10"} ] ],
 
 	# loging sub-options
 	[   'sampling => log', '--sampling-rate=0.01',
diff --git a/src/bin/pgbench/t/003_serialization_and_deadlock_fails.pl b/src/bin/pgbench/t/003_serialization_and_deadlock_fails.pl
new file mode 100644
index 0000000..5660ddd
--- /dev/null
+++ b/src/bin/pgbench/t/003_serialization_and_deadlock_fails.pl
@@ -0,0 +1,739 @@
+use strict;
+use warnings;
+
+use Config;
+use PostgresNode;
+use TestLib;
+use Test::More tests => 32;
+
+use constant
+{
+	READ_COMMITTED   => 0,
+	REPEATABLE_READ  => 1,
+	SERIALIZABLE     => 2,
+};
+
+my @isolation_level_shell = (
+	'read\\ committed',
+	'repeatable\\ read',
+	'serializable');
+
+# The keys of advisory locks for testing deadlock failures:
+use constant
+{
+	DEADLOCK_1         => 3,
+	WAIT_PGBENCH_2     => 4,
+	DEADLOCK_2         => 5,
+	TRANSACTION_ENDS_1 => 6,
+	TRANSACTION_ENDS_2 => 7,
+};
+
+# Test concurrent update in table row.
+my $node = get_new_node('main');
+$node->init;
+$node->start;
+$node->safe_psql('postgres',
+    'CREATE UNLOGGED TABLE xy (x integer, y integer); '
+  . 'INSERT INTO xy VALUES (1, 2), (2, 3);');
+
+my $script_serialization = $node->basedir . '/pgbench_script_serialization';
+append_to_file($script_serialization,
+		"\\set delta random(-5000, 5000)\n"
+	  . "BEGIN;\n"
+	  . "SELECT pg_sleep(1);\n"
+	  . "UPDATE xy SET y = y + :delta "
+	  . "WHERE x = 1 AND pg_advisory_lock(0) IS NOT NULL;\n"
+	  . "SELECT pg_advisory_unlock_all();\n"
+	  . "END;\n");
+
+my $script_deadlocks1 = $node->basedir . '/pgbench_script_deadlocks1';
+append_to_file($script_deadlocks1,
+		"BEGIN;\n"
+	  . "SELECT pg_advisory_lock(" . DEADLOCK_1 . ");\n"
+	  . "SELECT pg_advisory_lock(" . WAIT_PGBENCH_2 . ");\n"
+	  . "SELECT pg_advisory_lock(" . DEADLOCK_2 . ");\n"
+	  . "END;\n"
+	  . "SELECT pg_advisory_unlock_all();\n"
+	  . "SELECT pg_advisory_lock(" . TRANSACTION_ENDS_1 . ");\n"
+	  . "SELECT pg_advisory_unlock_all();");
+
+my $script_deadlocks2 = $node->basedir . '/pgbench_script_deadlocks2';
+append_to_file($script_deadlocks2,
+		"BEGIN;\n"
+	  . "SELECT pg_advisory_lock(" . DEADLOCK_2 . ");\n"
+	  . "SELECT pg_advisory_lock(" . DEADLOCK_1 . ");\n"
+	  . "END;\n"
+	  . "SELECT pg_advisory_unlock_all();\n"
+	  . "SELECT pg_advisory_lock(" . TRANSACTION_ENDS_2 . ");\n"
+	  . "SELECT pg_advisory_unlock_all();");
+
+sub test_pgbench_serialization_errors
+{
+	my ($max_tries, $max_tries_time, $test_name) = @_;
+
+	my $isolation_level = REPEATABLE_READ;
+	my $isolation_level_shell = $isolation_level_shell[$isolation_level];
+
+	local $ENV{PGPORT} = $node->port;
+	local $ENV{PGOPTIONS} =
+		"-c default_transaction_isolation=" . $isolation_level_shell;
+	print "# PGOPTIONS: " . $ENV{PGOPTIONS} . "\n";
+
+	my ($h_psql, $in_psql, $out_psql);
+	my ($h_pgbench, $in_pgbench, $out_pgbench, $err_pgbench);
+
+	# Open a psql session, run a parallel transaction and acquire an advisory
+	# lock:
+	print "# Starting psql\n";
+	$h_psql = IPC::Run::start [ 'psql' ], \$in_psql, \$out_psql;
+
+	$in_psql = "begin;\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /BEGIN/;
+
+	$in_psql =
+		"update xy set y = y + 1 "
+	  . "where x = 1 and pg_advisory_lock(0) is not null;\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /UPDATE 1/;
+
+	my $retry_options =
+		($max_tries ? "--max-tries $max_tries" : "")
+	  . ($max_tries_time ? "--max-tries-time $max_tries_time" : "");
+
+	# Start pgbench:
+	my @command = (
+		qw(pgbench --no-vacuum --transactions 2 --debug fails --file),
+		$script_serialization,
+		split /\s+/, $retry_options);
+	print "# Running: " . join(" ", @command) . "\n";
+	$h_pgbench = IPC::Run::start \@command, \$in_pgbench, \$out_pgbench,
+	  \$err_pgbench;
+
+	# Wait until pgbench also tries to acquire the same advisory lock:
+	do
+	{
+		$in_psql =
+			"select * from pg_locks where "
+		  . "locktype = 'advisory' and "
+		  . "objsubid = 1 and "
+		  . "((classid::bigint << 32) | objid::bigint = 0::bigint) and "
+		  . "not granted;\n";
+		print "# Running in psql: " . join(" ", $in_psql);
+		$h_psql->pump() while length $in_psql;
+	} while ($out_psql !~ /1 row/);
+
+	# In psql, commit the transaction, release advisory locks and end the
+	# session:
+	$in_psql = "end;\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /COMMIT/;
+
+	$in_psql = "select pg_advisory_unlock_all();\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_unlock_all/;
+
+	$in_psql = "\\q\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() while length $in_psql;
+
+	$h_psql->finish();
+
+	# Get pgbench results
+	$h_pgbench->pump() until length $out_pgbench;
+	$h_pgbench->finish();
+
+	# On Windows, the exit status of the process is returned directly as the
+	# process's exit code, while on Unix, it's returned in the high bits
+	# of the exit code (see WEXITSTATUS macro in the standard <sys/wait.h>
+	# header file). IPC::Run's result function always returns exit code >> 8,
+	# assuming the Unix convention, which will always return 0 on Windows as
+	# long as the process was not terminated by an exception. To work around
+	# that, use $h->full_results on Windows instead.
+	my $result =
+	    ($Config{osname} eq "MSWin32")
+	  ? ($h_pgbench->full_results)[0]
+	  : $h_pgbench->result(0);
+
+	# Check pgbench results
+	ok(!$result, "@command exit code 0");
+
+	like($out_pgbench,
+		qr{processed: 1/2},
+		"$test_name: check processed transactions");
+
+	like($out_pgbench,
+		qr{number of errors: 1 \(50\.000%\)},
+		"$test_name: check errors");
+
+	like($out_pgbench,
+		qr{^((?!number of retried)(.|\n))*$},
+		"$test_name: check retried");
+
+	like($out_pgbench,
+		qr{latency average = \d+\.\d{3} ms \(including errors\)},
+		"$test_name: check latency average");
+
+	my $pattern =
+		"client 0 got a failure in command 3 \\(SQL\\) of script 0; "
+	  . "ERROR:  could not serialize access due to concurrent update";
+
+	like($err_pgbench,
+		qr{$pattern},
+		"$test_name: check serialization failure");
+}
+
+sub test_pgbench_serialization_failures
+{
+	my $isolation_level = REPEATABLE_READ;
+	my $isolation_level_shell = $isolation_level_shell[$isolation_level];
+
+	local $ENV{PGPORT} = $node->port;
+	local $ENV{PGOPTIONS} =
+		"-c default_transaction_isolation=" . $isolation_level_shell;
+	print "# PGOPTIONS: " . $ENV{PGOPTIONS} . "\n";
+
+	my ($h_psql, $in_psql, $out_psql);
+	my ($h_pgbench, $in_pgbench, $out_pgbench, $err_pgbench);
+
+	# Open a psql session, run a parallel transaction and acquire an advisory
+	# lock:
+	print "# Starting psql\n";
+	$h_psql = IPC::Run::start [ 'psql' ], \$in_psql, \$out_psql;
+
+	$in_psql = "begin;\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /BEGIN/;
+
+	$in_psql =
+		"update xy set y = y + 1 "
+	  . "where x = 1 and pg_advisory_lock(0) is not null;\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /UPDATE 1/;
+
+	# Start pgbench:
+	my @command = (
+		qw(pgbench --no-vacuum --transactions 1 --debug all --max-tries 2),
+		"--file",
+		$script_serialization);
+	print "# Running: " . join(" ", @command) . "\n";
+	$h_pgbench = IPC::Run::start \@command, \$in_pgbench, \$out_pgbench,
+	  \$err_pgbench;
+
+	# Wait until pgbench also tries to acquire the same advisory lock:
+	do
+	{
+		$in_psql =
+			"select * from pg_locks where "
+		  . "locktype = 'advisory' and "
+		  . "objsubid = 1 and "
+		  . "((classid::bigint << 32) | objid::bigint = 0::bigint) and "
+		  . "not granted;\n";
+		print "# Running in psql: " . join(" ", $in_psql);
+		$h_psql->pump() while length $in_psql;
+	} while ($out_psql !~ /1 row/);
+
+	# In psql, commit the transaction, release advisory locks and end the
+	# session:
+	$in_psql = "end;\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /COMMIT/;
+
+	$in_psql = "select pg_advisory_unlock_all();\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_unlock_all/;
+
+	$in_psql = "\\q\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() while length $in_psql;
+
+	$h_psql->finish();
+
+	# Get pgbench results
+	$h_pgbench->pump() until length $out_pgbench;
+	$h_pgbench->finish();
+
+	# On Windows, the exit status of the process is returned directly as the
+	# process's exit code, while on Unix, it's returned in the high bits
+	# of the exit code (see WEXITSTATUS macro in the standard <sys/wait.h>
+	# header file). IPC::Run's result function always returns exit code >> 8,
+	# assuming the Unix convention, which will always return 0 on Windows as
+	# long as the process was not terminated by an exception. To work around
+	# that, use $h->full_results on Windows instead.
+	my $result =
+	    ($Config{osname} eq "MSWin32")
+	  ? ($h_pgbench->full_results)[0]
+	  : $h_pgbench->result(0);
+
+	# Check pgbench results
+	ok(!$result, "@command exit code 0");
+
+	like($out_pgbench,
+		qr{processed: 1/1},
+		"concurrent update with retrying: check processed transactions");
+
+	like($out_pgbench,
+		qr{^((?!number of errors)(.|\n))*$},
+		"concurrent update with retrying: check errors");
+
+	like($out_pgbench,
+		qr{number of retried: 1 \(100\.000%\)},
+		"concurrent update with retrying: check retried");
+
+	like($out_pgbench,
+		qr{number of retries: 1},
+		"concurrent update with retrying: check retries");
+
+	like($out_pgbench,
+		qr{latency average = \d+\.\d{3} ms\n},
+		"concurrent update with retrying: check latency average");
+
+	my $pattern =
+		"client 0 sending UPDATE xy SET y = y \\+ (-?\\d+) "
+	  . "WHERE x = 1 AND pg_advisory_lock\\(0\\) IS NOT NULL;\n"
+	  . "(client 0 receiving\n)+"
+	  . "client 0 got a failure in command 3 \\(SQL\\) of script 0; "
+	  . "ERROR:  could not serialize access due to concurrent update\n\n"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g2+"
+	  . "client 0 continues a failed transaction in command 4 \\(SQL\\) of script 0; "
+	  . "ERROR:  current transaction is aborted, commands ignored until end of transaction block\n\n"
+	  . "client 0 sending END;\n"
+	  . "\\g2+"
+	  . "client 0 repeats the failed transaction \\(try 2/2\\)\n"
+	  . "client 0 executing \\\\set delta\n"
+	  . "client 0 sending BEGIN;\n"
+	  . "\\g2+"
+	  . "client 0 sending SELECT pg_sleep\\(1\\);\n"
+	  . "\\g2+"
+	  . "client 0 sending UPDATE xy SET y = y \\+ \\g1 "
+	  . "WHERE x = 1 AND pg_advisory_lock\\(0\\) IS NOT NULL;";
+
+	like($err_pgbench,
+		qr{$pattern},
+		"concurrent update with retrying: check the retried transaction");
+}
+
+sub test_pgbench_deadlock_errors
+{
+	my $isolation_level = READ_COMMITTED;
+	my $isolation_level_shell = $isolation_level_shell[$isolation_level];
+
+	local $ENV{PGPORT} = $node->port;
+	local $ENV{PGOPTIONS} =
+		"-c default_transaction_isolation=" . $isolation_level_shell;
+	print "# PGOPTIONS: " . $ENV{PGOPTIONS} . "\n";
+
+	my ($h_psql, $in_psql, $out_psql);
+	my ($h1, $in1, $out1, $err1);
+	my ($h2, $in2, $out2, $err2);
+
+	# Open a psql session and acquire an advisory lock:
+	print "# Starting psql\n";
+	$h_psql = IPC::Run::start [ 'psql' ], \$in_psql, \$out_psql;
+
+	$in_psql =
+		"select pg_advisory_lock(" . WAIT_PGBENCH_2 . ") "
+	  . "as pg_advisory_lock_" . WAIT_PGBENCH_2 . ";\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_lock_@{[ WAIT_PGBENCH_2 ]}/;
+
+	# Run the first pgbench:
+	my @command1 = (
+		qw(pgbench --no-vacuum --transactions 1 --debug fails --file),
+		$script_deadlocks1);
+	print "# Running: " . join(" ", @command1) . "\n";
+	$h1 = IPC::Run::start \@command1, \$in1, \$out1, \$err1;
+
+	# Wait until the first pgbench also tries to acquire the same advisory lock:
+	do
+	{
+		$in_psql =
+			"select case count(*) "
+		  . "when 0 then '" . WAIT_PGBENCH_2 . "_zero' "
+		  . "else '" . WAIT_PGBENCH_2 . "_not_zero' end "
+		  . "from pg_locks where "
+		  . "locktype = 'advisory' and "
+		  . "objsubid = 1 and "
+		  . "((classid::bigint << 32) | objid::bigint = "
+		  . WAIT_PGBENCH_2
+		  . "::bigint) and "
+		  . "not granted;\n";
+		print "# Running in psql: " . join(" ", $in_psql);
+		$h_psql->pump() while length $in_psql;
+	} while ($out_psql !~ /@{[ WAIT_PGBENCH_2 ]}_not_zero/);
+
+	# Run the second pgbench:
+	my @command2 = (
+		qw(pgbench --no-vacuum --transactions 1 --debug fails --file),
+		$script_deadlocks2);
+	print "# Running: " . join(" ", @command2) . "\n";
+	$h2 = IPC::Run::start \@command2, \$in2, \$out2, \$err2;
+
+	# Wait until the second pgbench tries to acquire the lock held by the first
+	# pgbench:
+	do
+	{
+		$in_psql =
+			"select case count(*) "
+		  . "when 0 then '" . DEADLOCK_1 . "_zero' "
+		  . "else '" . DEADLOCK_1 . "_not_zero' end "
+		  . "from pg_locks where "
+		  . "locktype = 'advisory' and "
+		  . "objsubid = 1 and "
+		  . "((classid::bigint << 32) | objid::bigint = "
+		  . DEADLOCK_1
+		  . "::bigint) and "
+		  . "not granted;\n";
+		print "# Running in psql: " . join(" ", $in_psql);
+		$h_psql->pump() while length $in_psql;
+	} while ($out_psql !~ /@{[ DEADLOCK_1 ]}_not_zero/);
+
+	# In the psql session, release the lock that the first pgbench is waiting
+	# for and end the session:
+	$in_psql =
+		"select pg_advisory_unlock(" . WAIT_PGBENCH_2 . ") "
+	  . "as pg_advisory_unlock_" . WAIT_PGBENCH_2 . ";\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_unlock_@{[ WAIT_PGBENCH_2 ]}/;
+
+	$in_psql = "\\q\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() while length $in_psql;
+
+	$h_psql->finish();
+
+	# Get results from all pgbenches:
+	$h1->pump() until length $out1;
+	$h1->finish();
+
+	$h2->pump() until length $out2;
+	$h2->finish();
+
+	# On Windows, the exit status of the process is returned directly as the
+	# process's exit code, while on Unix, it's returned in the high bits
+	# of the exit code (see WEXITSTATUS macro in the standard <sys/wait.h>
+	# header file). IPC::Run's result function always returns exit code >> 8,
+	# assuming the Unix convention, which will always return 0 on Windows as
+	# long as the process was not terminated by an exception. To work around
+	# that, use $h->full_results on Windows instead.
+	my $result1 =
+	    ($Config{osname} eq "MSWin32")
+	  ? ($h1->full_results)[0]
+	  : $h1->result(0);
+
+	my $result2 =
+	    ($Config{osname} eq "MSWin32")
+	  ? ($h2->full_results)[0]
+	  : $h2->result(0);
+
+	# Check all pgbench results
+	ok(!$result1, "@command1 exit code 0");
+	ok(!$result2, "@command2 exit code 0");
+
+	# The first or second pgbench should get a deadlock error
+	ok((($out1 =~ /processed: 0\/1/ and $out2 =~ /processed: 1\/1/) or
+		($out2 =~ /processed: 0\/1/ and $out1 =~ /processed: 1\/1/)),
+		"concurrent deadlock update: check processed transactions");
+
+	ok((($out1 =~ /number of errors: 1 \(100\.000%\)/ and
+		 $out2 =~ /^((?!number of errors)(.|\n))*$/) or
+		($out2 =~ /number of errors: 1 \(100\.000%\)/ and
+		 $out1 =~ /^((?!number of errors)(.|\n))*$/)),
+		"concurrent deadlock update: check errors");
+
+	ok(($err1 =~ /client 0 got a failure in command 3 \(SQL\) of script 0; ERROR:  deadlock detected/ or
+		$err2 =~ /client 0 got a failure in command 2 \(SQL\) of script 0; ERROR:  deadlock detected/),
+		"concurrent deadlock update: check deadlock failure");
+
+	# Both pgbenches do not have retried transactions
+	like($out1 . $out2,
+		qr{^((?!number of retried)(.|\n))*$},
+		"concurrent deadlock update: check retried");
+}
+
+sub test_pgbench_deadlock_failures
+{
+	my $isolation_level = READ_COMMITTED;
+	my $isolation_level_shell = $isolation_level_shell[$isolation_level];
+
+	local $ENV{PGPORT} = $node->port;
+	local $ENV{PGOPTIONS} =
+		"-c default_transaction_isolation=" . $isolation_level_shell;
+	print "# PGOPTIONS: " . $ENV{PGOPTIONS} . "\n";
+
+	my ($h_psql, $in_psql, $out_psql);
+	my ($h1, $in1, $out1, $err1);
+	my ($h2, $in2, $out2, $err2);
+
+	# Open a psql session and acquire an advisory lock:
+	print "# Starting psql\n";
+	$h_psql = IPC::Run::start [ 'psql' ], \$in_psql, \$out_psql;
+
+	$in_psql =
+		"select pg_advisory_lock(" . WAIT_PGBENCH_2 . ") "
+	  . "as pg_advisory_lock_" . WAIT_PGBENCH_2 . ";\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_lock_@{[ WAIT_PGBENCH_2 ]}/;
+
+	# Run the first pgbench:
+	my @command1 = (
+		qw(pgbench --no-vacuum --transactions 1 --debug all --max-tries 2),
+		"--file",
+		$script_deadlocks1);
+	print "# Running: " . join(" ", @command1) . "\n";
+	$h1 = IPC::Run::start \@command1, \$in1, \$out1, \$err1;
+
+	# Wait until the first pgbench also tries to acquire the same advisory lock:
+	do
+	{
+		$in_psql =
+			"select case count(*) "
+		  . "when 0 then '" . WAIT_PGBENCH_2 . "_zero' "
+		  . "else '" . WAIT_PGBENCH_2 . "_not_zero' end "
+		  . "from pg_locks where "
+		  . "locktype = 'advisory' and "
+		  . "objsubid = 1 and "
+		  . "((classid::bigint << 32) | objid::bigint = "
+		  . WAIT_PGBENCH_2
+		  . "::bigint) and "
+		  . "not granted;\n";
+		print "# Running in psql: " . join(" ", $in_psql);
+		$h_psql->pump() while length $in_psql;
+	} while ($out_psql !~ /@{[ WAIT_PGBENCH_2 ]}_not_zero/);
+
+	# Run the second pgbench:
+	my @command2 = (
+		qw(pgbench --no-vacuum --transactions 1 --debug all --max-tries 2),
+		"--file",
+		$script_deadlocks2);
+	print "# Running: " . join(" ", @command2) . "\n";
+	$h2 = IPC::Run::start \@command2, \$in2, \$out2, \$err2;
+
+	# Wait until the second pgbench tries to acquire the lock held by the first
+	# pgbench:
+	do
+	{
+		$in_psql =
+			"select case count(*) "
+		  . "when 0 then '" . DEADLOCK_1 . "_zero' "
+		  . "else '" . DEADLOCK_1 . "_not_zero' end "
+		  . "from pg_locks where "
+		  . "locktype = 'advisory' and "
+		  . "objsubid = 1 and "
+		  . "((classid::bigint << 32) | objid::bigint = "
+		  . DEADLOCK_1
+		  . "::bigint) and "
+		  . "not granted;\n";
+		print "# Running in psql: " . join(" ", $in_psql);
+		$h_psql->pump() while length $in_psql;
+	} while ($out_psql !~ /@{[ DEADLOCK_1 ]}_not_zero/);
+
+	# In the psql session, acquire the locks that pgbenches will wait for:
+	$in_psql =
+		"select pg_advisory_lock(" . TRANSACTION_ENDS_1 . ") "
+	  . "as pg_advisory_lock_" . TRANSACTION_ENDS_1 . ";\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_lock_@{[ TRANSACTION_ENDS_1 ]}/;
+
+	$in_psql =
+		"select pg_advisory_lock(" . TRANSACTION_ENDS_2 . ") "
+	  . "as pg_advisory_lock_" . TRANSACTION_ENDS_2 . ";\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_lock_@{[ TRANSACTION_ENDS_2 ]}/;
+
+	# In the psql session, release the lock that the first pgbench is waiting
+	# for:
+	$in_psql =
+		"select pg_advisory_unlock(" . WAIT_PGBENCH_2 . ") "
+	  . "as pg_advisory_unlock_" . WAIT_PGBENCH_2 . ";\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_unlock_@{[ WAIT_PGBENCH_2 ]}/;
+
+	# Wait until pgbenches try to acquire the locks held by the psql session:
+	do
+	{
+		$in_psql =
+			"select case count(*) "
+		  . "when 0 then '" . TRANSACTION_ENDS_1 . "_zero' "
+		  . "else '" . TRANSACTION_ENDS_1 . "_not_zero' end "
+		  . "from pg_locks where "
+		  . "locktype = 'advisory' and "
+		  . "objsubid = 1 and "
+		  . "((classid::bigint << 32) | objid::bigint = "
+		  . TRANSACTION_ENDS_1
+		  . "::bigint) and "
+		  . "not granted;\n";
+		print "# Running in psql: " . join(" ", $in_psql);
+		$h_psql->pump() while length $in_psql;
+	} while ($out_psql !~ /@{[ TRANSACTION_ENDS_1 ]}_not_zero/);
+
+	do
+	{
+		$in_psql =
+			"select case count(*) "
+		  . "when 0 then '" . TRANSACTION_ENDS_2 . "_zero' "
+		  . "else '" . TRANSACTION_ENDS_2 . "_not_zero' end "
+		  . "from pg_locks where "
+		  . "locktype = 'advisory' and "
+		  . "objsubid = 1 and "
+		  . "((classid::bigint << 32) | objid::bigint = "
+		  . TRANSACTION_ENDS_2
+		  . "::bigint) and "
+		  . "not granted;\n";
+		print "# Running in psql: " . join(" ", $in_psql);
+		$h_psql->pump() while length $in_psql;
+	} while ($out_psql !~ /@{[ TRANSACTION_ENDS_2 ]}_not_zero/);
+
+	# In the psql session, release advisory locks and end the session:
+	$in_psql = "select pg_advisory_unlock_all() as pg_advisory_unlock_all;\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_unlock_all/;
+
+	$in_psql = "\\q\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() while length $in_psql;
+
+	$h_psql->finish();
+
+	# Get results from all pgbenches:
+	$h1->pump() until length $out1;
+	$h1->finish();
+
+	$h2->pump() until length $out2;
+	$h2->finish();
+
+	# On Windows, the exit status of the process is returned directly as the
+	# process's exit code, while on Unix, it's returned in the high bits
+	# of the exit code (see WEXITSTATUS macro in the standard <sys/wait.h>
+	# header file). IPC::Run's result function always returns exit code >> 8,
+	# assuming the Unix convention, which will always return 0 on Windows as
+	# long as the process was not terminated by an exception. To work around
+	# that, use $h->full_results on Windows instead.
+	my $result1 =
+	    ($Config{osname} eq "MSWin32")
+	  ? ($h1->full_results)[0]
+	  : $h1->result(0);
+
+	my $result2 =
+	    ($Config{osname} eq "MSWin32")
+	  ? ($h2->full_results)[0]
+	  : $h2->result(0);
+
+	# Check all pgbench results
+	ok(!$result1, "@command1 exit code 0");
+	ok(!$result2, "@command2 exit code 0");
+
+	like($out1,
+		qr{processed: 1/1},
+		"concurrent deadlock update with retrying: pgbench 1: "
+	  . "check processed transactions");
+	like($out2,
+		qr{processed: 1/1},
+		"concurrent deadlock update with retrying: pgbench 2: "
+	  . "check processed transactions");
+
+	# The first or second pgbench should get a deadlock error which was retried:
+	like($out1 . $out2,
+		qr{^((?!number of errors)(.|\n))*$},
+		"concurrent deadlock update with retrying: check errors");
+
+	ok((($out1 =~ /number of retried: 1 \(100\.000%\)/ and
+		 $out2 =~ /^((?!number of retried)(.|\n))*$/) or
+		($out2 =~ /number of retried: 1 \(100\.000%\)/ and
+		 $out1 =~ /^((?!number of retried)(.|\n))*$/)),
+		"concurrent deadlock update with retrying: check retries");
+
+	my $pattern1 =
+		"client 0 sending BEGIN;\n"
+	  . "(client 0 receiving\n)+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . DEADLOCK_1 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . WAIT_PGBENCH_2 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . DEADLOCK_2 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 got a failure in command 3 \\(SQL\\) of script 0; "
+	  . "ERROR:  deadlock detected\n"
+	  . "((?!client 0)(.|\n))*"
+	  . "client 0 sending END;\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . TRANSACTION_ENDS_1 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g1+"
+	  . "client 0 repeats the failed transaction \\(try 2/2\\)\n"
+	  . "client 0 sending BEGIN;\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . DEADLOCK_1 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . WAIT_PGBENCH_2 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . DEADLOCK_2 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending END;\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . TRANSACTION_ENDS_1 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g1+";
+
+	my $pattern2 =
+		"client 0 sending BEGIN;\n"
+	  . "(client 0 receiving\n)+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . DEADLOCK_2 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . DEADLOCK_1 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 got a failure in command 2 \\(SQL\\) of script 0; "
+	  . "ERROR:  deadlock detected\n"
+	  . "((?!client 0)(.|\n))*"
+	  . "client 0 sending END;\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . TRANSACTION_ENDS_2 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g1+"
+	  . "client 0 repeats the failed transaction \\(try 2/2\\)\n"
+	  . "client 0 sending BEGIN;\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . DEADLOCK_2 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . DEADLOCK_1 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending END;\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . TRANSACTION_ENDS_2 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g1+";
+
+	ok(($err1 =~ /$pattern1/ or $err2 =~ /$pattern2/),
+		"concurrent deadlock update with retrying: "
+	  . "check the retried transaction");
+}
+
+# Run the serialization-error scenario once under each retry-limit mode:
+# first bounded by attempt count (--max-tries=1), then bounded by elapsed
+# time (--max-tries-time=900), with the unused limit passed as 0.
+test_pgbench_serialization_errors(
+								1,      # --max-tries
+								0,      # --max-tries-time (will not be used)
+								"concurrent update");
+test_pgbench_serialization_errors(
+								0,	    # --max-tries (will not be used)
+								900,    # --max-tries-time
+								"concurrent update with maximum time of tries");
+
+# Serialization-failure scenario (no retry-limit arguments taken).
+test_pgbench_serialization_failures();
+
+# Deadlock scenarios; by analogy with the serialization pair above, the
+# "errors" variant presumably exercises retrying and the "failures" variant
+# does not — confirm against the sub definitions earlier in this file.
+test_pgbench_deadlock_errors();
+test_pgbench_deadlock_failures();
+
+# All scenarios done; shut down the PostgreSQL node used by this test file.
+$node->stop;
-- 
2.7.4

