"Some tests to cover hash_index"
I am attaching the patch to improve some coverage of hash index code [1] (Concurrent Hash Index, <https://commitfest.postgresql.org/10/647/>).
I have added some basic tests, which mainly covers overflow pages. It took
2 sec extra time in my machine in parallel schedule.
Hit Total Coverage
old tests Line Coverage 780 1478 52.7
Function Coverage 63 85 74.1
improvement after tests Line Coverage 1181 1478 79.9 %
Function Coverage 78 85 91.8 %
[1]: Concurrent Hash Index. <https://commitfest.postgresql.org/10/647/>
--
Thanks and Regards
Mithun C Y
EnterpriseDB: http://www.enterprisedb.com
Attachments:
commit-hash_coverage_test (application/octet-stream; name=commit-hash_coverage_test) — Download
From 12d2cef59da090a28a9a6e6162a8e38cb4a18875 Mon Sep 17 00:00:00 2001
From: "mithun.cy" <mithun.cy@enterprisedb.com>
Date: Thu, 4 Aug 2016 08:12:08 +0530
Subject: 1
diff --git a/src/test/regress/expected/concurrent_hash_index.out b/src/test/regress/expected/concurrent_hash_index.out
new file mode 100644
index 0000000..c3b8036
--- /dev/null
+++ b/src/test/regress/expected/concurrent_hash_index.out
@@ -0,0 +1,79 @@
+--
+-- Cause some overflow insert and splits.
+--
+CREATE TABLE con_hash_index_table (keycol INT);
+CREATE INDEX con_hash_index on con_hash_index_table USING HASH (keycol);
+WARNING: hash indexes are not WAL-logged and their use is discouraged
+INSERT INTO con_hash_index_table VALUES (1);
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table LIMIT 32000;
+VACUUM FULL con_hash_index_table;
+--
+-- Lets do a backward scan.
+--
+BEGIN;
+SET enable_seqscan = OFF;
+SET enable_bitmapscan = OFF;
+CREATE FUNCTION declares_cursor(int)
+ RETURNS void
+ AS 'DECLARE c CURSOR FOR SELECT * from con_hash_index_table WHERE keycol = $1;'
+LANGUAGE SQL;
+SELECT declares_cursor(1);
+ declares_cursor
+-----------------
+
+(1 row)
+
+MOVE FORWARD ALL FROM c;
+MOVE BACKWARD 10000 FROM c;
+MOVE BACKWARD ALL FROM c;
+ROLLBACK;
+--
+-- DELETE, INSERT, REBUILD INDEX.
+--
+DELETE FROM con_hash_index_table WHERE keycol = 1;
+INSERT INTO con_hash_index_table VALUES (1), (2);
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+VACUUM con_hash_index_table;
+REINDEX INDEX con_hash_index;
+--
+-- Clean Up.
+--
+DROP TABLE con_hash_index_table;
+--
+-- INDEX ON TEMP TABLE.
+--
+CREATE TEMP TABLE hash_ovfl_temp_heap (x int, y int);
+INSERT INTO hash_ovfl_temp_heap VALUES (1,1);
+CREATE INDEX hash_idx ON hash_ovfl_temp_heap USING hash (x);
+DROP TABLE hash_ovfl_temp_heap CASCADE;
+--
+-- Float4 type.
+--
+CREATE TABLE hash_ovfl_heap_float4 (x float4, y int);
+INSERT INTO hash_ovfl_heap_float4 VALUES (1.1,1);
+CREATE INDEX hash_idx ON hash_ovfl_heap_float4 USING hash (x);
+WARNING: hash indexes are not WAL-logged and their use is discouraged
+DROP TABLE hash_ovfl_heap_float4 CASCADE;
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index 4ebad04..b7cf14a 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -79,7 +79,7 @@ ignore: random
# ----------
# Another group of parallel tests
# ----------
-test: select_into select_distinct select_distinct_on select_implicit select_having subselect union case join aggregates transactions random portals arrays btree_index hash_index update namespace prepared_xacts delete
+test: select_into select_distinct select_distinct_on select_implicit select_having subselect union case join aggregates transactions random portals arrays btree_index hash_index concurrent_hash_index update namespace prepared_xacts delete
# ----------
# Another group of parallel tests
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index 5c7038d..f491423 100644
--- a/src/test/regress/serial_schedule
+++ b/src/test/regress/serial_schedule
@@ -96,6 +96,7 @@ test: portals
test: arrays
test: btree_index
test: hash_index
+test: concurrent_hash_index
test: update
test: delete
test: namespace
diff --git a/src/test/regress/sql/concurrent_hash_index.sql b/src/test/regress/sql/concurrent_hash_index.sql
new file mode 100644
index 0000000..8f930d5
--- /dev/null
+++ b/src/test/regress/sql/concurrent_hash_index.sql
@@ -0,0 +1,80 @@
+--
+-- Cause some overflow insert and splits.
+--
+CREATE TABLE con_hash_index_table (keycol INT);
+CREATE INDEX con_hash_index on con_hash_index_table USING HASH (keycol);
+INSERT INTO con_hash_index_table VALUES (1);
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table LIMIT 32000;
+
+VACUUM FULL con_hash_index_table;
+
+--
+-- Lets do a backward scan.
+--
+BEGIN;
+SET enable_seqscan = OFF;
+SET enable_bitmapscan = OFF;
+CREATE FUNCTION declares_cursor(int)
+ RETURNS void
+ AS 'DECLARE c CURSOR FOR SELECT * from con_hash_index_table WHERE keycol = $1;'
+LANGUAGE SQL;
+
+SELECT declares_cursor(1);
+MOVE FORWARD ALL FROM c;
+MOVE BACKWARD 10000 FROM c;
+MOVE BACKWARD ALL FROM c;
+ROLLBACK;
+
+--
+-- DELETE, INSERT, REBUILD INDEX.
+--
+DELETE FROM con_hash_index_table WHERE keycol = 1;
+INSERT INTO con_hash_index_table VALUES (1), (2);
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
+
+VACUUM con_hash_index_table;
+REINDEX INDEX con_hash_index;
+
+--
+-- Clean Up.
+--
+DROP TABLE con_hash_index_table;
+
+--
+-- INDEX ON TEMP TABLE.
+--
+CREATE TEMP TABLE hash_ovfl_temp_heap (x int, y int);
+INSERT INTO hash_ovfl_temp_heap VALUES (1,1);
+CREATE INDEX hash_idx ON hash_ovfl_temp_heap USING hash (x);
+DROP TABLE hash_ovfl_temp_heap CASCADE;
+
+--
+-- Float4 type.
+--
+CREATE TABLE hash_ovfl_heap_float4 (x float4, y int);
+INSERT INTO hash_ovfl_heap_float4 VALUES (1.1,1);
+CREATE INDEX hash_idx ON hash_ovfl_heap_float4 USING hash (x);
+DROP TABLE hash_ovfl_heap_float4 CASCADE;
On Thu, Aug 4, 2016 at 7:24 PM, Mithun Cy <mithun.cy@enterprisedb.com>
wrote:
I am attaching the patch to improve some coverage of hash index code [1].
I have added some basic tests, which mainly covers overflow pages. It took
2 sec extra time in my machine in parallel schedule.Hit Total Coverage
old tests Line Coverage 780 1478 52.7Function Coverage 63 85 74.1
improvement after tests Line Coverage 1181 1478 79.9 %Function Coverage 78 85 91.8 %
I think the code coverage improvement for hash index with these tests seems
to be quite good, however time for tests seems to be slightly on higher
side. Do anybody have better suggestion for these tests?
diff --git a/src/test/regress/sql/concurrent_hash_index.sql
b/src/test/regress/sql/concurrent_hash_index.sql
I wonder why you have included a new file for these tests, why can't be
these added to existing hash_index.sql.
+--
+-- Cause some overflow insert and splits.
+--
+CREATE TABLE con_hash_index_table (keycol INT);
+CREATE INDEX con_hash_index on con_hash_index_table USING HASH (keycol);
The relation name con_hash_index* choosen in above tests doesn't seem to be
appropriate, how about hash_split_heap* or something like that.
Register your patch in latest CF (https://commitfest.postgresql.org/10/)
--
With Regards,
Amit Kapila.
EnterpriseDB: http://www.enterprisedb.com
Hi All,
I have reverified the code coverage for hash index code using the test file
(commit-hash_coverage_test) attached with this mailing list and have found
that some of the code in _hash_squeezebucket() function flow is not being
covered. For this i have added a small testcase on top of 'commit
hash_coverage_test' patch. I have done this mainly to test Amit's WAL for
hash index patch [1] </messages/by-id/CAA4eK1JOBX=YU33631Qh-XivYXtPSALh514+jR8XeD7v+K3r_Q@mail.gmail.com>.
I have also removed the warning message that we used to get for hash index
like 'WARNING: hash indexes are not WAL-logged and their use is
discouraged' as this message is now no more visible w.r.t hash index after
the WAL patch for hash index. Please have a look and let me know your
thoughts.
[1]: /messages/by-id/CAA4eK1JOBX=YU33631Qh-XivYXtPSALh514+jR8XeD7v+K3r_Q@mail.gmail.com
/messages/by-id/CAA4eK1JOBX=YU33631Qh-XivYXtPSALh514+jR8XeD7v+K3r_Q@mail.gmail.com
With Regards,
Ashutosh Sharma
EnterpriseDB:http://www.enterprisedb.com
On Sat, Aug 6, 2016 at 9:41 AM, Amit Kapila <amit.kapila16@gmail.com> wrote:
Show quoted text
On Thu, Aug 4, 2016 at 7:24 PM, Mithun Cy <mithun.cy@enterprisedb.com>
wrote:I am attaching the patch to improve some coverage of hash index code [1].
I have added some basic tests, which mainly covers overflow pages. It
took 2 sec extra time in my machine in parallel schedule.Hit Total Coverage
old tests Line Coverage 780 1478 52.7Function Coverage 63 85 74.1
improvement after tests Line Coverage 1181 1478 79.9 %Function Coverage 78 85 91.8 %
I think the code coverage improvement for hash index with these tests
seems to be quite good, however time for tests seems to be slightly on
higher side. Do anybody have better suggestion for these tests?diff --git a/src/test/regress/sql/concurrent_hash_index.sql b/src/test/regress/sql/concurrent_hash_index.sql I wonder why you have included a new file for these tests, why can't be these added to existing hash_index.sql.+-- +-- Cause some overflow insert and splits. +-- +CREATE TABLE con_hash_index_table (keycol INT); +CREATE INDEX con_hash_index on con_hash_index_table USING HASH (keycol);The relation name con_hash_index* choosen in above tests doesn't seem to
be appropriate, how about hash_split_heap* or something like that.Register your patch in latest CF (https://commitfest.postgresql.org/10/)
--
With Regards,
Amit Kapila.
EnterpriseDB: http://www.enterprisedb.com
Hi,
I missed to attach the patch in my previous mail. Here i attach the patch.
With Regards,
Ashutosh Sharma
EnterpriseDB:http://www.enterprisedb.com
On Tue, Aug 23, 2016 at 11:47 AM, Ashutosh Sharma <ashu.coek88@gmail.com>
wrote:
Show quoted text
Hi All,
I have reverified the code coverage for hash index code using the test
file (commit-hash_coverage_test) attached with this mailing list and have
found that some of the code in _hash_squeezebucket() function flow is not
being covered. For this i have added a small testcase on top of 'commit
hash_coverage_test' patch. I have done this mainly to test Amit's WAL for
hash index patch [1].I have also removed the warning message that we used to get for hash index
like 'WARNING: hash indexes are not WAL-logged and their use is
discouraged' as this message is now no more visible w.r.t hash index after
the WAL patch for hash index. Please have a look and let me know your
thoughts.[1] - /messages/by-id/CAA4eK1JOBX%25
3DYU33631Qh-XivYXtPSALh514%2BjR8XeD7v%2BK3r_Q%40mail.gmail.comWith Regards,
Ashutosh Sharma
EnterpriseDB:http://www.enterprisedb.comOn Sat, Aug 6, 2016 at 9:41 AM, Amit Kapila <amit.kapila16@gmail.com>
wrote:On Thu, Aug 4, 2016 at 7:24 PM, Mithun Cy <mithun.cy@enterprisedb.com>
wrote:I am attaching the patch to improve some coverage of hash index code [1].
I have added some basic tests, which mainly covers overflow pages. It
took 2 sec extra time in my machine in parallel schedule.Hit Total Coverage
old tests Line Coverage 780 1478 52.7Function Coverage 63 85 74.1
improvement after tests Line Coverage 1181 1478 79.9 %Function Coverage 78 85 91.8 %
I think the code coverage improvement for hash index with these tests
seems to be quite good, however time for tests seems to be slightly on
higher side. Do anybody have better suggestion for these tests?diff --git a/src/test/regress/sql/concurrent_hash_index.sql b/src/test/regress/sql/concurrent_hash_index.sql I wonder why you have included a new file for these tests, why can't be these added to existing hash_index.sql.+-- +-- Cause some overflow insert and splits. +-- +CREATE TABLE con_hash_index_table (keycol INT); +CREATE INDEX con_hash_index on con_hash_index_table USING HASH (keycol);The relation name con_hash_index* choosen in above tests doesn't seem to
be appropriate, how about hash_split_heap* or something like that.Register your patch in latest CF (https://commitfest.postgresql.org/10/)
--
With Regards,
Amit Kapila.
EnterpriseDB: http://www.enterprisedb.com
Attachments:
commit-hash_coverage_test_v2.patch (text/x-patch; charset=US-ASCII; name=commit-hash_coverage_test_v2.patch) — Download
diff --git a/src/test/regress/expected/concurrent_hash_index.out b/src/test/regress/expected/concurrent_hash_index.out
index c3b8036..60191c0 100644
--- a/src/test/regress/expected/concurrent_hash_index.out
+++ b/src/test/regress/expected/concurrent_hash_index.out
@@ -3,7 +3,6 @@
--
CREATE TABLE con_hash_index_table (keycol INT);
CREATE INDEX con_hash_index on con_hash_index_table USING HASH (keycol);
-WARNING: hash indexes are not WAL-logged and their use is discouraged
INSERT INTO con_hash_index_table VALUES (1);
INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
INSERT INTO con_hash_index_table SELECT * from con_hash_index_table;
@@ -75,5 +74,15 @@ DROP TABLE hash_ovfl_temp_heap CASCADE;
CREATE TABLE hash_ovfl_heap_float4 (x float4, y int);
INSERT INTO hash_ovfl_heap_float4 VALUES (1.1,1);
CREATE INDEX hash_idx ON hash_ovfl_heap_float4 USING hash (x);
-WARNING: hash indexes are not WAL-logged and their use is discouraged
DROP TABLE hash_ovfl_heap_float4 CASCADE;
+--
+-- Test hash index insertion with squeeze bucket (XLOG_HASH_MOVE_PAGE_CONTENTS
+-- WAL record type).
+--
+CREATE TABLE hash_split_buckets (seqno int4, random int4);
+CREATE INDEX hash_idx ON hash_split_buckets USING hash (random int4_ops)
+with (fillfactor = 10);
+INSERT INTO hash_split_buckets (seqno, random) SELECT a, a*5 FROM
+GENERATE_SERIES(1, 100000) a;
+REINDEX INDEX hash_idx;
+DROP TABLE hash_split_buckets;
diff --git a/src/test/regress/sql/concurrent_hash_index.sql b/src/test/regress/sql/concurrent_hash_index.sql
index 8f930d5..d3b09d0 100644
--- a/src/test/regress/sql/concurrent_hash_index.sql
+++ b/src/test/regress/sql/concurrent_hash_index.sql
@@ -78,3 +78,15 @@ CREATE TABLE hash_ovfl_heap_float4 (x float4, y int);
INSERT INTO hash_ovfl_heap_float4 VALUES (1.1,1);
CREATE INDEX hash_idx ON hash_ovfl_heap_float4 USING hash (x);
DROP TABLE hash_ovfl_heap_float4 CASCADE;
+
+--
+-- Test hash index insertion with squeeze bucket (XLOG_HASH_MOVE_PAGE_CONTENTS
+-- WAL record type).
+--
+CREATE TABLE hash_split_buckets (seqno int4, random int4);
+CREATE INDEX hash_idx ON hash_split_buckets USING hash (random int4_ops)
+with (fillfactor = 10);
+INSERT INTO hash_split_buckets (seqno, random) SELECT a, a*5 FROM
+GENERATE_SERIES(1, 100000) a;
+REINDEX INDEX hash_idx;
+DROP TABLE hash_split_buckets;
On Tue, Aug 23, 2016 at 2:17 AM, Ashutosh Sharma <ashu.coek88@gmail.com> wrote:
I have also removed the warning message that we used to get for hash index like 'WARNING: hash indexes are not WAL-logged and their use is discouraged' as this message is now no more visible w.r.t hash index after the WAL patch for hash index. Please have a look and let me know your thoughts.
Well, that change should be part of Amit's patch to add WAL logging,
not this patch, whose mission is just to improve test coverage.
--
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company
--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers
Hi,
Well, that change should be part of Amit's patch to add WAL logging,
not this patch, whose mission is just to improve test coverage.
I have just removed the warning message from expected output file as i
have performed the testing on Amit's latest patch that removes this
warning message from the hash index code.
--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers
On Wed, Aug 24, 2016 at 11:38 AM, Ashutosh Sharma <ashu.coek88@gmail.com> wrote:
Hi,
Well, that change should be part of Amit's patch to add WAL logging,
not this patch, whose mission is just to improve test coverage.I have just removed the warning message from expected output file as i
have performed the testing on Amit's latest patch that removes this
warning message from the hash index code.
I think you are missing what Robert wants to point out. You don't
need to remove the warning message when you are adding new test cases
on top of Mithun's patch even if those new tests helps to cover the
code path which is required for wal-hash-index patch.
--
With Regards,
Amit Kapila.
EnterpriseDB: http://www.enterprisedb.com
--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers
On Wed, Aug 24, 2016 at 2:34 AM, Amit Kapila <amit.kapila16@gmail.com> wrote:
On Wed, Aug 24, 2016 at 11:38 AM, Ashutosh Sharma <ashu.coek88@gmail.com> wrote:
Well, that change should be part of Amit's patch to add WAL logging,
not this patch, whose mission is just to improve test coverage.I have just removed the warning message from expected output file as i
have performed the testing on Amit's latest patch that removes this
warning message from the hash index code.I think you are missing what Robert wants to point out. You don't
need to remove the warning message when you are adding new test cases
on top of Mithun's patch even if those new tests helps to cover the
code path which is required for wal-hash-index patch.
Right. The point is, if somebody applies this patch on top of master,
the regression tests will now fail because of that missing line. That
means nobody is going to commit this.
--
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company
--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers
On Sat, Aug 6, 2016 at 9:41 AM, Amit Kapila <amit.kapila16@gmail.com> wrote:
I wonder why you have included a new file for these tests, why can't be
these added to existing hash_index.sql.
tests in hash_index.sql did not cover overflow pages, above tests were for
mainly for them. So I thought having a separate test file can help
enabling/disabling them in schedule files, when we do not want them running
as it take slightly high time. If you think otherwise I will reconsider
will add tests to hash_index.sql.
The relation name con_hash_index* choosen in above tests doesn't seem to
be appropriate, how about hash_split_heap* or something like that.
Fixed. Have renamed relation, index and test filename accordingly.
--
Thanks and Regards
Mithun C Y
EnterpriseDB: http://www.enterprisedb.com
Attachments:
commit-hash_coverage_test_v2_no_wal.patch (application/octet-stream; name=commit-hash_coverage_test_v2_no_wal.patch) — Download
commit 5df7e726d1b78ac4f5be3bcc0a6e25cd05a3d26e
Author: mithun <mithun@localhost.localdomain>
Date: Mon Sep 19 14:03:26 2016 +0530
commit-1
diff --git a/src/test/regress/expected/hash_index_split.out b/src/test/regress/expected/hash_index_split.out
new file mode 100644
index 0000000..f3d25d1
--- /dev/null
+++ b/src/test/regress/expected/hash_index_split.out
@@ -0,0 +1,79 @@
+--
+-- Cause some overflow insert and splits.
+--
+CREATE TABLE hash_split_heap (keycol INT);
+CREATE INDEX hash_split_index on hash_split_heap USING HASH (keycol);
+WARNING: hash indexes are not WAL-logged and their use is discouraged
+INSERT INTO hash_split_heap VALUES (1);
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap LIMIT 32000;
+VACUUM FULL hash_split_heap;
+--
+-- Lets do a backward scan.
+--
+BEGIN;
+SET enable_seqscan = OFF;
+SET enable_bitmapscan = OFF;
+CREATE FUNCTION declares_cursor(int)
+ RETURNS void
+ AS 'DECLARE c CURSOR FOR SELECT * from hash_split_heap WHERE keycol = $1;'
+LANGUAGE SQL;
+SELECT declares_cursor(1);
+ declares_cursor
+-----------------
+
+(1 row)
+
+MOVE FORWARD ALL FROM c;
+MOVE BACKWARD 10000 FROM c;
+MOVE BACKWARD ALL FROM c;
+ROLLBACK;
+--
+-- DELETE, INSERT, REBUILD INDEX.
+--
+DELETE FROM hash_split_heap WHERE keycol = 1;
+INSERT INTO hash_split_heap VALUES (1), (2);
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+VACUUM hash_split_heap;
+REINDEX INDEX hash_split_index;
+--
+-- Clean Up.
+--
+DROP TABLE hash_split_heap;
+--
+-- INDEX ON TEMP TABLE.
+--
+CREATE TEMP TABLE hash_temp_heap (x int, y int);
+INSERT INTO hash_temp_heap VALUES (1,1);
+CREATE INDEX hash_idx ON hash_temp_heap USING hash (x);
+DROP TABLE hash_temp_heap CASCADE;
+--
+-- Float4 type.
+--
+CREATE TABLE hash_heap_float4 (x float4, y int);
+INSERT INTO hash_heap_float4 VALUES (1.1,1);
+CREATE INDEX hash_idx ON hash_heap_float4 USING hash (x);
+WARNING: hash indexes are not WAL-logged and their use is discouraged
+DROP TABLE hash_heap_float4 CASCADE;
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index 1cb5dfc..ada6d12 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -79,7 +79,7 @@ ignore: random
# ----------
# Another group of parallel tests
# ----------
-test: select_into select_distinct select_distinct_on select_implicit select_having subselect union case join aggregates transactions random portals arrays btree_index hash_index update namespace prepared_xacts delete
+test: select_into select_distinct select_distinct_on select_implicit select_having subselect union case join aggregates transactions random portals arrays btree_index hash_index hash_index_split update namespace prepared_xacts delete
# ----------
# Another group of parallel tests
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index 8958d8c..37add6b 100644
--- a/src/test/regress/serial_schedule
+++ b/src/test/regress/serial_schedule
@@ -96,6 +96,7 @@ test: portals
test: arrays
test: btree_index
test: hash_index
+test: hash_index_split
test: update
test: delete
test: namespace
diff --git a/src/test/regress/sql/hash_index_split.sql b/src/test/regress/sql/hash_index_split.sql
new file mode 100644
index 0000000..af36df0
--- /dev/null
+++ b/src/test/regress/sql/hash_index_split.sql
@@ -0,0 +1,80 @@
+--
+-- Cause some overflow insert and splits.
+--
+CREATE TABLE hash_split_heap (keycol INT);
+CREATE INDEX hash_split_index on hash_split_heap USING HASH (keycol);
+INSERT INTO hash_split_heap VALUES (1);
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap LIMIT 32000;
+
+VACUUM FULL hash_split_heap;
+
+--
+-- Lets do a backward scan.
+--
+BEGIN;
+SET enable_seqscan = OFF;
+SET enable_bitmapscan = OFF;
+CREATE FUNCTION declares_cursor(int)
+ RETURNS void
+ AS 'DECLARE c CURSOR FOR SELECT * from hash_split_heap WHERE keycol = $1;'
+LANGUAGE SQL;
+
+SELECT declares_cursor(1);
+MOVE FORWARD ALL FROM c;
+MOVE BACKWARD 10000 FROM c;
+MOVE BACKWARD ALL FROM c;
+ROLLBACK;
+
+--
+-- DELETE, INSERT, REBUILD INDEX.
+--
+DELETE FROM hash_split_heap WHERE keycol = 1;
+INSERT INTO hash_split_heap VALUES (1), (2);
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+INSERT INTO hash_split_heap SELECT * from hash_split_heap;
+
+VACUUM hash_split_heap;
+REINDEX INDEX hash_split_index;
+
+--
+-- Clean Up.
+--
+DROP TABLE hash_split_heap;
+
+--
+-- INDEX ON TEMP TABLE.
+--
+CREATE TEMP TABLE hash_temp_heap (x int, y int);
+INSERT INTO hash_temp_heap VALUES (1,1);
+CREATE INDEX hash_idx ON hash_temp_heap USING hash (x);
+DROP TABLE hash_temp_heap CASCADE;
+
+--
+-- Float4 type.
+--
+CREATE TABLE hash_heap_float4 (x float4, y int);
+INSERT INTO hash_heap_float4 VALUES (1.1,1);
+CREATE INDEX hash_idx ON hash_heap_float4 USING hash (x);
+DROP TABLE hash_heap_float4 CASCADE;
On Mon, Sep 19, 2016 at 8:44 PM, Mithun Cy <mithun.cy@enterprisedb.com> wrote:
On Sat, Aug 6, 2016 at 9:41 AM, Amit Kapila <amit.kapila16@gmail.com> wrote:
I wonder why you have included a new file for these tests, why can't be
these added to existing hash_index.sql.tests in hash_index.sql did not cover overflow pages, above tests were for
mainly for them.
The name of file hash_index_split suggests it focus on split. Are the
tests focussed more on overflow pages or on split of hash index?
So I thought having a separate test file can help
enabling/disabling them in schedule files, when we do not want them running
as it take slightly high time. If you think otherwise I will reconsider will
add tests to hash_index.sql.
I think you have a point, but not sure if it is worth to add a
separate file. It might be tricky to choose which file to add new
tests for hash_indexes. Anybody else have opinion on this point?
Can you check how much time it takes as compare to btree or brin index tests?
I am facing below diff with your new patch.
***************
*** 1,4 ****
! --
-- Cause some overflow insert and splits.
--
CREATE TABLE hash_split_heap (keycol INT);
--- 1,4 ----
! --
-- Cause some overflow insert and splits.
--
CREATE TABLE hash_split_heap (keycol INT);
======================================================================
There is an extra space in expected file which is leading to above failure.
--
With Regards,
Amit Kapila.
EnterpriseDB: http://www.enterprisedb.com
--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers
On Tue, Sep 20, 2016 at 8:52 AM, Amit Kapila <amit.kapila16@gmail.com> wrote:
I think you have a point, but not sure if it is worth to add a
separate file. It might be tricky to choose which file to add new
tests for hash_indexes. Anybody else have opinion on this point?
I think all the tests should be added to hash_index.sql. A new file
doesn't seem warranted.
--
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company
--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers
Why not use generate_series() queries to insert the appropriate number
of tuples, instead of a handful of INSERT lines each time? Since each
insert is a separate transaction, that would probably be faster.
Why do you have a plpgsql function just to create a cursor? Wouldn't it
be simpler to create the cursor in an SQL statement?
--
Álvaro Herrera https://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services
--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers
On Tue, Sep 20, 2016 at 2:26 PM, Alvaro Herrera
<alvherre@2ndquadrant.com> wrote:
Why not use generate_series() queries to insert the appropriate number
of tuples, instead of a handful of INSERT lines each time? Since each
insert is a separate transaction, that would probably be faster.Why do you have a plpgsql function just to create a cursor? Wouldn't it
be simpler to create the cursor in an SQL statement?
This patch hasn't been updated in over a week, so I'm marking it
Returned with Feedback. I think this is a good effort and I hope
something committable will come from it, but with 2 days left it's not
going to happen this CF.
--
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company
--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers
On Wed, Sep 28, 2016 at 9:56 PM, Robert Haas <robertmhaas@gmail.com> wrote:
something committable will come from it, but with 2 days left it's not
going to happen this CF.
Adding a new patch. This one uses generate series instead of INSERT INTO
SELECT and fixed comments from Alvaro.
--
Thanks and Regards
Mithun C Y
EnterpriseDB: http://www.enterprisedb.com
Attachments:
commit-hash_coverage_test_v3_no_wal.patchapplication/octet-stream; name=commit-hash_coverage_test_v3_no_wal.patchDownload
diff --git a/src/test/regress/expected/hash_index.out b/src/test/regress/expected/hash_index.out
index 22835f8..a755988 100644
--- a/src/test/regress/expected/hash_index.out
+++ b/src/test/regress/expected/hash_index.out
@@ -196,3 +196,49 @@ SELECT h.seqno AS f20000
-- WHERE x = 90;
-- SELECT count(*) AS i988 FROM hash_ovfl_heap
-- WHERE x = 1000;
+--
+-- Cause some overflow insert and splits.
+--
+CREATE TABLE hash_split_heap (keycol INT);
+CREATE INDEX hash_split_index on hash_split_heap USING HASH (keycol);
+WARNING: hash indexes are not WAL-logged and their use is discouraged
+INSERT INTO hash_split_heap SELECT 1 FROM generate_series(1, 70000) a;
+VACUUM FULL hash_split_heap;
+--
+-- Lets do a backward scan.
+--
+BEGIN;
+SET enable_seqscan = OFF;
+SET enable_bitmapscan = OFF;
+DECLARE c CURSOR FOR SELECT * from hash_split_heap WHERE keycol = 1;
+MOVE FORWARD ALL FROM c;
+MOVE BACKWARD 10000 FROM c;
+MOVE BACKWARD ALL FROM c;
+CLOSE c;
+END;
+--
+-- DELETE, INSERT, REBUILD INDEX.
+--
+DELETE FROM hash_split_heap WHERE keycol = 1;
+INSERT INTO hash_split_heap SELECT a/2 FROM generate_series(1, 50000) a;
+VACUUM hash_split_heap;
+REINDEX INDEX hash_split_index;
+--
+-- Clean Up.
+--
+DROP TABLE hash_split_heap;
+--
+-- INDEX ON TEMP TABLE.
+--
+CREATE TEMP TABLE hash_temp_heap (x int, y int);
+INSERT INTO hash_temp_heap VALUES (1,1);
+CREATE INDEX hash_idx ON hash_temp_heap USING hash (x);
+DROP TABLE hash_temp_heap CASCADE;
+--
+-- Float4 type.
+--
+CREATE TABLE hash_heap_float4 (x float4, y int);
+INSERT INTO hash_heap_float4 VALUES (1.1,1);
+CREATE INDEX hash_idx ON hash_heap_float4 USING hash (x);
+WARNING: hash indexes are not WAL-logged and their use is discouraged
+DROP TABLE hash_heap_float4 CASCADE;
diff --git a/src/test/regress/sql/hash_index.sql b/src/test/regress/sql/hash_index.sql
index 411e8ae..df2b2d4 100644
--- a/src/test/regress/sql/hash_index.sql
+++ b/src/test/regress/sql/hash_index.sql
@@ -151,3 +151,57 @@ SELECT h.seqno AS f20000
-- SELECT count(*) AS i988 FROM hash_ovfl_heap
-- WHERE x = 1000;
+
+--
+-- Cause some overflow insert and splits.
+--
+CREATE TABLE hash_split_heap (keycol INT);
+CREATE INDEX hash_split_index on hash_split_heap USING HASH (keycol);
+INSERT INTO hash_split_heap SELECT 1 FROM generate_series(1, 70000) a;
+
+VACUUM FULL hash_split_heap;
+
+--
+-- Lets do a backward scan.
+--
+BEGIN;
+SET enable_seqscan = OFF;
+SET enable_bitmapscan = OFF;
+
+DECLARE c CURSOR FOR SELECT * from hash_split_heap WHERE keycol = 1;
+MOVE FORWARD ALL FROM c;
+MOVE BACKWARD 10000 FROM c;
+MOVE BACKWARD ALL FROM c;
+CLOSE c;
+END;
+
+--
+-- DELETE, INSERT, REBUILD INDEX.
+--
+DELETE FROM hash_split_heap WHERE keycol = 1;
+INSERT INTO hash_split_heap SELECT a/2 FROM generate_series(1, 50000) a;
+
+VACUUM hash_split_heap;
+REINDEX INDEX hash_split_index;
+
+--
+-- Clean Up.
+--
+DROP TABLE hash_split_heap;
+
+--
+-- INDEX ON TEMP TABLE.
+--
+CREATE TEMP TABLE hash_temp_heap (x int, y int);
+INSERT INTO hash_temp_heap VALUES (1,1);
+CREATE INDEX hash_idx ON hash_temp_heap USING hash (x);
+DROP TABLE hash_temp_heap CASCADE;
+
+--
+-- Float4 type.
+--
+CREATE TABLE hash_heap_float4 (x float4, y int);
+INSERT INTO hash_heap_float4 VALUES (1.1,1);
+CREATE INDEX hash_idx ON hash_heap_float4 USING hash (x);
+DROP TABLE hash_heap_float4 CASCADE;
+
On Wed, Oct 12, 2016 at 1:17 AM, Mithun Cy <mithun.cy@enterprisedb.com> wrote:
On Wed, Sep 28, 2016 at 9:56 PM, Robert Haas <robertmhaas@gmail.com> wrote:
something committable will come from it, but with 2 days left it's not
going to happen this CF.
Adding a new patch. This one uses generate series instead of INSERT INTO
SELECT and fixed comments from Alvaro.
Committed.
--
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company
--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers