[sqlsmith] Failed assertion in BecomeLockGroupLeader
Hi,
tonight's sqlsmith run yielded another core dump:
TRAP: FailedAssertion("!(MyProc->lockGroupLeader == ((void *)0))", File: "proc.c", Line: 1787)
I couldn't identify a query for it though: debug_query_string is empty.
Additionally, the offending query was not reported in the error context
as it typically is for non-parallel executor crashes.
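For reference, the check that trips here is the guard at the top of
BecomeLockGroupLeader() insisting that the process is not already a member of
somebody else's lock group; roughly (paraphrasing proc.c, exact code may
differ):

    /* If we already became the leader, there is nothing more to do. */
    if (MyProc->lockGroupLeader == MyProc)
        return;

    /* We had better not already be following some other leader. */
    Assert(MyProc->lockGroupLeader == NULL);

A parallel worker joins its leader's lock group at startup, so presumably
calling LaunchParallelWorkers() from inside a worker, as the trace below
shows, is what trips it.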
regards,
Andreas
GNU gdb (Debian 7.7.1+dfsg-5) 7.7.1
Core was generated by `postgres: bgworker: parallel worker for PID 4706 '.
Program terminated with signal SIGABRT, Aborted.
#0 0x00007ff1bda16067 in __GI_raise (sig=sig@entry=6)
at ../nptl/sysdeps/unix/sysv/linux/raise.c:56
56 ../nptl/sysdeps/unix/sysv/linux/raise.c: No such file or directory.
#0 0x00007ff1bda16067 in __GI_raise (sig=sig@entry=6) at ../nptl/sysdeps/unix/sysv/linux/raise.c:56
#1 0x00007ff1bda17448 in __GI_abort () at abort.c:89
#2 0x00000000007eaa11 in ExceptionalCondition (conditionName=conditionName@entry=0x988318 "!(MyProc->lockGroupLeader == ((void *)0))", errorType=errorType@entry=0x82a45d "FailedAssertion", fileName=fileName@entry=0x8760e5 "proc.c", lineNumber=lineNumber@entry=1787) at assert.c:54
#3 0x00000000006e3e7b in BecomeLockGroupLeader () at proc.c:1787
#4 0x00000000004e6a59 in LaunchParallelWorkers (pcxt=pcxt@entry=0x1db05c8) at parallel.c:437
#5 0x00000000005ef2d7 in ExecGather (node=node@entry=0x1d9d0b8) at nodeGather.c:168
#6 0x00000000005dd788 in ExecProcNode (node=node@entry=0x1d9d0b8) at execProcnode.c:515
#7 0x00000000005d999f in ExecutePlan (dest=0x1d7d310, direction=<optimized out>, numberTuples=0, sendTuples=<optimized out>, operation=CMD_SELECT, use_parallel_mode=<optimized out>, planstate=0x1d9d0b8, estate=0x1d9c858) at execMain.c:1567
#8 standard_ExecutorRun (queryDesc=0x1db0080, direction=<optimized out>, count=0) at execMain.c:338
#9 0x00000000005dcb3f in ParallelQueryMain (seg=<optimized out>, toc=0x7ff1be507000) at execParallel.c:716
#10 0x00000000004e608b in ParallelWorkerMain (main_arg=<optimized out>) at parallel.c:1033
#11 0x0000000000683a42 in StartBackgroundWorker () at bgworker.c:726
#12 0x000000000068eb82 in do_start_bgworker (rw=0x1d24ec0) at postmaster.c:5531
#13 maybe_start_bgworker () at postmaster.c:5706
#14 0x000000000046c993 in ServerLoop () at postmaster.c:1762
#15 0x00000000006909fe in PostmasterMain (argc=argc@entry=3, argv=argv@entry=0x1cfa560) at postmaster.c:1298
#16 0x000000000046d5ed in main (argc=3, argv=0x1cfa560) at main.c:228
(gdb) bt full
#3 0x00000000006e3e7b in BecomeLockGroupLeader () at proc.c:1787
leader_lwlock = <optimized out>
#4 0x00000000004e6a59 in LaunchParallelWorkers (pcxt=pcxt@entry=0x1db05c8) at parallel.c:437
oldcontext = 0x1d9ced0
worker = {
bgw_name = "\220\a\333\001\000\000\000\000\370\340L\276\361\177\000\000\370\373\025\264\361\177\000\000P\a\333\001\000\000\000\000X\310\331\001\000\000\000\000\004\000\000\000\000\000\000\000\310\005\333\001\000\000\000\000\233\222J\000\000\000\000",
bgw_flags = 1,
bgw_start_time = BgWorkerStart_PostmasterStart,
bgw_restart_time = 1,
bgw_main = 0x0,
bgw_library_name = "\000\000\000\000\000\000\000\000\001\000\000\000\000\000\000\000u\222J\000\000\000\000\000\350\336\331\001\000\000\000\000\360\260a\000\000\000\000\000\200\315]\000\000\000\000\000\300\330\367\217\375\177\000\000h\205\333\001\000\000\000",
bgw_function_name = "`\346\327\001\000\000\000\000\004\000\000\000\000\000\000\000\360\260a\000\000\000\000\000\200\315]\000\000\000\000\000\300\330\367\217\375\177\000\000h\317\331\001\000\000\000\000\210\325\327\001\000\000\000\000\004\000\000\000\000\000\000",
bgw_main_arg = 6402288,
bgw_extra = "X\310\331\001\000\000\000\000\000\000\000\000\000\000\000\000\060\233\333\001\000\000\000\000\001\000\000\000\000\000\000\000\004\000\000\000\000\000\000\000l\321]\000\000\000\000\000\000\000\000\000\000\000\000\000H-\334\001\000\000\000\000h\317\331\001\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\274\341M\276\361\177\000\000\310\005\333\001\000\000\000\000\004\000\000\000\000\000\000\000\310\005\333\001\000\000\000\000\000\000\000\000\000\000\000",
bgw_notify_pid = 4
}
i = <optimized out>
any_registrations_failed = 0 '\000'
#5 0x00000000005ef2d7 in ExecGather (node=node@entry=0x1d9d0b8) at nodeGather.c:168
pcxt = 0x1db05c8
estate = <optimized out>
gather = 0x1d7d440
fslot = 0x1d9ced0
i = <optimized out>
resultSlot = <optimized out>
isDone = ExprSingleResult
econtext = <optimized out>
#6 0x00000000005dd788 in ExecProcNode (node=node@entry=0x1d9d0b8) at execProcnode.c:515
result = <optimized out>
__func__ = "ExecProcNode"
#7 0x00000000005d999f in ExecutePlan (dest=0x1d7d310, direction=<optimized out>, numberTuples=0, sendTuples=<optimized out>, operation=CMD_SELECT, use_parallel_mode=<optimized out>, planstate=0x1d9d0b8, estate=0x1d9c858) at execMain.c:1567
slot = <optimized out>
current_tuple_count = 0
#8 standard_ExecutorRun (queryDesc=0x1db0080, direction=<optimized out>, count=0) at execMain.c:338
estate = 0x1d9c858
operation = CMD_SELECT
dest = 0x1d7d310
sendTuples = <optimized out>
#9 0x00000000005dcb3f in ParallelQueryMain (seg=<optimized out>, toc=0x7ff1be507000) at execParallel.c:716
buffer_usage = <optimized out>
instrumentation = 0x0
instrument_options = 0
#10 0x00000000004e608b in ParallelWorkerMain (main_arg=<optimized out>) at parallel.c:1033
seg = 0x1d29d30
toc = 0x7ff1be507000
fps = 0x7ff1be51ec38
error_queue_space = <optimized out>
mq = <optimized out>
mqh = <optimized out>
libraryspace = <optimized out>
gucspace = <optimized out>
combocidspace = <optimized out>
tsnapspace = <optimized out>
asnapspace = <optimized out>
tstatespace = <optimized out>
msgbuf = {
data = 0x0,
len = 8,
maxlen = 1024,
cursor = 75
}
__func__ = "ParallelWorkerMain"
#11 0x0000000000683a42 in StartBackgroundWorker () at bgworker.c:726
local_sigjmp_buf = {{
__jmpbuf = {30559936, 3179006107653964804, 140727018841376, 1461888082, 4294967295, 30560912, -3178127926870473724, 3179007261678568452},
__mask_was_saved = 1,
__saved_mask = {
__val = {18446744066192964103, 0, 0, 140727018839040, 0, 2415385408, 30532816, 140727018782720, 0, 0, 140727018840336, 18446744073709551615, 0, 30514160, 140676256394695, 8589934591}
}
}}
buf = "bgworker: parallel worker for PID 4706\000\000\345[...]"
worker = 0x1d24ec0
entrypt = <optimized out>
__func__ = "StartBackgroundWorker"
#12 0x000000000068eb82 in do_start_bgworker (rw=0x1d24ec0) at postmaster.c:5531
worker_pid = <optimized out>
#13 maybe_start_bgworker () at postmaster.c:5706
rw = 0x1d24ec0
iter = {
cur = 0x1d25048,
next = 0x0,
prev = 0x1d207d8
}
now = <optimized out>
#14 0x000000000046c993 in ServerLoop () at postmaster.c:1762
rmask = {
fds_bits = {24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
}
selres = <optimized out>
now = <optimized out>
readmask = {
fds_bits = {24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
}
last_lockfile_recheck_time = 1461888082
last_touch_time = 1461885050
__func__ = "ServerLoop"
#15 0x00000000006909fe in PostmasterMain (argc=argc@entry=3, argv=argv@entry=0x1cfa560) at postmaster.c:1298
opt = <optimized out>
status = <optimized out>
userDoption = <optimized out>
listen_addr_saved = <optimized out>
i = <optimized out>
output_config_variable = <optimized out>
__func__ = "PostmasterMain"
#16 0x000000000046d5ed in main (argc=3, argv=0x1cfa560) at main.c:228
On Fri, Apr 29, 2016 at 12:01 PM, Andreas Seltenreich <seltenreich@gmx.de>
wrote:
Hi,
tonight's sqlsmith run yielded another core dump:
TRAP: FailedAssertion("!(MyProc->lockGroupLeader == ((void *)0))", File:
"proc.c", Line: 1787)
I couldn't identify a query for it though: debug_query_string is empty.
Additionally, the offending query was not reported in the error context
as it typically is for non-parallel executor crashes.
From the call stack below, it is clear that the reason for the core dump is that
a Gather node is pushed below another Gather node, which makes a worker execute
the Gather node. Currently there is no support for workers to launch further
workers, and ideally such a plan should not be generated. It would be helpful
if you could find the offending query, or the plan corresponding to it.
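For illustration, the kind of plan shape implied by the stack would be
something like this (a made-up example, not the actual plan from the crash):

    Gather
      Workers Planned: 2
      ->  Nested Loop
            ->  Gather                         <- executed inside a worker
                  Workers Planned: 2
                  ->  Parallel Seq Scan on t1
            ->  Index Scan using t2_pkey on t2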
With Regards,
Amit Kapila.
EnterpriseDB: http://www.enterprisedb.com
Amit Kapila wrote:
On Fri, Apr 29, 2016 at 12:01 PM, Andreas Seltenreich <seltenreich@gmx.de>
wrote:
I couldn't identify a query for it though: debug_query_string is empty.
Additionally, the offending query was not reported in the error context
as it typically is for non-parallel executor crashes.

From the call stack below, it is clear that the reason for the core dump is that
a Gather node is pushed below another Gather node, which makes a worker execute
the Gather node. Currently there is no support for workers to launch further
workers, and ideally such a plan should not be generated. It would be helpful
if you could find the offending query, or the plan corresponding to it.
So I suppose the PID of the process starting the workers should be in
the stack somewhere. With that one should be able to attach to that
process and get another stack trace. I'm curious whether you would
need to have started the server with "postgres -T" in order to be able
to get a coordinated core dump from both processes. The
debug_query_string would be found in the leader, I suppose.
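Something along these lines against the leader (PID 4706 in the trace above)
might already be enough, assuming it is still around when the worker traps:

    gdb -p 4706 -batch -ex 'bt' -ex 'print debug_query_string'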
--
Álvaro Herrera http://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services
On 29 April 2016 at 08:31, Andreas Seltenreich <seltenreich@gmx.de> wrote:
Hi,
tonight's sqlsmith run yielded another core dump:
TRAP: FailedAssertion("!(MyProc->lockGroupLeader == ((void *)0))", File:
"proc.c", Line: 1787)I couldn't identifiy a query for it though: debug_query_string is empty.
Additionally, the offending query was not reported in the error context
as it typically is for non-parallel executor crashes.
It's good that the input is fuzzed, but there needs to be a way to re-run
identical fuzzing or a way to backtrack to find what broke. Not much point
finding bugs we can't identify later.
--
Simon Riggs http://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services
Amit Kapila <amit.kapila16@gmail.com> writes:
On Fri, Apr 29, 2016 at 12:01 PM, Andreas Seltenreich <seltenreich@gmx.de>
wrote:
tonight's sqlsmith run yielded another core dump:
TRAP: FailedAssertion("!(MyProc->lockGroupLeader == ((void *)0))", File:
"proc.c", Line: 1787)I couldn't identifiy a query for it though: debug_query_string is empty.
Additionally, the offending query was not reported in the error context
as it typically is for non-parallel executor crashes.
From the call stack below, it is clear that the reason for the core dump is that
a Gather node is pushed below another Gather node, which makes a worker execute
the Gather node. Currently there is no support for workers to launch further
workers, and ideally such a plan should not be generated.
It might not be intentional. The bug we identified from Andreas' prior
report could be causing this: once a GatherPath's subpath has been freed,
that palloc chunk could be recycled into another GatherPath, or something
with a GatherPath in its substructure, leading to a plan of that shape.
It would
be helpful if you could find the offending query, or the plan corresponding to it.
I presume the lack of debug_query_string data is because nothing is
bothering to set debug_query_string in a worker process. Should that be
remedied? At the very least set it to "worker process", but it might be
worth copying over the full query from the parent side.
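A rough sketch of the latter, reusing the existing shm_toc machinery (the key
name PARALLEL_KEY_QUERY_TEXT and the exact placement are made up here):

    /* leader, while setting up the parallel DSM in ExecInitParallelPlan() */
    Size    query_len = strlen(estate->es_sourceText) + 1;
    char   *query_data;

    shm_toc_estimate_chunk(&pcxt->estimator, query_len);
    shm_toc_estimate_keys(&pcxt->estimator, 1);
    /* ... after InitializeParallelDSM(pcxt) ... */
    query_data = shm_toc_allocate(pcxt->toc, query_len);
    memcpy(query_data, estate->es_sourceText, query_len);
    shm_toc_insert(pcxt->toc, PARALLEL_KEY_QUERY_TEXT, query_data);

    /* worker, early in ParallelQueryMain(): make the text visible */
    debug_query_string = shm_toc_lookup(toc, PARALLEL_KEY_QUERY_TEXT);
    pgstat_report_activity(STATE_RUNNING, debug_query_string);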
regards, tom lane
Alvaro Herrera writes:
Amit Kapila wrote:
It would be helpful if you could find the offending query, or the plan
corresponding to it.

So I suppose the PID of the process starting the workers should be in
the stack somewhere.
Yes, it's right at the top, but long gone by now…
With that one should be able to attach to that process and get another
stack trace. I'm curious whether you would need to have started
the server with "postgres -T"
This sounds like it should work to capture more context when the
assertion fails the next time. I have to purge the catalogs a bit
though to avoid stopping early on boring core dumps. Most of them are
currently caused by acl.c using text for syscache lookups and triggering
a NAMEDATALEN assertion.
E.g.: select has_language_privilege('smithsmithsmithsmithsmithsmithsmithsmithsmithsmithsmithsmithsmith', 'smith');
thanks,
andreas
Simon Riggs writes:
It's good that the input is fuzzed, but there needs to be a way to re-run
identical fuzzing or a way to backtrack to find what broke. Not much point
finding bugs we can't identify later.
sqlsmith is deterministic and allows re-generating a sequence of random
queries with the --seed argument. Finding a testing methodology that
ensures a repeatable server-side state is a harder problem, though.
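A re-run of the same query stream looks roughly like this (exact option
spelling per sqlsmith's --help):

    sqlsmith --seed=12345 --target='host=/tmp dbname=regression'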
One would have to disable autovacuum and autoanalyze and invoke explicit
analyzes/vacuums in concert with query generation. Further, one would have
to avoid any kind of concurrency while testing. Even then, 1% of the
queries run into a statement_timeout due to randomly generated excessive
cross joins. If a timeout only just barely happens, it might not do so on
the repeated run, and the deterministic state is gone from then on. I'm
afraid this list is not complete yet.
I didn't think the effort of creating this kind of clean-room testing
was worth it. If reports of failed assertions with a backtrace but without a
recipe to reproduce them are a nuisance, I'll avoid them in the future.
regards,
Andreas
On 2016-04-30 02:28:22 +0200, Andreas Seltenreich wrote:
This sounds like it should work to capture more context when the
assertion fails the next time. I have to purge the catalogs a bit
though to avoid stopping early on boring core dumps. Most of them are
currently caused by acl.c using text for syscache lookups and triggering
a NAMEDATALEN assertion.
E.g.: select has_language_privilege('smithsmithsmithsmithsmithsmithsmithsmithsmithsmithsmithsmithsmith', 'smith');
Yuck. We've got to fix those. Does anybody remember how these functions
came to use text instead of name for things that pretty clearly should
have accepted name (i.e., the objects, not the priv string)?
Andres
On 2016-04-30 02:34:35 +0200, Andreas Seltenreich wrote:
I didn't think the effort of creating this kind of clean-room testing
was worth it. If reports of failed assertions with a backtrace but without a
recipe to reproduce them are a nuisance, I'll avoid them in the future.
It's obviously better to have a recipe for reproducing the issue, but I
think the reports are quite useful even without that.
Andres
On Fri, Apr 29, 2016 at 7:15 PM, Tom Lane <tgl@sss.pgh.pa.us> wrote:
Amit Kapila <amit.kapila16@gmail.com> writes:
On Fri, Apr 29, 2016 at 12:01 PM, Andreas Seltenreich <seltenreich@gmx.de> wrote:
tonight's sqlsmith run yielded another core dump:
TRAP: FailedAssertion("!(MyProc->lockGroupLeader == ((void *)0))",
File:
"proc.c", Line: 1787)
I couldn't identify a query for it though: debug_query_string is
empty.
Additionally, the offending query was not reported in the error context
as it typically is for non-parallel executor crashes.

From the call stack below, it is clear that the reason for the core dump is that
a Gather node is pushed below another Gather node, which makes a worker execute
the Gather node. Currently there is no support for workers to launch further
workers, and ideally such a plan should not be generated.

It might not be intentional. The bug we identified from Andreas' prior
report could be causing this: once a GatherPath's subpath has been freed,
that palloc chunk could be recycled into another GatherPath, or something
with a GatherPath in its substructure, leading to a plan of that shape.
Yes, that's one possibility.
It would
be helpful if you could find the offending query, or the plan corresponding to
it.
I presume the lack of debug_query_string data is because nothing is
bothering to set debug_query_string in a worker process. Should that be
remedied? At the very least set it to "worker process",
Currently, for the query descriptor in a worker process, we use
"<parallel_query>" (see ExecParallelGetQueryDesc()), so that seems to be a
better choice.
but it might be
worth copying over the full query from the parent side.
That would cost a couple of extra cycles considering we need to do it
for each worker, but OTOH it might be useful debugging information in
cases like the one reported in this thread. Do you see any broader use for
passing the query string to workers?
With Regards,
Amit Kapila.
EnterpriseDB: http://www.enterprisedb.com
On Sat, Apr 30, 2016 at 5:58 AM, Andreas Seltenreich <seltenreich@gmx.de>
wrote:
Alvaro Herrera writes:
Amit Kapila wrote:
It would be helpful if you could find the offending query, or the plan
corresponding to it.

So I suppose the PID of the process starting the workers should be in
the stack somewhere.

Yes, it's right at the top, but long gone by now…

With that one should be able to attach to that process and get another
stack trace. I'm curious whether you would need to have started
the server with "postgres -T"

This sounds like it should work to capture more context when the
assertion fails the next time.
Sounds good. So can we assume that you will try to get us the new report
with more information?
With Regards,
Amit Kapila.
EnterpriseDB: http://www.enterprisedb.com
Amit Kapila writes:
On Sat, Apr 30, 2016 at 5:58 AM, Andreas Seltenreich <seltenreich@gmx.de> wrote:
This sounds like it should work to capture more context when the
assertion fails the next time.

Sounds good. So can we assume that you will try to get us the new report
with more information?
Yes. I do have a busy weekend+week ahead though, so don't hold your
breath.
regards
Andreas
Amit Kapila writes:
On Fri, Apr 29, 2016 at 7:15 PM, Tom Lane <tgl@sss.pgh.pa.us> wrote:
but it might be worth copying over the full query from the parent
side.

That would cost a couple of extra cycles considering we need to do it
for each worker, but OTOH it might be useful debugging information in
cases like the one reported in this thread.
Maybe only do it in assertion-enabled builds when performance is an
issue?
regards,
andreas
On Fri, Apr 29, 2016 at 9:45 AM, Tom Lane <tgl@sss.pgh.pa.us> wrote:
It would
be helpful if you could find the offending query, or the plan corresponding to it.

I presume the lack of debug_query_string data is because nothing is
bothering to set debug_query_string in a worker process. Should that be
remedied? At the very least set it to "worker process", but it might be
worth copying over the full query from the parent side.
I agree. I thought about doing that at one point, but I didn't quite
have the cycles and I wasn't sure how important it would be. The fact
that we're already hitting cases like this before we've even gone to
beta suggests that it's pretty important. I think it's worth the
extra cycles, even in non-cassert builds. Compared to the overhead of
cajoling the postmaster to fork a new process, the cost of this should
be trivial.
--
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company
Amit Kapila writes:
Sounds good. So can we assume that you will try to get us the new report
with more information?
I don't see these crashes anymore in c1543a8. Given the amount of fuzzing
done, it should have happened a dozen times by now, so it's highly likely that
something in 23b09e15..c1543a8 fixed it.
regards,
andreas
On Thu, May 5, 2016 at 4:11 PM, Andreas Seltenreich <seltenreich@gmx.de> wrote:
Amit Kapila writes:
Sounds good. So can we assume that you will try to get us the new report
with more information?

I don't see these crashes anymore in c1543a8. Given the amount of fuzzing
done, it should have happened a dozen times by now, so it's highly likely that
something in 23b09e15..c1543a8 fixed it.
Hmm, I'd guess c45bf5751b6338488bd79ce777210285531da373 to be the most
likely candidate.
--
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company
Robert Haas wrote:
On Thu, May 5, 2016 at 4:11 PM, Andreas Seltenreich <seltenreich@gmx.de> wrote:
Amit Kapila writes:
Sounds good. So can we assume that you will try to get us the new report
with more information?

I don't see these crashes anymore in c1543a8. Given the amount of fuzzing
done, it should have happened a dozen times by now, so it's highly likely that
something in 23b09e15..c1543a8 fixed it.

Hmm, I'd guess c45bf5751b6338488bd79ce777210285531da373 to be the most
likely candidate.
I thought so too, but that patch changes things on the planner side,
whereas it seems to me that the reported crash is in the executor, unless I'm
misreading.
--
Álvaro Herrera http://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services
Alvaro Herrera writes:
Robert Haas wrote:
On Thu, May 5, 2016 at 4:11 PM, Andreas Seltenreich <seltenreich@gmx.de> wrote:
I don't see these crashes anymore in c1543a8. Given the amount of fuzzing
done, it should have happened a dozen times by now, so it's highly likely that
something in 23b09e15..c1543a8 fixed it.

Hmm, I'd guess c45bf5751b6338488bd79ce777210285531da373 to be the most
likely candidate.

I thought so too, but that patch changes things on the planner side,
whereas it seems to me that the reported crash is in the executor, unless I'm
misreading.
Tom had a theory in Message-ID: <12751.1461937508@sss.pgh.pa.us> on how
the planner bug could cause the executor crash.
regards,
Andreas