BUG #14234: PostgreSQL consuming large amount of memory for persistent connection

Started by 德哥almost 10 years ago4 messagesbugs
Jump to latest
#1德哥
digoal@126.com

The following bug has been logged on the website:

Bug reference: 14234
Logged by: Zhou Digoal
Email address: digoal@126.com
PostgreSQL version: 9.5.3
Operating system: CentOS 6.x x64
Description:

test case :
PostgreSQL conf :
listen_addresses = '0.0.0.0' # what IP address(es) to listen
on;
port = 1921 # (change requires restart)
max_connections = 100 # (change requires restart)
superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '.' # comma-separated list of directories
tcp_keepalives_idle = 10 # TCP_KEEPIDLE, in seconds;
tcp_keepalives_interval = 10 # TCP_KEEPINTVL, in seconds;
tcp_keepalives_count = 60 # TCP_KEEPCNT;
shared_buffers = 4GB # min 128kB
dynamic_shared_memory_type = posix # the default is the first option
wal_level = hot_standby # minimal, archive, hot_standby, or
logical
fsync = off # turns forced synchronization on or
off
synchronous_commit = off # synchronization level;
full_page_writes = off # recover from partial page writes
wal_buffers = 1900MB # min 32kB, -1 sets based on
shared_buffers
wal_writer_delay = 10ms # 1-10000 milliseconds
max_wal_senders = 10 # max number of walsender processes
wal_keep_segments = 100 # in logfile segments, 16MB each; 0
disables
synchronous_standby_names = '*' # standby servers that provide sync rep
hot_standby = on # "on" allows queries during
recovery
wal_receiver_status_interval = 1s # send replies at least this often
hot_standby_feedback = on # send info from standby to
prevent
wal_retrieve_retry_interval = 1s # time to wait before retrying to
log_destination = 'csvlog' # Valid values are combinations of
logging_collector = on # Enable capturing of stderr and csvlog
log_truncate_on_rotation = on # If on, an existing log file with
the
log_timezone = 'PRC'
datestyle = 'iso, mdy'
timezone = 'PRC'
lc_messages = 'C' # locale for system error message
lc_monetary = 'C' # locale for monetary formatting
lc_numeric = 'C' # locale for number formatting
lc_time = 'C' # locale for time formatting
default_text_search_config = 'pg_catalog.english'
max_locks_per_transaction = 10000 # min 10
allow_system_table_mods =off
log_statement=none

functions :
-- MTDB_destroy: drop every schema whose name starts with schemaNamePrefix
-- (matched with LIKE against information_schema.schemata).
-- Returns the number of schemas dropped.
create or replace function MTDB_destroy (schemaNamePrefix varchar(100))
returns int as $$
declare
    curs1 cursor(prefix varchar) is
        select schema_name
        from information_schema.schemata
        where schema_name like prefix || '%';
    schemaName varchar(100);
    count integer;
begin
    count := 0;
    open curs1(schemaNamePrefix);
    loop
        fetch curs1 into schemaName;
        if not found then exit; end if;
        count := count + 1;
        -- format('%I') quotes the identifier when needed, so schemas with
        -- mixed-case or special-character names drop correctly, and a
        -- crafted schema name cannot inject extra SQL.
        execute format('drop schema %I cascade', schemaName);
    end loop;
    close curs1;
    return count;
end $$ language plpgsql;

-- MTDB_Initialize: create <numberOfSchemas> schemas named <prefix>1..N, each
-- holding <numberOfTablesPerSchema> tables (table1..tableM); when
-- createViewForEachTable is true, also create a 10-way self-join view
-- (view1..viewM) over every table.
-- Returns the total number of tables created.
-- NOTE(review): object names are built from the caller-supplied prefix and
-- interpolated unquoted into DDL; assumes the prefix is a plain lowercase
-- identifier (a quoting-sensitive prefix would need format('%I') throughout).
create or replace function MTDB_Initialize (schemaNamePrefix varchar(100),
numberOfSchemas integer, numberOfTablesPerSchema integer,
createViewForEachTable boolean)
returns integer as $$
declare
    currentSchemaId integer;
    currentTableId integer;
    currentSchemaName varchar(100);
    currentTableName varchar(100);
    currentViewName varchar(100);
    count integer;
begin
    -- drop leftovers from any previous run with the same prefix
    perform MTDB_Destroy(schemaNamePrefix);

    count := 0;
    currentSchemaId := 1;
    loop
        -- an integer cast to varchar never carries leading spaces, so the
        -- original ltrim() wrapper was a no-op and has been removed
        currentSchemaName := schemaNamePrefix || currentSchemaId::varchar(10);
        execute 'create schema ' || currentSchemaName;

        currentTableId := 1;
        loop
            currentTableName := currentSchemaName || '.' || 'table' || currentTableId::varchar(10);
            execute 'create table ' || currentTableName
                 || ' (f1 integer, f2 integer, f3 varchar(100), f4 varchar(100),'
                 || ' f5 varchar(100), f6 varchar(100), f7 boolean, f8 boolean,'
                 || ' f9 integer, f10 integer)';
            if (createViewForEachTable = true) then
                currentViewName := currentSchemaName || '.' || 'view' || currentTableId::varchar(10);
                -- deliberately heavyweight view: ten-way self-join used as a
                -- catalog/memory stress test
                execute 'create view ' || currentViewName || ' as '
                     || 'select t1.* from ' || currentTableName || ' t1 '
                     || ' inner join ' || currentTableName || ' t2 on (t1.f1 = t2.f1) '
                     || ' inner join ' || currentTableName || ' t3 on (t2.f2 = t3.f2) '
                     || ' inner join ' || currentTableName || ' t4 on (t3.f3 = t4.f3) '
                     || ' inner join ' || currentTableName || ' t5 on (t4.f4 = t5.f4) '
                     || ' inner join ' || currentTableName || ' t6 on (t5.f5 = t6.f5) '
                     || ' inner join ' || currentTableName || ' t7 on (t6.f6 = t7.f6) '
                     || ' inner join ' || currentTableName || ' t8 on (t7.f7 = t8.f7) '
                     || ' inner join ' || currentTableName || ' t9 on (t8.f8 = t9.f8) '
                     || ' inner join ' || currentTableName || ' t10 on (t9.f9 = t10.f9) ';
            end if;
            currentTableId := currentTableId + 1;
            count := count + 1;
            if (currentTableId > numberOfTablesPerSchema) then exit; end if;
        end loop;

        currentSchemaId := currentSchemaId + 1;
        if (currentSchemaId > numberOfSchemas) then exit; end if;
    end loop;
    return count;
END $$ language plpgsql;

-- MTDB_RunTests: execute a "select *" against every view whose schema name
-- starts with schemaNamePrefix, repeating the whole sweep <rounds> times.
-- Returns the total number of view scans performed.
create or replace function MTDB_RunTests(schemaNamePrefix varchar(100),
rounds integer)
returns integer as $$
declare
    -- quote_ident() on both parts makes the qualified name safe to splice
    -- into the dynamic SELECT even for mixed-case or special-character
    -- schema/view names (the bare concatenation would fail on those)
    curs1 cursor(prefix varchar) is
        select quote_ident(table_schema) || '.' || quote_ident(table_name)
        from information_schema.tables
        where table_schema like prefix || '%'
          and table_type = 'VIEW';
    currentViewName varchar(100);
    count integer;
begin
    count := 0;
    loop
        rounds := rounds - 1;
        if (rounds < 0) then exit; end if;

        open curs1(schemaNamePrefix);
        loop
            fetch curs1 into currentViewName;
            if not found then exit; end if;
            execute 'select * from ' || currentViewName;
            count := count + 1;
        end loop;
        close curs1;
    end loop;
    return count;
end $$ language plpgsql;

test SQL:
prepare :
postgres=# select MTDB_Initialize('tenant', 100, 1000, true);

session 1 :
postgres=# select MTDB_RunTests('tenant', 1);
mtdb_runtests
---------------
100000
(1 row)

session 2 :
postgres=# select MTDB_RunTests('tenant', 1);
mtdb_runtests
---------------
100000
(1 row)

memory view :
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+
COMMAND
2536 digoal 20 0 20.829g 0.016t 1.786g S 0.0 25.7 3:08.20
postgres: postgres postgres [local] idle
2453 digoal 20 0 6854896 187124 142780 S 0.0 0.3 0:00.68
postgres: postgres postgres [local] idle

smem
PID User Command Swap USS PSS
RSS
2536 digoal postgres: postgres postgres 0 15022132 15535203
16894900
2453 digoal postgres: postgres postgres 0 15022256 15535405
16895100

Why does the PostgreSQL session not release this memory?
It keeps consuming this memory until the connection is closed.

Even DISCARD ALL does not release the session's memory.

--
Sent via pgsql-bugs mailing list (pgsql-bugs@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-bugs

#2Tom Lane
tgl@sss.pgh.pa.us
In reply to: 德哥 (#1)
Re: BUG #14234: PostgreSQL consuming large amount of memory for persistent connection

digoal@126.com writes:

why PostgreSQL session do not release these memory?
it will comsume these memory until disconnect.

Your session has created and accessed 100000 tables plus 100000 views.
I do not think you should complain if that takes a great deal of memory.
Either rethink why you need so many tables, or buy hardware commensurate
with the size of your problem.

regards, tom lane

--
Sent via pgsql-bugs mailing list (pgsql-bugs@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-bugs

#3德哥
digoal@126.com
In reply to: Tom Lane (#2)
Re: BUG #14234: PostgreSQL consuming large amount of memory for persistent connection

HI,
This is a method to reproduce the case where a PostgreSQL session can't release memory.
I have another case with the same problem.
So I don't know why postgres can't release the memory. Can you explain which objects this memory contains, and why this design is used?
best regards,
digoal

--
公益是一辈子的事,I'm Digoal,Just Do It.

At 2016-07-08 10:01:18, "Tom Lane" <tgl@sss.pgh.pa.us> wrote:

Show quoted text

digoal@126.com writes:

why PostgreSQL session do not release these memory?
it will comsume these memory until disconnect.

Your session has created and accessed 100000 tables plus 100000 views.
I do not think you should complain if that takes a great deal of memory.
Either rethink why you need so many tables, or buy hardware commensurate
with the size of your problem.

regards, tom lane

#4Pavel Stehule
pavel.stehule@gmail.com
In reply to: 德哥 (#3)
Re: BUG #14234: PostgreSQL consuming large amount of memory for persistent connection

Hi

2016-07-08 7:11 GMT+02:00 德哥 <digoal@126.com>:

HI,
This is a reproduce method for why PostgreSQL session cann't release
memory.
I have another case, has the same problem.
so i don't known why postgres cann't release memory? or can you explain
these memory contain which objects? why use this design?
best regards,
digoal

Every PostgreSQL session holds system data in its own cache. Usually this cache
is pretty small (for typical numbers of objects). But it can get pretty big
if your catalog is atypically large and you touch almost every object in the
catalog during the session. The implementation of this cache is simple — there
are no deletions or limits. There is no garbage collector (and no issues
related to GC), which is great, but long sessions on a big catalog can be a
problem. The solution is simple — close the session after some amount of time
or some number of operations. Then all memory held in the caches is released.

Regards

Pavel

Show quoted text

--
公益是一辈子的事,I'm Digoal,Just Do It.

At 2016-07-08 10:01:18, "Tom Lane" <tgl@sss.pgh.pa.us> wrote:

digoal@126.com writes:

why PostgreSQL session do not release these memory?
it will comsume these memory until disconnect.

Your session has created and accessed 100000 tables plus 100000 views.
I do not think you should complain if that takes a great deal of memory.
Either rethink why you need so many tables, or buy hardware commensurate
with the size of your problem.

regards, tom lane