dirsrvtests/tickets/ticket48383_test.py | 134 ++++++++++++++++++++++++++
ldap/servers/slapd/back-ldbm/import-threads.c | 26 ++---
ldap/servers/slapd/back-ldbm/import.c | 45 ++++++++
ldap/servers/slapd/back-ldbm/import.h | 1
ldap/servers/slapd/util.c | 2
5 files changed, 191 insertions(+), 17 deletions(-)
New commits:
commit daf40aa35733b4f96b38b1612064045bf0766035
Author: William Brown <firstyear@redhat.com>
Date: Mon Feb 22 15:05:23 2016 +1000
Ticket 48383 - import tasks with dynamic buffer sizes
Bug Description: db2index, ldif2db and others all use a fifo buffer max size
defined by:
./ldap/servers/slapd/back-ldbm/import.c:48:
job->fifo.bsize = (inst->inst_cache.c_maxsize/10) << 3;
Where c_maxsize is the value of the cn=ldbm backend's cachememsize value
This is very arbitrary, and in a database with a few large objects it can
force the admin to increase cachememsize to a value that could cause an OOM
situation.
Worse, because this is effectively 0.8 of the cachememsize, it is hard for an
admin to determine the origin of the following error message:
import_log_notice(job, "WARNING: skipping entry \"%s\"",
                  slapi_entry_get_dn(e));
import_log_notice(job, "REASON: entry too large (%lu bytes) for "
                  "the buffer size (%lu bytes)", (long unsigned int)newesize,
                  (long unsigned int)job->fifo.bsize);
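For illustration (a worked example; the 10MB cachememsize below is an assumed
value, not one taken from this ticket), the old formula is (c_maxsize/10) * 8,
i.e. roughly 0.8 of the cache size:

    /* Hypothetical worked example of the old fifo buffer sizing. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long c_maxsize = 10485760;          /* assumed cachememsize: 10MB */
        unsigned long bsize = (c_maxsize / 10) << 3; /* old import.c formula */
        /* prints: fifo bsize = 8388608 bytes (~0.8 of cachememsize) */
        printf("fifo bsize = %lu bytes (~0.8 of cachememsize)\n", bsize);
        return 0;
    }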
Fix Description: We now change two things:
* First we set bsize to be the backend cachesize.
* Second, when we go to use the buffer, we check if the entry will fit into
it. If it won't fit, we trigger a check of the available bytes of RAM. If we
have sufficient memory we resize the buffer accordingly. If we do not, we skip
the entry as before, with a clearer error message:
[22/Feb/2016:14:29:47 +1000] - import userRoot: REASON: entry too large
(10020345 bytes) for the buffer size (409600 bytes), and we were UNABLE
to expand buffer.
[22/Feb/2016:14:29:47 +1000] - import userRoot: CRITICAL: skipping entry
"uid=user8,ou=People,dc=example,dc=com" ending line 1042454 of file
"/tmp/slapd-standalone.bck/var/lib/dirsrv/slapd-standalone/ldif/standalone.ldif"
This will make *all* our command line tools more robust, able to function in a
greater variety of environments, and will make the admin experience better.
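In outline, the new check behaves as in this condensed sketch (names are taken
from the patch; the actual implementation is import_fifo_validate_capacity_or_expand()
in the import.c hunk below, and util_is_cachesize_sane() may adjust the requested
size downwards):

    /* Condensed sketch of the new capacity check; not the verbatim code. */
    if (newesize > job->fifo.bsize) {
        size_t request = newesize * 4;               /* ask for headroom */
        int sane = util_is_cachesize_sane(&request); /* may shrink request */
        if (sane || newesize <= request) {
            job->fifo.bsize = request;               /* expand the fifo buffer */
        } else {
            /* skip the entry and log the CRITICAL/REASON messages shown above */
        }
    }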
https://fedorahosted.org/389/ticket/48383
Author: wibrown
Review by: nhosoi, lkrispen (Thanks!)
diff --git a/dirsrvtests/tickets/ticket48383_test.py b/dirsrvtests/tickets/ticket48383_test.py
new file mode 100644
index 0000000..fc11cee
--- /dev/null
+++ b/dirsrvtests/tickets/ticket48383_test.py
@@ -0,0 +1,134 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+import string
+import random
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+class TopologyStandalone(object):
+ def __init__(self, standalone):
+ standalone.open()
+ self.standalone = standalone
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+ # Creating standalone instance ...
+ standalone = DirSrv(verbose=True)
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ standalone.delete()
+ standalone.create()
+ standalone.open()
+
+ # Delete each instance in the end
+ def fin():
+ # This is useful for analysing the test env.
+ #standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], encrypt=False, \
+ #    repl_data=True, outputfile='%s/ldif/%s.ldif' % (standalone.dbdir,SERVERID_STANDALONE ))
+ #standalone.clearBackupFS()
+ #standalone.backupFS()
+ standalone.delete()
+ request.addfinalizer(fin)
+
+ # Clear out the tmp dir
+ standalone.clearTmpDir(__file__)
+
+ return TopologyStandalone(standalone)
+
+
+def test_ticket48383(topology):
+ """
+ This test case will check that we re-alloc buffer sizes in import.c.
+
+ We achieve this by setting the server's dbcachesize to a stupidly small value
+ and adding huge objects to ds.
+
+ Then when we run db2index, either:
+ * If we are not using the re-alloc code, it will FAIL (Bad)
+ * If we re-alloc properly, it all works regardless.
+ """
+
+ topology.standalone.config.set('nsslapd-maxbersize', '200000000')
+ topology.standalone.restart()
+
+ # Create some stupid huge objects / attributes in DS.
+ # seeAlso is indexed by default. Lets do that!
+ # This will take a while ...
+ data = [random.choice(string.letters) for x in xrange(10000000)]
+ s = "".join(data)
+
+ # This was here for an iteration test.
+ i = 1
+ USER_DN = 'uid=user%s,ou=people,%s' % (i, DEFAULT_SUFFIX)
+ padding = ['%s' % n for n in range(400)]
+
+ user = Entry((USER_DN, {
+ 'objectclass': 'top posixAccount person extensibleObject'.split(),
+ 'uid': 'user%s' % (i),
+ 'cn': 'user%s' % (i),
+ 'uidNumber': '%s' % (i),
+ 'gidNumber': '%s' % (i),
+ 'homeDirectory': '/home/user%s' % (i),
+ 'description': 'user description',
+ 'sn' : s ,
+ 'padding' : padding ,
+ }))
+
+ try:
+ topology.standalone.add_s(user)
+ except ldap.LDAPError as e:
+ log.fatal('test 48383: Failed to user%s: error %s ' % (i, e.message['desc']))
+ assert False
+ # Set the dbsize really low.
+
+ topology.standalone.backend.setProperties(bename=DEFAULT_BENAME,
+ prop='nsslapd-cachememsize', values='1')
+
+ ## Does ds try and set a minimum possible value for this?
+ ## Yes: [16/Feb/2016:16:39:18 +1000] - WARNING: cache too small, increasing to 500K bytes
+ # Given the formula, by default, this means DS will make the buffsize 400k
+ # So an object with a 1MB attribute should break indexing
+
+ # stop the server
+ topology.standalone.stop(timeout=30)
+ # Now export and import the DB. It's easier than db2index ...
+ topology.standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], encrypt=False, \
+ repl_data=True, outputfile='%s/ldif/%s.ldif' % (topology.standalone.dbdir,SERVERID_STANDALONE ))
+
+ result = topology.standalone.ldif2db(DEFAULT_BENAME, None, None, False, '%s/ldif/%s.ldif' % (topology.standalone.dbdir,SERVERID_STANDALONE ))
+
+ assert(result)
+
+ # see if user1 exists at all ....
+
+ result_user = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=user1)')
+
+ assert(len(result_user) > 0)
+
+ log.info('Test complete')
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c
index e57267e..ae603bc 100644
--- a/ldap/servers/slapd/back-ldbm/import-threads.c
+++ b/ldap/servers/slapd/back-ldbm/import-threads.c
@@ -690,13 +690,15 @@ import_producer(void *param)
}
newesize = (slapi_entry_size(ep->ep_entry) + sizeof(struct backentry));
- if (newesize > job->fifo.bsize) { /* entry too big */
- import_log_notice(job, "WARNING: skipping entry \"%s\" "
+ /* Check to see if we have the space in the fifo */
+ /* If not, make it bigger if possible */
+ if (import_fifo_validate_capacity_or_expand(job, newesize) == 1) {
+ import_log_notice(job, "CRITICAL: skipping entry \"%s\" "
"ending line %d of file \"%s\"",
slapi_entry_get_dn(e),
curr_lineno, curr_filename);
import_log_notice(job, "REASON: entry too large (%lu bytes) for "
- "the buffer size (%lu bytes)", (long unsigned int)newesize, (long unsigned int)job->fifo.bsize);
+ "the buffer size (%lu bytes), and we were UNABLE to expand buffer.", (long unsigned int)newesize, (long unsigned int)job->fifo.bsize);
backentry_free(&ep);
job->skipped++;
continue;
@@ -822,11 +824,11 @@ index_set_entry_to_fifo(ImportWorkerInfo *info, Slapi_Entry *e,
}
newesize = (slapi_entry_size(ep->ep_entry) + sizeof(struct backentry));
- if (newesize > job->fifo.bsize) { /* entry too big */
- import_log_notice(job, "WARNING: skipping entry \"%s\"",
+ if (import_fifo_validate_capacity_or_expand(job, newesize) == 1) {
+ import_log_notice(job, "CRITICAL: skipping entry \"%s\"",
slapi_entry_get_dn(e));
import_log_notice(job, "REASON: entry too large (%lu bytes) for "
- "the buffer size (%lu bytes)", (long unsigned int)newesize, (long unsigned int)job->fifo.bsize);
+ "the buffer size (%lu bytes), and we were UNABLE to expand buffer.", (long unsigned int)newesize, (long unsigned int)job->fifo.bsize);
backentry_free(&ep);
job->skipped++;
rc = 0; /* go to the next loop */
@@ -2100,11 +2102,11 @@ upgradedn_producer(void *param)
}
newesize = (slapi_entry_size(ep->ep_entry) + sizeof(struct backentry));
- if (newesize > job->fifo.bsize) { /* entry too big */
+ if (import_fifo_validate_capacity_or_expand(job, newesize) == 1) {
import_log_notice(job, "WARNING: skipping entry \"%s\"",
slapi_entry_get_dn(e));
import_log_notice(job, "REASON: entry too large (%lu bytes) for "
- "the buffer size (%lu bytes)", (long unsigned int)newesize, (long unsigned int)job->fifo.bsize);
+ "the buffer size (%lu bytes), and we were UNABLE to expand buffer.", (long unsigned int)newesize, (long unsigned int)job->fifo.bsize);
backentry_free(&ep);
job->skipped++;
continue;
@@ -3319,12 +3321,10 @@ static int bulk_import_queue(ImportJob *job, Slapi_Entry *entry)
}
newesize = (slapi_entry_size(ep->ep_entry) + sizeof(struct backentry));
- if (newesize > job->fifo.bsize) { /* entry too big */
+ if (import_fifo_validate_capacity_or_expand(job, newesize) == 1) {
import_log_notice(job, "REASON: entry too large (%lu bytes) for "
- "the effective import buffer size (%lu bytes). "
- "Try increasing nsslapd-cachememsize for the backend instance \"%s\".",
- (long unsigned int)newesize, (long unsigned int)job->fifo.bsize,
- job->inst->inst_name);
+ "the effective import buffer size (%lu bytes), and we were UNABLE to expand buffer. ",
+ (long unsigned int)newesize, (long unsigned int)job->fifo.bsize);
backentry_clear_entry(ep); /* entry is released in the frontend on failure*/
backentry_free( &ep ); /* release the backend wrapper, here */
PR_Unlock(job->wire_lock);
diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c
index 08e31da..9b6ae0d 100644
--- a/ldap/servers/slapd/back-ldbm/import.c
+++ b/ldap/servers/slapd/back-ldbm/import.c
@@ -44,11 +44,11 @@ static int import_fifo_init(ImportJob *job)
job->fifo.size = inst->inst_cache.c_maxsize / 1024; /* guess */
/* byte limit that should be respected to avoid memory starvation */
- /* conservative computing: multiply by .8 to allow for reasonable overflow */
- job->fifo.bsize = (inst->inst_cache.c_maxsize/10) << 3;
+ /* Rather than cachesize * .8, we set it to cachesize for clarity */
+ job->fifo.bsize = inst->inst_cache.c_maxsize;
job->fifo.c_bsize = 0;
-
+
if (job->fifo.size > MAX_FIFO_SIZE)
job->fifo.size = MAX_FIFO_SIZE;
/* has to be at least 1 or 2, and anything less than about 100 destroys
@@ -69,6 +69,45 @@ static int import_fifo_init(ImportJob *job)
return 0;
}
+/*
+ * import_fifo_validate_capacity_or_expand
+ *
+ * This is used to check whether the capacity of the fifo is able to accommodate
+ * an entry of the given entrysize. If it is unable to hold the entry, the
+ * fifo buffer is automatically expanded.
+ *
+ * \param job The ImportJob queue
+ * \param entrysize The size to check for
+ *
+ * \return int: 0 if the fifo can hold the entry, or if the resize was successful so it now can; 1 if it cannot hold the entry and cannot be resized.
+ */
+int import_fifo_validate_capacity_or_expand(ImportJob *job, int entrysize) {
+ int result = 1;
+ /* We shoot for four times as much to start with. */
+ size_t request = entrysize * 4;
+ int sane = 0;
+
+ if (entrysize > job->fifo.bsize) {
+ /* Check the amount of memory on the system */
+ sane = util_is_cachesize_sane(&request);
+ if (!sane && entrysize <= request) {
+ /* Did the amount cachesize set still exceed entrysize? It'll do ... */
+ job->fifo.bsize = request;
+ result = 0;
+ } else if (!sane) {
+ /* Can't allocate! No!!! */
+ result = 1;
+ } else {
+ /* Our request was okay, go ahead .... */
+ job->fifo.bsize = request;
+ result = 0;
+ }
+ } else {
+ result = 0;
+ }
+ return result;
+}
+
FifoItem *import_fifo_fetch(ImportJob *job, ID id, int worker)
{
int idx = id % job->fifo.size;
diff --git a/ldap/servers/slapd/back-ldbm/import.h b/ldap/servers/slapd/back-ldbm/import.h
index 58ad466..a109bba 100644
--- a/ldap/servers/slapd/back-ldbm/import.h
+++ b/ldap/servers/slapd/back-ldbm/import.h
@@ -197,6 +197,7 @@ struct _import_worker_info {
/* import.c */
+int import_fifo_validate_capacity_or_expand(ImportJob *job, int entrysize);
FifoItem *import_fifo_fetch(ImportJob *job, ID id, int worker);
void import_free_job(ImportJob *job);
void import_log_notice(ImportJob *job, char *format, ...)
diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c
index 3dfc657..41e213e 100644
--- a/ldap/servers/slapd/util.c
+++ b/ldap/servers/slapd/util.c
@@ -1717,7 +1717,7 @@ int util_info_sys_pages(size_t *pagesize, size_t *pages, size_t *procpages, size
/* This is stupid. If you set %u to %zu to print a size_t, you get literal %zu in your logs
* So do the filthy cast instead.
*/
- slapi_log_error(SLAPI_LOG_FATAL,"util_info_sys_pages", "USING pages=%lu, procpages=%lu, availpages=%lu \n",
+ slapi_log_error(SLAPI_LOG_TRACE,"util_info_sys_pages", "USING pages=%lu, procpages=%lu, availpages=%lu \n",
(unsigned long)*pages, (unsigned long)*procpages, (unsigned long)*availpages);
return 0;