Branch '389-ds-base-1.3.5' - ldap/admin
by Noriko Hosoi
ldap/admin/src/scripts/DSUtil.pm.in | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
New commits:
commit 1abb0ffb2930d019f58d5dac1937ddbb56c9287f
Author: Thierry Bordaz <tbordaz@redhat.com>
Date: Mon Jan 30 17:38:01 2017 +0100
Ticket 49016 - (un)register/migration/remove may fail if there is no suffix on 'userRoot' backend
Bug Description:
The previous fix was incomplete in the case where none of the backend entries has
a 'nsslapd-suffix' value.
Fix Description:
Just return if $suffix remains unmodified.
https://fedorahosted.org/389/ticket/49016
Reviewed by: nhosoi
Platforms tested: F23, F25
Flag Day: no
Doc impact: no
(cherry picked from commit bd5fdfc8f4a560eae99672b712235c1260ee42b0)
diff --git a/ldap/admin/src/scripts/DSUtil.pm.in b/ldap/admin/src/scripts/DSUtil.pm.in
index c972805..805a9b9 100644
--- a/ldap/admin/src/scripts/DSUtil.pm.in
+++ b/ldap/admin/src/scripts/DSUtil.pm.in
@@ -965,7 +965,7 @@ sub createInfFromConfig {
$inf->{slapd}->{ServerPort} = $ent->getValues('nsslapd-port');
$inf->{slapd}->{ServerIdentifier} = $id;
- my $suffix;
+ my $suffix = "";
$ent = $conn->search("cn=ldbm database,cn=plugins,cn=config",
"one", "(objectclass=*)");
if (!$ent) {
@@ -981,6 +981,12 @@ sub createInfFromConfig {
last if ($ent->hasValue('cn', 'userRoot', 1));
$ent = $conn->nextEntry();
}
+ if ( "" eq "$suffix" )
+ {
+ push @{$errs}, "error_opening_dseldif", $fname, $!;
+ $conn->close();
+ return 0;
+ }
# we also need the instance dir
$ent = $conn->search("cn=config", "base", "(objectclass=*)");
ldap/admin
by Noriko Hosoi
ldap/admin/src/scripts/DSUtil.pm.in | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
New commits:
commit bd5fdfc8f4a560eae99672b712235c1260ee42b0
Author: Thierry Bordaz <tbordaz@redhat.com>
Date: Mon Jan 30 17:38:01 2017 +0100
Ticket 49016 - (un)register/migration/remove may fail if there is no suffix on 'userRoot' backend
Bug Description:
The previous fix was incomplete in the case where none of the backend entries has
a 'nsslapd-suffix' value.
Fix Description:
Just return if $suffix remains unmodified.
https://fedorahosted.org/389/ticket/49016
Reviewed by: nhosoi
Platforms tested: F23, F25
Flag Day: no
Doc impact: no
diff --git a/ldap/admin/src/scripts/DSUtil.pm.in b/ldap/admin/src/scripts/DSUtil.pm.in
index c972805..805a9b9 100644
--- a/ldap/admin/src/scripts/DSUtil.pm.in
+++ b/ldap/admin/src/scripts/DSUtil.pm.in
@@ -965,7 +965,7 @@ sub createInfFromConfig {
$inf->{slapd}->{ServerPort} = $ent->getValues('nsslapd-port');
$inf->{slapd}->{ServerIdentifier} = $id;
- my $suffix;
+ my $suffix = "";
$ent = $conn->search("cn=ldbm database,cn=plugins,cn=config",
"one", "(objectclass=*)");
if (!$ent) {
@@ -981,6 +981,12 @@ sub createInfFromConfig {
last if ($ent->hasValue('cn', 'userRoot', 1));
$ent = $conn->nextEntry();
}
+ if ( "" eq "$suffix" )
+ {
+ push @{$errs}, "error_opening_dseldif", $fname, $!;
+ $conn->close();
+ return 0;
+ }
# we also need the instance dir
$ent = $conn->search("cn=config", "base", "(objectclass=*)");
dirsrvtests/tests
by Simon Pichugin
dirsrvtests/tests/suites/replication/single_master_test.py | 167 +++++++++++++
1 file changed, 167 insertions(+)
New commits:
commit 17f3bef9adfcd6bf999fbe02cb13dd4c4177b3b3
Author: Simon Pichugin <spichugi@redhat.com>
Date: Mon Jan 23 11:32:01 2017 +0100
Ticket 48085 - Add single master replication test suite
Description: Port test suite from TET.
- Check that no crash happens during mail attribute replication
- Check that LastUpdate replica attributes show the right values
when no initialization has happened.
Reviewed by: wibrown (Thanks!)
https://fedorahosted.org/389/ticket/48085
diff --git a/dirsrvtests/tests/suites/replication/single_master_test.py b/dirsrvtests/tests/suites/replication/single_master_test.py
new file mode 100644
index 0000000..a743d2c
--- /dev/null
+++ b/dirsrvtests/tests/suites/replication/single_master_test.py
@@ -0,0 +1,167 @@
+import pytest
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_m1c1 as topo_r # Replication
+from lib389.topologies import topology_i2 as topo_nr # No replication
+
+DEBUGGING = os.getenv("DEBUGGING", default=False)
+if DEBUGGING:
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+ logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+TEST_USER_NAME = 'smrepl_test'
+TEST_USER_DN = 'uid={},{}'.format(TEST_USER_NAME, DEFAULT_SUFFIX)
+TEST_USER_PWD = 'smrepl_test'
+
+
+@pytest.fixture
+def test_user(topo_r, request):
+ """User for binding operation"""
+
+ log.info('Adding user {}'.format(TEST_USER_DN))
+ try:
+ topo_r.ms["master1"].add_s(Entry((TEST_USER_DN, {
+ 'objectclass': 'top person'.split(),
+ 'objectclass': 'organizationalPerson',
+ 'objectclass': 'inetorgperson',
+ 'cn': TEST_USER_NAME,
+ 'sn': TEST_USER_NAME,
+ 'userpassword': TEST_USER_PWD,
+ 'mail': '{}@redhat.com'.format(TEST_USER_NAME),
+ 'uid': TEST_USER_NAME
+ })))
+ except ldap.LDAPError as e:
+ log.error('Failed to add user (%s): error (%s)' % (TEST_USER_DN,
+ e.message['desc']))
+ raise e
+
+ def fin():
+ log.info('Deleting user {}'.format(TEST_USER_DN))
+ topo_r.ms["master1"].delete_s(TEST_USER_DN)
+
+ request.addfinalizer(fin)
+
+
+@pytest.fixture(scope="module")
+def replica_without_init(topo_nr):
+ """Enable replica without initialization"""
+
+ master = topo_nr.ins["standalone1"]
+ consumer = topo_nr.ins["standalone2"]
+
+ master.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER,
+ replicaId=REPLICAID_MASTER_1)
+ consumer.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_CONSUMER)
+ properties = {RA_NAME: 'meTo_{}:{}'.format(consumer.host, str(consumer.port)),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ agmt = master.agreement.create(suffix=DEFAULT_SUFFIX, host=consumer.host, port=consumer.port, properties=properties)
+
+ return agmt
+
+
+def test_mail_attr_repl(topo_r, test_user):
+ """Check that no crash happens during mail attribute replication
+
+ :Feature: Single master replication
+
+ :Setup: Replication setup with master and consumer instances,
+ test user on master
+
+ :Steps: 1. Check that user was replicated to consumer
+ 2. Back up mail database file
+ 3. Remove mail attribute from the user entry
+ 4. Restore mail database
+ 5. Search for the entry with a substring 'mail=user*'
+ 6. Search for the entry once again to make sure that server is alive
+
+ :Assert: No crash happens
+ """
+
+ master = topo_r.ms["master1"]
+ consumer = topo_r.cs["consumer1"]
+
+ log.info("Wait for a user to be replicated")
+ time.sleep(3)
+
+ log.info("Check that replication is working")
+ entries = consumer.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "uid={}".format(TEST_USER_NAME),
+ ["uid"])
+ assert entries, "User {} wasn't replicated successfully".format(TEST_USER_NAME)
+
+ entries = consumer.backend.list(DEFAULT_SUFFIX)
+ db_dir = entries[0]["nsslapd-directory"]
+ mail_db = filter(lambda fl: fl.startswith("mail"), os.listdir(db_dir))
+ assert mail_db, "mail.* wasn't found in {}"
+ mail_db_path = os.path.join(db_dir, mail_db[0])
+ backup_path = os.path.join(DEFAULT_BACKUPDIR, mail_db[0])
+
+ consumer.stop()
+ log.info("Back up {} to {}".format(mail_db_path, backup_path))
+ shutil.copyfile(mail_db_path, backup_path)
+ consumer.start()
+
+ log.info("Remove 'mail' attr from master")
+ try:
+ master.modify_s(TEST_USER_DN, [(ldap.MOD_DELETE, 'mail', '{}@redhat.com'.format(TEST_USER_NAME))])
+ except ldap.LDAPError as e:
+ log.error('Failed to delete att user {}: error {}'.format(TEST_USER_DN, e.message['desc']))
+ raise e
+
+ log.info("Wait for the replication to happen")
+ time.sleep(5)
+
+ consumer.stop()
+ log.info("Restore {} to {}".format(backup_path, mail_db_path))
+ shutil.copyfile(backup_path, mail_db_path)
+ consumer.start()
+
+ log.info("Make a search for mail attribute in attempt to crash server")
+ consumer.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "mail={}*".format(TEST_USER_NAME), ["mail"])
+
+ log.info("Make sure that server hasn't crashed")
+ entries = consumer.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "uid={}".format(TEST_USER_NAME),
+ ["uid"])
+ assert entries, "User {} wasn't replicated successfully".format(TEST_USER_NAME)
+
+
+def test_mail_attr_repl(topo_nr, replica_without_init):
+ """Check that LastUpdate replica attributes show right values
+
+ :Feature: Single master replication
+
+ :Setup: Replication setup with master and consumer instances
+ without initialization
+
+ :Steps: 1. Check nsds5replicaLastUpdateStart, nsds5replicaLastUpdateEnd,
+ nsds5replicaLastUpdateStatus attrs
+
+ :Assert: nsds5replicaLastUpdateStart: 0, nsds5replicaLastUpdateEnd: 0 and
+ nsds5replicaLastUpdateStatus is not equal to
+ "0 Replica acquired successfully: Incremental update started"
+ """
+
+ master = topo_nr.ins["standalone1"]
+ consumer = topo_nr.ins["standalone2"]
+
+ assert not master.testReplication(DEFAULT_SUFFIX, consumer)
+
+ agmt = master.search_s(replica_without_init, ldap.SCOPE_BASE, "(objectClass=*)",
+ ["nsds5replicaLastUpdateStart",
+ "nsds5replicaLastUpdateEnd",
+ "nsds5replicaLastUpdateStatus"])[0]
+
+ assert agmt["nsds5replicaLastUpdateStart"] == "19700101000000Z"
+ assert agmt["nsds5replicaLastUpdateEnd"] == "19700101000000Z"
+ assert "Replica acquired successfully" not in agmt["nsds5replicaLastUpdateStatus"]
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
ldap/servers
by William Brown
ldap/servers/slapd/back-ldbm/start.c | 48 ++++++++++++++++++-----------------
1 file changed, 26 insertions(+), 22 deletions(-)
New commits:
commit e998632b45b49813df128cee17b813bee306f580
Author: William Brown <firstyear@redhat.com>
Date: Mon Jan 30 12:32:20 2017 +1000
Ticket 49105 - Sig FPE when ns-slapd has 0 backends.
Bug Description: The autotuning system assumed we had 1 or more backends.
As a result, when you start a server with no backends, a divide by 0 was
encountered.
Fix Description: Check the backend count before we attempt the division.
https://fedorahosted.org/389/ticket/49105
Author: wibrown
Review by: nhosoi (Thanks!)
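For illustration, a minimal standalone sketch of the guarded division and 64 MB clamping described above; the helper name and demo values are illustrative, and the actual change to ldbm_back_start_autotune() follows in the diff below.

#include <stdio.h>
#include <stdint.h>

#define MEGABYTE (1024 * 1024)

/* Round the per-backend entry-cache pages up to a 64 MB boundary,
 * guarding against a zero backend count (the SIGFPE case). */
static uint64_t
clamp_entry_pages(uint64_t zone_pages, uint64_t db_pages,
                  uint64_t backend_count, uint64_t pagesize)
{
    uint64_t entry_pages = 0;

    if (backend_count > 0) {                  /* avoid the divide by 0 */
        uint64_t clamp_pages = (64 * (uint64_t)MEGABYTE) / pagesize;
        entry_pages = (zone_pages - db_pages) / backend_count;
        if (entry_pages % clamp_pages != 0) { /* round up to the next 64 MB */
            entry_pages = (entry_pages / clamp_pages + 1) * clamp_pages;
        }
    }
    return entry_pages;
}

int
main(void)
{
    /* 130560 free pages at 4 KB (510 MB) round up to 512 MB. */
    uint64_t pages = clamp_entry_pages(131560, 1000, 1, 4096);
    printf("entry cache: %llu bytes\n", (unsigned long long)(pages * 4096));
    /* Zero backends: no division is attempted, the cache size stays 0. */
    printf("no backends: %llu pages\n",
           (unsigned long long)clamp_entry_pages(131560, 1000, 0, 4096));
    return 0;
}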
diff --git a/ldap/servers/slapd/back-ldbm/start.c b/ldap/servers/slapd/back-ldbm/start.c
index 56dd2c4..759af8a 100644
--- a/ldap/servers/slapd/back-ldbm/start.c
+++ b/ldap/servers/slapd/back-ldbm/start.c
@@ -150,27 +150,29 @@ ldbm_back_start_autotune(struct ldbminfo *li) {
db_pages = (512 * MEGABYTE) / pagesize;
}
- /* Number of entry cache pages per backend. */
- entry_pages = (zone_pages - db_pages) / backend_count;
- /* Now, clamp this value to a 64mb boundary. */
- /* How many pages are in 64mb? */
- clamp_pages = (64 * MEGABYTE) / pagesize;
- /* Now divide the entry pages by this, and also mod. If mod != 0, we need
- * to add 1 to the diveded number. This should give us:
- * 510 * 1024 * 1024 == 510MB
- * 534773760 bytes
- * 130560 pages at 4096 pages.
- * 16384 pages for 64Mb
- * 130560 / 16384 = 7
- * 130560 % 16384 = 15872 which is != 0
- * therfore 7 + 1, aka 8 * 16384 = 131072 pages = 536870912 bytes = 512MB.
- */
- clamp_div = entry_pages / clamp_pages;
- clamp_mod = entry_pages % clamp_pages;
- if (clamp_mod != 0) {
- /* If we want to clamp down, remove this line. This would change the above from 510mb -> 448mb. */
- clamp_div += 1;
- entry_pages = clamp_div * clamp_pages;
+ if (backend_count > 0 ) {
+ /* Number of entry cache pages per backend. */
+ entry_pages = (zone_pages - db_pages) / backend_count;
+ /* Now, clamp this value to a 64mb boundary. */
+ /* How many pages are in 64mb? */
+ clamp_pages = (64 * MEGABYTE) / pagesize;
+ /* Now divide the entry pages by this, and also mod. If mod != 0, we need
+ * to add 1 to the diveded number. This should give us:
+ * 510 * 1024 * 1024 == 510MB
+ * 534773760 bytes
+ * 130560 pages at 4096 pages.
+ * 16384 pages for 64Mb
+ * 130560 / 16384 = 7
+ * 130560 % 16384 = 15872 which is != 0
+ * therfore 7 + 1, aka 8 * 16384 = 131072 pages = 536870912 bytes = 512MB.
+ */
+ clamp_div = entry_pages / clamp_pages;
+ clamp_mod = entry_pages % clamp_pages;
+ if (clamp_mod != 0) {
+ /* If we want to clamp down, remove this line. This would change the above from 510mb -> 448mb. */
+ clamp_div += 1;
+ entry_pages = clamp_div * clamp_pages;
+ }
}
slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "found %luk physical memory\n", pages*(pagesize/1024));
@@ -202,7 +204,9 @@ ldbm_back_start_autotune(struct ldbminfo *li) {
/* For each backend */
/* apply the appropriate cache size if 0 */
- li->li_cache_autosize_ec = (unsigned long)entry_pages * pagesize;
+ if (backend_count > 0 ) {
+ li->li_cache_autosize_ec = (unsigned long)entry_pages * pagesize;
+ }
for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
Branch '389-ds-base-1.2.11' - 2 commits - dirsrvtests/tests ldap/servers
by Noriko Hosoi
dirsrvtests/tests/tickets/ticket49104_test.py | 80 ++++++++++++++++++++++++++
ldap/servers/slapd/tools/dbscan.c | 17 +++++
2 files changed, 96 insertions(+), 1 deletion(-)
New commits:
commit b73428c307e25b636d1833c22680571dc9960fc2
Author: Noriko Hosoi <nhosoi@redhat.com>
Date: Fri Jan 27 15:41:08 2017 -0800
Ticket #49104 - Add CI test
Description: dbscan-bin crashing due to a segmentation fault
(cherry picked from commit c5a67ce1ad297cf59b5dd0e47c9896e261aba08c)
(cherry picked from commit db458b4be7293c7c6cb390b32109b635a1867fd9)
diff --git a/dirsrvtests/tests/tickets/ticket49104_test.py b/dirsrvtests/tests/tickets/ticket49104_test.py
new file mode 100644
index 0000000..9840b1d
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket49104_test.py
@@ -0,0 +1,80 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2017 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import logging
+import subprocess
+
+import pytest
+from lib389.tasks import *
+from lib389.topologies import topology_st
+
+log = logging.getLogger(__name__)
+
+def test_ticket49104_setup(topology_st):
+ """
+ Generate an ldif file having 10K entries and import it.
+ """
+ # Generate a test ldif (100k entries)
+ ldif_dir = topology_st.standalone.get_ldif_dir()
+ import_ldif = ldif_dir + '/49104.ldif'
+ try:
+ topology_st.standalone.buildLDIF(100000, import_ldif)
+ except OSError as e:
+ log.fatal('ticket 49104: failed to create test ldif,\
+ error: %s - %s' % (e.errno, e.strerror))
+ assert False
+
+ # Online
+ try:
+ topology_st.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX,
+ input_file=import_ldif,
+ args={TASK_WAIT: True})
+ except ValueError:
+ log.fatal('ticket 49104: Online import failed')
+ assert False
+
+def test_ticket49104(topology_st):
+ """
+ Run dbscan with valgrind changing the truncate size.
+ If there is no Invalid report, we can claim the test has passed.
+ """
+ log.info("Test ticket 49104 -- dbscan crashes by memory corruption")
+ myvallog = '/tmp/val49104.out'
+ if os.path.exists(myvallog):
+ os.remove(myvallog)
+ prog = os.path.join(topology_st.standalone.get_bin_dir(), 'dbscan-bin')
+ valcmd = 'valgrind --tool=memcheck --leak-check=yes --num-callers=40 --log-file=%s ' % myvallog
+ id2entry = os.path.join(topology_st.standalone.dbdir, DEFAULT_BENAME, 'id2entry.db')
+
+ for i in range(20, 30):
+ cmd = valcmd + '%s -f %s -t %d -R' % (prog, id2entry , i)
+ log.info('Running script: %s' % cmd)
+ proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
+ outs = ''
+ try:
+ outs = proc.communicate()
+ except OSError as e:
+ log.exception('dbscan: error executing (%s): error %d - %s' %
+ (cmd, e.errno, e.strerror))
+ raise e
+
+ grep = 'egrep "Invalid read|Invalid write" %s' % myvallog
+ p = os.popen(grep, "r")
+ l = p.readline()
+ if 'Invalid' in l:
+ log.fatal('ERROR: valgrind reported invalid read/write: %s' % l)
+ assert False
+
+ log.info('ticket 49104 - PASSED')
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
commit 93946f9e853d5f17b894ffb11a1dcfaeb9698890
Author: Noriko Hosoi <nhosoi@redhat.com>
Date: Thu Jan 26 15:10:46 2017 -0800
Ticket #49104 - dbscan-bin crashing due to a segmentation fault
Description: There was a logic error in format_raw. When a truncate
option (-t width) is given, the function cuts the output and replaces
the last 5 bytes with " ...\0". The position to start the replacement
was not correct in some cases.
https://fedorahosted.org/389/ticket/49104
Reviewed by wibrown@redhat.com (Thank you, William!!)
(cherry picked from commit efeb2f6e873df32ce7545fa1fb319806d2108fda)
(cherry picked from commit 3e3bcee42d3cb800457127e0d71ed7ce7a5d65a0)
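To illustrate the idea, a minimal standalone sketch of the corrected truncation: pad " ..." at the end of the buffer, first backing up over a partial %XX escape so the dump never ends mid-escape. The helper name and demo buffer are illustrative; the real logic is format_raw() in the diff below.

#include <stdio.h>
#include <string.h>

/* Pad " ..." at the end of a fixed-size output buffer, backing up over a
 * partial %XX escape first.  Hypothetical helper, not the patch itself. */
static void
pad_ellipsis(char *buf, size_t bufsiz, int last_was_hex)
{
    char *o = buf + bufsiz - 5;            /* leave room for " ...\0" */

    if (last_was_hex) {
        if (o > buf && o[-1] == '%') {
            o -= 1;                        /* output ends "...%"  -> drop it   */
        } else if (o > buf + 1 && o[-2] == '%') {
            o -= 2;                        /* output ends "...%4" -> drop both */
        }
    }
    strcpy(o, " ...");
}

int
main(void)
{
    char buf[12] = "abc%41%4";             /* output stopped mid-escape */
    pad_ellipsis(buf, sizeof(buf), 1);
    printf("%s\n", buf);                   /* prints "abc%41 ..." */
    return 0;
}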
diff --git a/ldap/servers/slapd/tools/dbscan.c b/ldap/servers/slapd/tools/dbscan.c
index d84f138..c9445c6 100644
--- a/ldap/servers/slapd/tools/dbscan.c
+++ b/ldap/servers/slapd/tools/dbscan.c
@@ -239,6 +239,7 @@ static char *format_raw(unsigned char *s, int len, int flags,
return NULL;
for (p = s, o = buf, i = 0; i < len && o < bufend; p++, i++) {
+ int ishex = 0;
if ((*p == '%') || (*p <= ' ') || (*p >= 126)) {
/* index keys are stored with their trailing NUL */
if ((*p == 0) && (i == len-1))
@@ -252,18 +253,32 @@ static char *format_raw(unsigned char *s, int len, int flags,
*o++ = '%';
*o++ = hex[*p / 16];
*o++ = hex[*p % 16];
+ ishex = 1;
}
} else {
*o++ = *p;
}
if (truncatesiz > 0 && o > bufend - 5) {
/* truncate it */
+ /*
+ * Padding " ...\0" at the end of the buf.
+ * If dumped as %##, truncate the partial value if any.
+ */
+ o = bufend - 5;
+ if (ishex) {
+ if ((o > buf) && *(o-1) == '%') {
+ o -= 1;
+ } else if ((o > buf + 1) && *(o-2) == '%') {
+ o -= 2;
+ }
+ }
strcpy((char *)o, " ...");
i = len;
o += 4;
+ break;
}
}
- *o = 0;
+ *o = '\0';
return (char *)buf;
}
Branch '389-ds-base-1.3.5' - 2 commits - dirsrvtests/tests ldap/servers
by Noriko Hosoi
dirsrvtests/tests/tickets/ticket49104_test.py | 80 ++++++++++++++++++++++++++
ldap/servers/slapd/tools/dbscan.c | 17 +++++
2 files changed, 96 insertions(+), 1 deletion(-)
New commits:
commit db458b4be7293c7c6cb390b32109b635a1867fd9
Author: Noriko Hosoi <nhosoi@redhat.com>
Date: Fri Jan 27 15:41:08 2017 -0800
Ticket #49104 - Add CI test
Description: dbscan-bin crashing due to a segmentation fault
(cherry picked from commit c5a67ce1ad297cf59b5dd0e47c9896e261aba08c)
diff --git a/dirsrvtests/tests/tickets/ticket49104_test.py b/dirsrvtests/tests/tickets/ticket49104_test.py
new file mode 100644
index 0000000..9840b1d
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket49104_test.py
@@ -0,0 +1,80 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2017 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import logging
+import subprocess
+
+import pytest
+from lib389.tasks import *
+from lib389.topologies import topology_st
+
+log = logging.getLogger(__name__)
+
+def test_ticket49104_setup(topology_st):
+ """
+ Generate an ldif file having 10K entries and import it.
+ """
+ # Generate a test ldif (100k entries)
+ ldif_dir = topology_st.standalone.get_ldif_dir()
+ import_ldif = ldif_dir + '/49104.ldif'
+ try:
+ topology_st.standalone.buildLDIF(100000, import_ldif)
+ except OSError as e:
+ log.fatal('ticket 49104: failed to create test ldif,\
+ error: %s - %s' % (e.errno, e.strerror))
+ assert False
+
+ # Online
+ try:
+ topology_st.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX,
+ input_file=import_ldif,
+ args={TASK_WAIT: True})
+ except ValueError:
+ log.fatal('ticket 49104: Online import failed')
+ assert False
+
+def test_ticket49104(topology_st):
+ """
+ Run dbscan with valgrind changing the truncate size.
+ If there is no Invalid report, we can claim the test has passed.
+ """
+ log.info("Test ticket 49104 -- dbscan crashes by memory corruption")
+ myvallog = '/tmp/val49104.out'
+ if os.path.exists(myvallog):
+ os.remove(myvallog)
+ prog = os.path.join(topology_st.standalone.get_bin_dir(), 'dbscan-bin')
+ valcmd = 'valgrind --tool=memcheck --leak-check=yes --num-callers=40 --log-file=%s ' % myvallog
+ id2entry = os.path.join(topology_st.standalone.dbdir, DEFAULT_BENAME, 'id2entry.db')
+
+ for i in range(20, 30):
+ cmd = valcmd + '%s -f %s -t %d -R' % (prog, id2entry , i)
+ log.info('Running script: %s' % cmd)
+ proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
+ outs = ''
+ try:
+ outs = proc.communicate()
+ except OSError as e:
+ log.exception('dbscan: error executing (%s): error %d - %s' %
+ (cmd, e.errno, e.strerror))
+ raise e
+
+ grep = 'egrep "Invalid read|Invalid write" %s' % myvallog
+ p = os.popen(grep, "r")
+ l = p.readline()
+ if 'Invalid' in l:
+ log.fatal('ERROR: valgrind reported invalid read/write: %s' % l)
+ assert False
+
+ log.info('ticket 49104 - PASSED')
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
commit 3e3bcee42d3cb800457127e0d71ed7ce7a5d65a0
Author: Noriko Hosoi <nhosoi@redhat.com>
Date: Thu Jan 26 15:10:46 2017 -0800
Ticket #49104 - dbscan-bin crashing due to a segmentation fault
Description: There was a logic error in format_raw. When a truncate
option (-t width) is given, the function cuts the output and replaces
the last 5 bytes with " ...\0". The position to start the replacement
was not correct in some cases.
https://fedorahosted.org/389/ticket/49104
Reviewed by wibrown@redhat.com (Thank you, William!!)
(cherry picked from commit efeb2f6e873df32ce7545fa1fb319806d2108fda)
diff --git a/ldap/servers/slapd/tools/dbscan.c b/ldap/servers/slapd/tools/dbscan.c
index 70b9409..8b2c933 100644
--- a/ldap/servers/slapd/tools/dbscan.c
+++ b/ldap/servers/slapd/tools/dbscan.c
@@ -202,6 +202,7 @@ static char *format_raw(unsigned char *s, int len, int flags,
return NULL;
for (p = s, o = buf, i = 0; i < len && o < bufend; p++, i++) {
+ int ishex = 0;
if ((*p == '%') || (*p <= ' ') || (*p >= 126)) {
/* index keys are stored with their trailing NUL */
if ((*p == 0) && (i == len-1))
@@ -215,18 +216,32 @@ static char *format_raw(unsigned char *s, int len, int flags,
*o++ = '%';
*o++ = hex[*p / 16];
*o++ = hex[*p % 16];
+ ishex = 1;
}
} else {
*o++ = *p;
}
if (truncatesiz > 0 && o > bufend - 5) {
/* truncate it */
+ /*
+ * Padding " ...\0" at the end of the buf.
+ * If dumped as %##, truncate the partial value if any.
+ */
+ o = bufend - 5;
+ if (ishex) {
+ if ((o > buf) && *(o-1) == '%') {
+ o -= 1;
+ } else if ((o > buf + 1) && *(o-2) == '%') {
+ o -= 2;
+ }
+ }
strcpy((char *)o, " ...");
i = len;
o += 4;
+ break;
}
}
- *o = 0;
+ *o = '\0';
return (char *)buf;
}
2 commits - dirsrvtests/tests ldap/servers
by Noriko Hosoi
dirsrvtests/tests/tickets/ticket49104_test.py | 80 ++++++++++++++++++++++++++
ldap/servers/slapd/tools/dbscan.c | 17 +++++
2 files changed, 96 insertions(+), 1 deletion(-)
New commits:
commit c5a67ce1ad297cf59b5dd0e47c9896e261aba08c
Author: Noriko Hosoi <nhosoi@redhat.com>
Date: Fri Jan 27 15:41:08 2017 -0800
Ticket #49104 - Add CI test
Description: dbscan-bin crashing due to a segmentation fault
diff --git a/dirsrvtests/tests/tickets/ticket49104_test.py b/dirsrvtests/tests/tickets/ticket49104_test.py
new file mode 100644
index 0000000..9840b1d
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket49104_test.py
@@ -0,0 +1,80 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2017 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import logging
+import subprocess
+
+import pytest
+from lib389.tasks import *
+from lib389.topologies import topology_st
+
+log = logging.getLogger(__name__)
+
+def test_ticket49104_setup(topology_st):
+ """
+ Generate an ldif file having 10K entries and import it.
+ """
+ # Generate a test ldif (100k entries)
+ ldif_dir = topology_st.standalone.get_ldif_dir()
+ import_ldif = ldif_dir + '/49104.ldif'
+ try:
+ topology_st.standalone.buildLDIF(100000, import_ldif)
+ except OSError as e:
+ log.fatal('ticket 49104: failed to create test ldif,\
+ error: %s - %s' % (e.errno, e.strerror))
+ assert False
+
+ # Online
+ try:
+ topology_st.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX,
+ input_file=import_ldif,
+ args={TASK_WAIT: True})
+ except ValueError:
+ log.fatal('ticket 49104: Online import failed')
+ assert False
+
+def test_ticket49104(topology_st):
+ """
+ Run dbscan with valgrind changing the truncate size.
+ If there is no Invalid report, we can claim the test has passed.
+ """
+ log.info("Test ticket 49104 -- dbscan crashes by memory corruption")
+ myvallog = '/tmp/val49104.out'
+ if os.path.exists(myvallog):
+ os.remove(myvallog)
+ prog = os.path.join(topology_st.standalone.get_bin_dir(), 'dbscan-bin')
+ valcmd = 'valgrind --tool=memcheck --leak-check=yes --num-callers=40 --log-file=%s ' % myvallog
+ id2entry = os.path.join(topology_st.standalone.dbdir, DEFAULT_BENAME, 'id2entry.db')
+
+ for i in range(20, 30):
+ cmd = valcmd + '%s -f %s -t %d -R' % (prog, id2entry , i)
+ log.info('Running script: %s' % cmd)
+ proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
+ outs = ''
+ try:
+ outs = proc.communicate()
+ except OSError as e:
+ log.exception('dbscan: error executing (%s): error %d - %s' %
+ (cmd, e.errno, e.strerror))
+ raise e
+
+ grep = 'egrep "Invalid read|Invalid write" %s' % myvallog
+ p = os.popen(grep, "r")
+ l = p.readline()
+ if 'Invalid' in l:
+ log.fatal('ERROR: valgrind reported invalid read/write: %s' % l)
+ assert False
+
+ log.info('ticket 49104 - PASSED')
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
commit efeb2f6e873df32ce7545fa1fb319806d2108fda
Author: Noriko Hosoi <nhosoi@redhat.com>
Date: Thu Jan 26 15:10:46 2017 -0800
Ticket #49104 - dbscan-bin crashing due to a segmentation fault
Description: There was a logic error in format_raw. When a truncate
option (-t width) is given, the function cuts the output and replaces
the last 5 bytes with " ...\0". The position to start the replacement
was not correct in some cases.
https://fedorahosted.org/389/ticket/49104
Reviewed by wibrown@redhat.com (Thank you, William!!)
diff --git a/ldap/servers/slapd/tools/dbscan.c b/ldap/servers/slapd/tools/dbscan.c
index 0a7f85e..33252b8 100644
--- a/ldap/servers/slapd/tools/dbscan.c
+++ b/ldap/servers/slapd/tools/dbscan.c
@@ -202,6 +202,7 @@ static char *format_raw(unsigned char *s, int len, int flags,
return NULL;
for (p = s, o = buf, i = 0; i < len && o < bufend; p++, i++) {
+ int ishex = 0;
if ((*p == '%') || (*p <= ' ') || (*p >= 126)) {
/* index keys are stored with their trailing NUL */
if ((*p == 0) && (i == len-1))
@@ -215,18 +216,32 @@ static char *format_raw(unsigned char *s, int len, int flags,
*o++ = '%';
*o++ = hex[*p / 16];
*o++ = hex[*p % 16];
+ ishex = 1;
}
} else {
*o++ = *p;
}
if (truncatesiz > 0 && o > bufend - 5) {
/* truncate it */
+ /*
+ * Padding " ...\0" at the end of the buf.
+ * If dumped as %##, truncate the partial value if any.
+ */
+ o = bufend - 5;
+ if (ishex) {
+ if ((o > buf) && *(o-1) == '%') {
+ o -= 1;
+ } else if ((o > buf + 1) && *(o-2) == '%') {
+ o -= 2;
+ }
+ }
strcpy((char *)o, " ...");
i = len;
o += 4;
+ break;
}
}
- *o = 0;
+ *o = '\0';
return (char *)buf;
}
ldap/servers
by Mark Reynolds
ldap/servers/slapd/back-ldbm/dblayer.c | 8 ++++----
ldap/servers/slapd/back-ldbm/dbverify.c | 2 +-
ldap/servers/slapd/back-ldbm/import-threads.c | 11 ++++++-----
ldap/servers/slapd/back-ldbm/instance.c | 11 ++++++-----
ldap/servers/slapd/ssl.c | 25 +++++++++++++++++++------
5 files changed, 36 insertions(+), 21 deletions(-)
New commits:
commit 9fc10279fd79d602cda9e0250d96f32b8d005120
Author: Mark Reynolds <mreynolds@redhat.com>
Date: Wed Jan 4 16:54:20 2017 -0500
Ticket 49075 - Adjust log severity levels
Description: Some log levels were set too severely for normal
messages. Also, in ssl.c we now test whether the cert db file exists
before we try to chmod it.
https://fedorahosted.org/389/ticket/49075
Reviewed by: nhosoi(Thanks!)
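For illustration, a minimal sketch of the existence check now placed in front of each chmod call; the helper and the path are illustrative, and the real change is in slapd_nss_init() in the ssl.c hunk below.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

/* Only attempt the chmod when the file actually exists, so a missing
 * cert/key database no longer produces a spurious warning. */
static void
fix_db_perms(const char *path)
{
    if (access(path, F_OK) == 0 &&
        chmod(path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) != 0) {
        fprintf(stderr, "chmod failed for %s: (%d) %s\n",
                path, errno, strerror(errno));
    }
}

int
main(void)
{
    fix_db_perms("/tmp/example-cert9.db");   /* silently skipped if absent */
    return 0;
}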
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 04d31b1..683994f 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -1473,11 +1473,11 @@ dblayer_start(struct ldbminfo *li, int dbmode)
(priv->dblayer_lock_config != priv->dblayer_previous_lock_config)) &&
!(dbmode & (DBLAYER_ARCHIVE_MODE|DBLAYER_EXPORT_MODE)) ) {
if (priv->dblayer_cachesize != priv->dblayer_previous_cachesize) {
- slapi_log_err(SLAPI_LOG_NOTICE, "dblayer_start", "Resizing db cache size: %lu -> %lu\n",
+ slapi_log_err(SLAPI_LOG_INFO, "dblayer_start", "Resizing db cache size: %lu -> %lu\n",
priv->dblayer_previous_cachesize, priv->dblayer_cachesize);
}
if (priv->dblayer_ncache != priv->dblayer_previous_ncache) {
- slapi_log_err(SLAPI_LOG_NOTICE, "dblayer_start", "Resizing db cache count: %d -> %d\n",
+ slapi_log_err(SLAPI_LOG_INFO, "dblayer_start", "Resizing db cache count: %d -> %d\n",
priv->dblayer_previous_ncache, priv->dblayer_ncache);
}
if (priv->dblayer_lock_config != priv->dblayer_previous_lock_config) {
@@ -1989,7 +1989,7 @@ int dblayer_instance_start(backend *be, int mode)
* but nsslapd-db-private-import-mem should work with import,
* as well */
if (priv->dblayer_private_import_mem) {
- slapi_log_err(SLAPI_LOG_WARNING,
+ slapi_log_err(SLAPI_LOG_INFO,
"dblayer_instance_start", "Import is running with "
"nsslapd-db-private-import-mem on; "
"No other process is allowed to access the database\n");
@@ -5656,7 +5656,7 @@ dblayer_copyfile(char *source, char *destination, int overwrite, int mode)
destination, strerror(errno));
goto error;
}
- slapi_log_err(SLAPI_LOG_BACKLDBM,
+ slapi_log_err(SLAPI_LOG_INFO,
"dblayer_copyfile", "Copying %s to %s\n", source, destination);
/* Loop round reading data and writing it */
while (1)
diff --git a/ldap/servers/slapd/back-ldbm/dbverify.c b/ldap/servers/slapd/back-ldbm/dbverify.c
index cb175bd..53c9f78 100644
--- a/ldap/servers/slapd/back-ldbm/dbverify.c
+++ b/ldap/servers/slapd/back-ldbm/dbverify.c
@@ -159,7 +159,7 @@ dbverify_ext( ldbm_instance *inst, int verbose )
{
if (verbose)
{
- slapi_log_err(SLAPI_LOG_ERR, "dbverify_ext",
+ slapi_log_err(SLAPI_LOG_INFO, "dbverify_ext",
"%s: ok\n", dbdir);
}
}
diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c
index 0557778..5b81427 100644
--- a/ldap/servers/slapd/back-ldbm/import-threads.c
+++ b/ldap/servers/slapd/back-ldbm/import-threads.c
@@ -1465,7 +1465,7 @@ upgradedn_producer(void *param)
if (!chk_dn_norm && !chk_dn_norm_sp) {
/* Nothing to do... */
- slapi_log_err(SLAPI_LOG_ERR, "upgradedn_producer",
+ slapi_log_err(SLAPI_LOG_INFO, "upgradedn_producer",
"UpgradeDnFormat is not required.\n");
info->state = FINISHED;
goto done;
@@ -1526,7 +1526,7 @@ upgradedn_producer(void *param)
if (0 != db_rval) {
if (DB_NOTFOUND == db_rval) {
- slapi_log_err(SLAPI_LOG_ERR, "upgradedn_producer",
+ slapi_log_err(SLAPI_LOG_INFO, "upgradedn_producer",
"%s: Finished reading database\n", inst->inst_name);
if (job->task) {
slapi_task_log_notice(job->task,
@@ -1604,7 +1604,7 @@ upgradedn_producer(void *param)
pid, &id, &psrdn, &curr_entry);
if (rc) {
slapi_log_err(SLAPI_LOG_ERR,
- "uptradedn: Failed to compose dn for "
+ "upgradedn: Failed to compose dn for "
"(rdn: %s, ID: %d)\n", rdn, temp_id);
slapi_ch_free_string(&rdn);
slapi_rdn_done(&psrdn);
@@ -2101,9 +2101,10 @@ upgradedn_producer(void *param)
newesize = (slapi_entry_size(ep->ep_entry) + sizeof(struct backentry));
if (import_fifo_validate_capacity_or_expand(job, newesize) == 1) {
- import_log_notice(job, SLAPI_LOG_ERR, "upgradedn_producer", "Skipping entry \"%s\"",
+ import_log_notice(job, SLAPI_LOG_NOTICE, "upgradedn_producer", "Skipping entry \"%s\"",
slapi_entry_get_dn(e));
- import_log_notice(job, SLAPI_LOG_ERR, "upgradedn_producer", "REASON: entry too large (%lu bytes) for "
+ import_log_notice(job, SLAPI_LOG_NOTICE, "upgradedn_producer",
+ "REASON: entry too large (%lu bytes) for "
"the buffer size (%lu bytes), and we were UNABLE to expand buffer.",
(long unsigned int)newesize, (long unsigned int)job->fifo.bsize);
backentry_free(&ep);
diff --git a/ldap/servers/slapd/back-ldbm/instance.c b/ldap/servers/slapd/back-ldbm/instance.c
index 8474854..f79d048 100644
--- a/ldap/servers/slapd/back-ldbm/instance.c
+++ b/ldap/servers/slapd/back-ldbm/instance.c
@@ -249,9 +249,9 @@ ldbm_instance_start(backend *be)
if (be->be_state != BE_STATE_STOPPED &&
be->be_state != BE_STATE_DELETED) {
- slapi_log_err(SLAPI_LOG_TRACE,
- "ldbm_instance_start", "Warning - backend is in a wrong state - %d\n",
- be->be_state);
+ slapi_log_err(SLAPI_LOG_TRACE, "ldbm_instance_start",
+ "Warning - backend is in a wrong state - %d\n",
+ be->be_state);
PR_Unlock (be->be_state_lock);
return 0;
}
@@ -370,8 +370,9 @@ ldbm_instance_destructor(void **arg)
{
ldbm_instance *inst = (ldbm_instance *) *arg;
- slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_destructor", "Destructor for instance %s called\n",
- inst->inst_name);
+ slapi_log_err(SLAPI_LOG_TRACE, "ldbm_instance_destructor",
+ "Destructor for instance %s called\n",
+ inst->inst_name);
slapi_counter_destroy(&(inst->inst_ref_count));
slapi_ch_free_string(&inst->inst_name);
diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c
index f6da414..f35b3f1 100644
--- a/ldap/servers/slapd/ssl.c
+++ b/ldap/servers/slapd/ssl.c
@@ -25,6 +25,7 @@
#define NEED_TOK_PBE /* defines tokPBE and ptokPBE - see slap.h */
#include "slap.h"
+#include <unistd.h>
#include "svrcore.h"
#include "fe.h"
@@ -1288,27 +1289,39 @@ slapd_nss_init(int init_ssl, int config_available)
secmoddb_file_name = slapi_ch_smprintf("%s/secmod.db", certdir);
pkcs11txt_file_name = slapi_ch_smprintf("%s/pkcs11.txt", certdir);
- if(chmod(cert8db_file_name, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP )){
+ if(access(cert8db_file_name, F_OK) == 0 &&
+ chmod(cert8db_file_name, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP ))
+ {
slapi_log_err(SLAPI_LOG_WARNING, "Security Initialization", "slapd_nss_init - chmod failed for file %s error (%d) %s.\n",
cert8db_file_name, errno, slapd_system_strerror(errno));
}
- if(chmod(cert9db_file_name, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP )){
+ if(access(cert9db_file_name, F_OK) == 0 &&
+ chmod(cert9db_file_name, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP ))
+ {
slapi_log_err(SLAPI_LOG_WARNING, "Security Initialization", "slapd_nss_init - chmod failed for file %s error (%d) %s.\n",
cert9db_file_name, errno, slapd_system_strerror(errno));
}
- if(chmod(key3db_file_name, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP )){
+ if(access(key3db_file_name, F_OK) == 0 &&
+ chmod(key3db_file_name, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP ))
+ {
slapi_log_err(SLAPI_LOG_WARNING, "Security Initialization", "slapd_nss_init - chmod failed for file %s error (%d) %s.\n",
key3db_file_name, errno, slapd_system_strerror(errno));
}
- if(chmod(key4db_file_name, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP )){
+ if(access(key4db_file_name, F_OK) == 0 &&
+ chmod(key4db_file_name, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP ))
+ {
slapi_log_err(SLAPI_LOG_WARNING, "Security Initialization", "slapd_nss_init - chmod failed for file %s error (%d) %s.\n",
key4db_file_name, errno, slapd_system_strerror(errno));
}
- if(chmod(secmoddb_file_name, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP )){
+ if(access(secmoddb_file_name, F_OK) == 0 &&
+ chmod(secmoddb_file_name, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP ))
+ {
slapi_log_err(SLAPI_LOG_WARNING, "Security Initialization", "slapd_nss_init - chmod failed for file %s error (%d) %s.\n",
secmoddb_file_name, errno, slapd_system_strerror(errno));
}
- if(chmod(pkcs11txt_file_name, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP )){
+ if(access(pkcs11txt_file_name, F_OK) == 0 &&
+ chmod(pkcs11txt_file_name, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP ))
+ {
slapi_log_err(SLAPI_LOG_WARNING, "Security Initialization", "slapd_nss_init - chmod failed for file %s error (%d) %s.\n",
pkcs11txt_file_name, errno, slapd_system_strerror(errno));
}
Branch '389-ds-base-1.3.5' - 2 commits - ldap/servers
by Noriko Hosoi
ldap/servers/plugins/cos/cos_cache.c | 73 +++++++++++++++++++++++++------
ldap/servers/plugins/replication/csnpl.c | 4 -
2 files changed, 63 insertions(+), 14 deletions(-)
New commits:
commit 3ac12cb94a8873b0fa4ddb12f924cc58bd9c9872
Author: Thierry Bordaz <tbordaz@redhat.com>
Date: Tue Jan 10 14:32:53 2017 +0100
Ticket 49079: deadlock on cos cache rebuild
Bug Description:
To rebuild the cache, cos_cache_creation gets the cos definitions from the backend.
This means change_lock is held while cos_cache_creation acquires backend pages.
A deadlock can happen if cos_post_op is called while the backend is locked,
for example if a bepreop (urp) does an internal update on a cos definition.
In that case the thread holds backend pages that will be needed by cos_cache_creation,
and it will acquire change_lock to notify the cos_cache thread.
Fix Description:
Let the cos cache rebuild thread run without holding change_lock.
The lock prevents a parallel rebuild, but a flag can do the same.
https://fedorahosted.org/389/ticket/49079
Reviewed by: William Brown and Ludwig Krispenz (thanks to you both !!)
Platforms tested: F23
Flag Day: no
Doc impact: no
(cherry picked from commit ac44337bd97fe63071e7d83e9dcd788f2af1feab)
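A simplified, self-contained sketch of the flag-instead-of-held-lock pattern described above: it uses plain pthreads rather than the slapi mutex wrappers, the names are illustrative, and the retry handling is condensed compared with cos_cache_creation_lock() in the diff below.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t change_lock = PTHREAD_MUTEX_INITIALIZER;
static int cache_at_work = 0;

static void rebuild_cache(void) { /* slow work; no locks held here */ }

static int
rebuild_with_flag(void)
{
    int tries;

    pthread_mutex_lock(&change_lock);
    for (tries = 10; tries > 0; tries--) {
        if (!cache_at_work) {
            cache_at_work = 1;                 /* claim the rebuild */
            pthread_mutex_unlock(&change_lock);
            rebuild_cache();                   /* change_lock is NOT held */
            pthread_mutex_lock(&change_lock);
            cache_at_work = 0;
            break;
        }
        /* someone else is rebuilding; wait and re-check */
        pthread_mutex_unlock(&change_lock);
        sleep(1);
        pthread_mutex_lock(&change_lock);
    }
    pthread_mutex_unlock(&change_lock);
    return tries ? 0 : -1;                     /* -1: gave up after 10 tries */
}

int
main(void)
{
    printf("rebuild %s\n", rebuild_with_flag() == 0 ? "done" : "skipped");
    return 0;
}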
diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c
index 8a32630..87b4ba5 100644
--- a/ldap/servers/plugins/cos/cos_cache.c
+++ b/ldap/servers/plugins/cos/cos_cache.c
@@ -111,7 +111,9 @@ void * cos_get_plugin_identity();
/* the global plugin handle */
static volatile vattr_sp_handle *vattr_handle = NULL;
+/* both variables are protected by change_lock */
static int cos_cache_notify_flag = 0;
+static PRBool cos_cache_at_work = PR_FALSE;
/* service definition cache structs */
@@ -199,7 +201,8 @@ typedef struct _cos_cache cosCache;
static cosCache *pCache; /* always the current global cache, only use getref to get */
/* the place to start if you want a new cache */
-static int cos_cache_create();
+static int cos_cache_create_unlock(void);
+static int cos_cache_creation_lock(void);
/* cache index related functions */
static int cos_cache_index_all(cosCache *pCache);
@@ -386,7 +389,7 @@ static void cos_cache_wait_on_change(void *arg)
pCache = 0;
/* create initial cache */
- cos_cache_create();
+ cos_cache_creation_lock();
slapi_lock_mutex(start_lock);
started = 1;
@@ -419,7 +422,7 @@ static void cos_cache_wait_on_change(void *arg)
* before we go running off doing lots of stuff lets check if we should stop
*/
if(keeprunning) {
- cos_cache_create();
+ cos_cache_creation_lock();
}
cos_cache_notify_flag = 0; /* Dealt with it */
}/* while */
@@ -431,22 +434,25 @@ static void cos_cache_wait_on_change(void *arg)
LDAPDebug( LDAP_DEBUG_TRACE, "<-- cos_cache_wait_on_change thread exit\n",0,0,0);
}
+
/*
- cos_cache_create
+ cos_cache_create_unlock
---------------------
Walks the definitions in the DIT and creates the cache.
Once created, it swaps the new cache for the old one,
releasing its refcount to the old cache and allowing it
to be destroyed.
+
+ called while change_lock is NOT held
*/
-static int cos_cache_create()
+static int cos_cache_create_unlock(void)
{
int ret = -1;
cosCache *pNewCache;
static int firstTime = 1;
int cache_built = 0;
- LDAPDebug( LDAP_DEBUG_TRACE, "--> cos_cache_create\n",0,0,0);
+ LDAPDebug( LDAP_DEBUG_TRACE, "--> cos_cache_create_unlock\n",0,0,0);
pNewCache = (cosCache*)slapi_ch_malloc(sizeof(cosCache));
if(pNewCache)
@@ -509,21 +515,21 @@ static int cos_cache_create()
{
/* we should not go on without proper schema checking */
cos_cache_release(pNewCache);
- LDAPDebug( LDAP_DEBUG_ANY, "cos_cache_create: failed to cache the schema\n",0,0,0);
+ LDAPDebug( LDAP_DEBUG_ANY, "cos_cache_create_unlock: failed to cache the schema\n",0,0,0);
}
}
else
{
/* currently we cannot go on without the indexes */
cos_cache_release(pNewCache);
- LDAPDebug( LDAP_DEBUG_ANY, "cos_cache_create: failed to index cache\n",0,0,0);
+ LDAPDebug( LDAP_DEBUG_ANY, "cos_cache_create_unlock: failed to index cache\n",0,0,0);
}
}
else
{
if(firstTime)
{
- LDAPDebug( LDAP_DEBUG_PLUGIN, "cos_cache_create: cos disabled\n",0,0,0);
+ LDAPDebug( LDAP_DEBUG_PLUGIN, "cos_cache_create_unlock: cos disabled\n",0,0,0);
firstTime = 0;
}
@@ -531,7 +537,7 @@ static int cos_cache_create()
}
}
else
- LDAPDebug( LDAP_DEBUG_ANY, "cos_cache_create: memory allocation failure\n",0,0,0);
+ LDAPDebug( LDAP_DEBUG_ANY, "cos_cache_create_unlock: memory allocation failure\n",0,0,0);
/* make sure we have a new cache */
@@ -563,10 +569,53 @@ static int cos_cache_create()
}
- LDAPDebug( LDAP_DEBUG_TRACE, "<-- cos_cache_create\n",0,0,0);
+ LDAPDebug( LDAP_DEBUG_TRACE, "<-- cos_cache_create_unlock\n",0,0,0);
return ret;
}
+/* cos_cache_creation_lock is called with change_lock being hold:
+ * slapi_lock_mutex(change_lock)
+ *
+ * To rebuild the cache cos_cache_creation gets cos definitions from backend, that
+ * means change_lock is held then cos_cache_creation will acquire some backend pages.
+ *
+ * A deadlock can happen if cos_post_op is called while backend is locked.
+ * For example if a bepreop (urp) does an internal update on a cos definition,
+ * the thread holds backend pages that will be needed by cos_cache_creation.
+ *
+ * A solution is to use a flag 'cos_cache_at_work' protected by change_lock,
+ * release change_lock, recreate the cos_cache, acquire change_lock reset the flag.
+ *
+ * returned value: result of cos_cache_create_unlock
+ *
+ */
+static int cos_cache_creation_lock(void)
+{
+ int ret = -1;
+ int max_tries = 10;
+
+ for (; max_tries != 0; max_tries--) {
+ /* if the cos_cache is already under work (cos_cache_create_unlock)
+ * wait 1 second
+ */
+ if (cos_cache_at_work) {
+ slapi_log_error(SLAPI_LOG_FATAL, COS_PLUGIN_SUBSYSTEM, "--> cos_cache_creation_lock already rebuilding cos_cache... retry\n");
+ DS_Sleep (PR_MillisecondsToInterval(1000));
+ continue;
+ }
+ cos_cache_at_work = PR_TRUE;
+ slapi_unlock_mutex(change_lock);
+ ret = cos_cache_create_unlock();
+ slapi_lock_mutex(change_lock);
+ cos_cache_at_work = PR_FALSE;
+ break;
+ }
+ if (!max_tries) {
+ slapi_log_error(SLAPI_LOG_FATAL, COS_PLUGIN_SUBSYSTEM, "--> cos_cache_creation_lock rebuilt was to long, skip this rebuild\n");
+ }
+
+ return ret;
+}
/*
cos_cache_build_definition_list
@@ -1639,7 +1688,7 @@ int cos_cache_getref(cos_cache **pptheCache)
slapi_lock_mutex(change_lock);
if(pCache == NULL)
{
- if(cos_cache_create())
+ if(cos_cache_creation_lock())
{
/* there was a problem or no COS definitions were found */
LDAPDebug( LDAP_DEBUG_PLUGIN, "cos_cache_getref: no cos cache created\n",0,0,0);
commit 3fa6596bdc677cdb3fb65b7baf6fd567485c91a7
Author: Noriko Hosoi <nhosoi@redhat.com>
Date: Wed Jan 25 13:39:08 2017 -0800
Ticket 49008 backport 1.3.5 : aborted operation can leave RUV in incorrect state
Description: Fixed 2 backport errors in commit 79a3deafe943a3ce5c31c50272939146d17bd7ac.
diff --git a/ldap/servers/plugins/replication/csnpl.c b/ldap/servers/plugins/replication/csnpl.c
index db1ae13..a696fc1 100644
--- a/ldap/servers/plugins/replication/csnpl.c
+++ b/ldap/servers/plugins/replication/csnpl.c
@@ -220,14 +220,14 @@ int csnplCommitAll (CSNPL *csnpl, const CSN *csn)
char csn_str[CSN_STRSIZE];
csn_as_string(csn, PR_FALSE, csn_str);
- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
"csnplCommitALL: committing all csns for csn %s\n", csn_str);
slapi_rwlock_wrlock (csnpl->csnLock);
data = (csnpldata *)llistGetFirst(csnpl->csnList, &iterator);
while (NULL != data)
{
csn_as_string(data->csn, PR_FALSE, csn_str);
- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
"csnplCommitALL: processing data csn %s\n", csn_str);
if (csn_is_equal(data->csn, csn) ||
csn_is_equal(data->prim_csn, csn)) {
Branch '389-ds-base-1.3.5' - ldap/servers
by Ludwig Krispenz
ldap/servers/plugins/replication/csnpl.c | 75 ++++++++++++++++++++---
ldap/servers/plugins/replication/csnpl.h | 5 +
ldap/servers/plugins/replication/repl5.h | 2
ldap/servers/plugins/replication/repl5_init.c | 22 ++++++
ldap/servers/plugins/replication/repl5_plugins.c | 40 +++++++-----
ldap/servers/plugins/replication/repl5_replica.c | 6 -
ldap/servers/plugins/replication/repl5_ruv.c | 74 ++++++++++++++++------
ldap/servers/plugins/replication/repl5_ruv.h | 4 -
ldap/servers/slapd/csn.c | 15 ++++
ldap/servers/slapd/slapi-private.h | 2
10 files changed, 195 insertions(+), 50 deletions(-)
New commits:
commit 79a3deafe943a3ce5c31c50272939146d17bd7ac
Author: Ludwig Krispenz <lkrispen@redhat.com>
Date: Tue Jan 24 15:07:19 2017 +0100
Ticket 49008 backport 1.3.5 : aborted operation can leave RUV in incorrect state
Bug description:
If a plugin operation succeeds but the operation itself fails and is aborted, the RUV is left in an incorrect state (rolled up to the successful plugin op).
Fix Description:
Introduce a "primary csn": the csn of the main operation, either a client operation or a replicated operation.
Csns generated by internal operations, e.g. by plugins, are secondary csns.
Maintain the primary csn in thread-local data, as is done for the agreement name (or txn stack): prim_csn.
Extend the data structure of the pending list to keep prim_csn for each inserted csn.
If a csn is created or received, check prim_csn: if it exists, use it; if it doesn't exist, set it.
When inserting a csn into the pending list, pass the prim_csn.
When cancelling a csn, if it is the prim_csn, also cancel all secondary csns.
When committing a csn:
if it is not the primary csn, do nothing;
if it is the prim_csn, trigger the pending list rollup, stopping at the first not-committed csn;
if the RID of the prim_csn is not the local RID, also roll up the pending list for the local RID.
Reviewed by: Thierry, Thanks
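For illustration, a minimal sketch of the per-thread "primary csn" bookkeeping described above, using a plain pthread key in place of the NSPR thread-private index the patch uses; the CSN is modelled as a plain string and all names are illustrative, not the plugin's own.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_key_t prim_csn_key;

static void
free_prim_csn(void *csn)
{
    free(csn);                         /* destructor, run at thread exit */
}

/* Called when an operation generates/receives a CSN: the first CSN a
 * thread sees becomes the primary one; nested (plugin) CSNs keep it. */
static const char *
note_csn(const char *csn)
{
    char *prim = pthread_getspecific(prim_csn_key);

    if (prim == NULL) {
        prim = strdup(csn);
        pthread_setspecific(prim_csn_key, prim);
    }
    return prim;                       /* pending-list entries record this */
}

/* Called when the operation carrying this CSN commits or is aborted. */
static void
clear_if_primary(const char *csn)
{
    char *prim = pthread_getspecific(prim_csn_key);

    if (prim && strcmp(prim, csn) == 0) {
        pthread_setspecific(prim_csn_key, NULL);
        free(prim);
    }
}

int
main(void)
{
    pthread_key_create(&prim_csn_key, free_prim_csn);

    printf("client op -> primary %s\n", note_csn("csn-A"));
    printf("plugin op -> primary %s\n", note_csn("csn-B")); /* still csn-A */
    clear_if_primary("csn-A");         /* rollup/cleanup would happen here */
    pthread_key_delete(prim_csn_key);
    return 0;
}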
diff --git a/ldap/servers/plugins/replication/csnpl.c b/ldap/servers/plugins/replication/csnpl.c
index acd38d0..db1ae13 100644
--- a/ldap/servers/plugins/replication/csnpl.c
+++ b/ldap/servers/plugins/replication/csnpl.c
@@ -24,8 +24,9 @@ struct csnpl
typedef struct _csnpldata
{
- PRBool committed; /* True if CSN committed */
- CSN *csn; /* The actual CSN */
+ PRBool committed; /* True if CSN committed */
+ CSN *csn; /* The actual CSN */
+ const CSN *prim_csn; /* The primary CSN of an operation consising of multiple sub ops*/
} csnpldata;
/* forward declarations */
@@ -103,7 +104,7 @@ void csnplFree (CSNPL **csnpl)
* 1 if the csn has already been seen
* -1 for any other kind of errors
*/
-int csnplInsert (CSNPL *csnpl, const CSN *csn)
+int csnplInsert (CSNPL *csnpl, const CSN *csn, const CSN *prim_csn)
{
int rc;
csnpldata *csnplnode;
@@ -131,6 +132,7 @@ int csnplInsert (CSNPL *csnpl, const CSN *csn)
csnplnode = (csnpldata *)slapi_ch_malloc(sizeof(csnpldata));
csnplnode->committed = PR_FALSE;
csnplnode->csn = csn_dup(csn);
+ csnplnode->prim_csn = prim_csn;
csn_as_string(csn, PR_FALSE, csn_str);
rc = llistInsertTail (csnpl->csnList, csn_str, csnplnode);
@@ -186,6 +188,57 @@ int csnplRemove (CSNPL *csnpl, const CSN *csn)
return 0;
}
+int csnplRemoveAll (CSNPL *csnpl, const CSN *csn)
+{
+ csnpldata *data;
+ void *iterator;
+
+ slapi_rwlock_wrlock (csnpl->csnLock);
+ data = (csnpldata *)llistGetFirst(csnpl->csnList, &iterator);
+ while (NULL != data)
+ {
+ if (csn_is_equal(data->csn, csn) ||
+ csn_is_equal(data->prim_csn, csn)) {
+ csnpldata_free(&data);
+ data = (csnpldata *)llistRemoveCurrentAndGetNext(csnpl->csnList, &iterator);
+ } else {
+ data = (csnpldata *)llistGetNext (csnpl->csnList, &iterator);
+ }
+ }
+#ifdef DEBUG
+ _csnplDumpContentNoLock(csnpl, "csnplRemoveAll");
+#endif
+ slapi_rwlock_unlock (csnpl->csnLock);
+ return 0;
+}
+
+
+int csnplCommitAll (CSNPL *csnpl, const CSN *csn)
+{
+ csnpldata *data;
+ void *iterator;
+ char csn_str[CSN_STRSIZE];
+
+ csn_as_string(csn, PR_FALSE, csn_str);
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
+ "csnplCommitALL: committing all csns for csn %s\n", csn_str);
+ slapi_rwlock_wrlock (csnpl->csnLock);
+ data = (csnpldata *)llistGetFirst(csnpl->csnList, &iterator);
+ while (NULL != data)
+ {
+ csn_as_string(data->csn, PR_FALSE, csn_str);
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
+ "csnplCommitALL: processing data csn %s\n", csn_str);
+ if (csn_is_equal(data->csn, csn) ||
+ csn_is_equal(data->prim_csn, csn)) {
+ data->committed = PR_TRUE;
+ }
+ data = (csnpldata *)llistGetNext (csnpl->csnList, &iterator);
+ }
+ slapi_rwlock_unlock (csnpl->csnLock);
+ return 0;
+}
+
int csnplCommit (CSNPL *csnpl, const CSN *csn)
{
csnpldata *data;
@@ -276,13 +329,12 @@ csnplRollUp(CSNPL *csnpl, CSN **first_commited)
*first_commited = NULL;
}
data = (csnpldata *)llistGetFirst(csnpl->csnList, &iterator);
- while (NULL != data)
+ while (NULL != data && data->committed)
{
if (NULL != largest_committed_csn && freeit)
{
csn_free(&largest_committed_csn);
}
- if (data->committed) {
freeit = PR_TRUE;
largest_committed_csn = data->csn; /* Save it */
if (first_commited && (*first_commited == NULL)) {
@@ -294,9 +346,6 @@ csnplRollUp(CSNPL *csnpl, CSN **first_commited)
data->csn = NULL;
csnpldata_free(&data);
data = (csnpldata *)llistRemoveCurrentAndGetNext(csnpl->csnList, &iterator);
- } else {
- data = (csnpldata *)llistGetNext (csnpl->csnList, &iterator);
- }
}
#ifdef DEBUG
@@ -326,6 +375,7 @@ static void _csnplDumpContentNoLock(CSNPL *csnpl, const char *caller)
csnpldata *data;
void *iterator;
char csn_str[CSN_STRSIZE];
+ char primcsn_str[CSN_STRSIZE];
data = (csnpldata *)llistGetFirst(csnpl->csnList, &iterator);
if (data) {
@@ -334,11 +384,18 @@ static void _csnplDumpContentNoLock(CSNPL *csnpl, const char *caller)
}
while (data)
{
- slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, "%s, %s\n",
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, "%s,(prim %s), %s\n",
csn_as_string(data->csn, PR_FALSE, csn_str),
+ data->prim_csn ? csn_as_string(data->prim_csn, PR_FALSE, primcsn_str) : " ",
data->committed ? "committed" : "not committed");
data = (csnpldata *)llistGetNext (csnpl->csnList, &iterator);
}
}
#endif
+/* wrapper around csn_free, to satisfy NSPR thread context API */
+void
+csnplFreeCSN (void *arg)
+{
+ csn_free((CSN **)&arg);
+}
diff --git a/ldap/servers/plugins/replication/csnpl.h b/ldap/servers/plugins/replication/csnpl.h
index 32e3ff7..f5c28f5 100644
--- a/ldap/servers/plugins/replication/csnpl.h
+++ b/ldap/servers/plugins/replication/csnpl.h
@@ -22,10 +22,13 @@ typedef struct csnpl CSNPL;
CSNPL* csnplNew ();
void csnplFree (CSNPL **csnpl);
-int csnplInsert (CSNPL *csnpl, const CSN *csn);
+int csnplInsert (CSNPL *csnpl, const CSN *csn, const CSN *prim_csn);
int csnplRemove (CSNPL *csnpl, const CSN *csn);
+int csnplRemoveAll (CSNPL *csnpl, const CSN *csn);
+int csnplCommitAll (CSNPL *csnpl, const CSN *csn);
CSN* csnplGetMinCSN (CSNPL *csnpl, PRBool *committed);
int csnplCommit (CSNPL *csnpl, const CSN *csn);
CSN *csnplRollUp(CSNPL *csnpl, CSN ** first);
void csnplDumpContent(CSNPL *csnpl, const char *caller);
+
#endif
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 4ab2355..27ad416 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -232,6 +232,8 @@ int multimaster_be_betxnpostop_modify (Slapi_PBlock *pb);
extern int repl5_is_betxn;
char* get_thread_private_agmtname ();
void set_thread_private_agmtname (const char *agmtname);
+void set_thread_primary_csn (const CSN *prim_csn);
+CSN* get_thread_primary_csn(void);
void* get_thread_private_cache ();
void set_thread_private_cache (void *buf);
char* get_repl_session_id (Slapi_PBlock *pb, char *id, CSN **opcsn);
diff --git a/ldap/servers/plugins/replication/repl5_init.c b/ldap/servers/plugins/replication/repl5_init.c
index 0304ed5..1570655 100644
--- a/ldap/servers/plugins/replication/repl5_init.c
+++ b/ldap/servers/plugins/replication/repl5_init.c
@@ -136,6 +136,7 @@ static int multimaster_started_flag = 0;
/* Thread private data and interface */
static PRUintn thread_private_agmtname; /* thread private index for logging*/
static PRUintn thread_private_cache;
+static PRUintn thread_primary_csn;
char*
get_thread_private_agmtname()
@@ -153,6 +154,26 @@ set_thread_private_agmtname(const char *agmtname)
PR_SetThreadPrivate(thread_private_agmtname, (void *)agmtname);
}
+CSN*
+get_thread_primary_csn(void)
+{
+ CSN *prim_csn = NULL;
+ if (thread_primary_csn)
+ prim_csn = (CSN *)PR_GetThreadPrivate(thread_primary_csn);
+ return prim_csn;
+}
+void
+set_thread_primary_csn(const CSN *prim_csn)
+{
+ if (thread_primary_csn) {
+ if (prim_csn) {
+ PR_SetThreadPrivate(thread_primary_csn, (void *)csn_dup(prim_csn));
+ } else {
+ PR_SetThreadPrivate(thread_primary_csn, NULL);
+ }
+ }
+}
+
void*
get_thread_private_cache ()
{
@@ -721,6 +742,7 @@ multimaster_start( Slapi_PBlock *pb )
/* Initialize thread private data for logging. Ignore if fails */
PR_NewThreadPrivateIndex (&thread_private_agmtname, NULL);
PR_NewThreadPrivateIndex (&thread_private_cache, NULL);
+ PR_NewThreadPrivateIndex (&thread_primary_csn, csnplFreeCSN);
/* Decode the command line args to see if we're dumping to LDIF */
is_ldif_dump = check_for_ldif_dump(pb);
diff --git a/ldap/servers/plugins/replication/repl5_plugins.c b/ldap/servers/plugins/replication/repl5_plugins.c
index b331c81..84624e9 100644
--- a/ldap/servers/plugins/replication/repl5_plugins.c
+++ b/ldap/servers/plugins/replication/repl5_plugins.c
@@ -1033,9 +1033,11 @@ static int
write_changelog_and_ruv (Slapi_PBlock *pb)
{
Slapi_Operation *op = NULL;
+ CSN *opcsn;
+ CSN *prim_csn;
int rc;
slapi_operation_parameters *op_params = NULL;
- Object *repl_obj;
+ Object *repl_obj = NULL;
int return_value = SLAPI_PLUGIN_SUCCESS;
Replica *r;
Slapi_Backend *be;
@@ -1063,17 +1065,17 @@ write_changelog_and_ruv (Slapi_PBlock *pb)
{
return return_value;
}
+ /* we only log changes for operations applied to a replica */
+ repl_obj = replica_get_replica_for_op (pb);
+ if (repl_obj == NULL)
+ return return_value;
slapi_pblock_get(pb, SLAPI_RESULT_CODE, &rc);
if (rc) { /* op failed - just return */
- return return_value;
+ cancel_opcsn(pb);
+ goto common_return;
}
- /* we only log changes for operations applied to a replica */
- repl_obj = replica_get_replica_for_op (pb);
- if (repl_obj == NULL)
- return return_value;
-
r = (Replica*)object_get_data (repl_obj);
PR_ASSERT (r);
@@ -1108,7 +1110,7 @@ write_changelog_and_ruv (Slapi_PBlock *pb)
slapi_pblock_get (pb, SLAPI_OPERATION_PARAMETERS, &op_params);
if (NULL == op_params) {
- return return_value;
+ goto common_return;
}
/* need to set uniqueid operation parameter */
@@ -1127,19 +1129,18 @@ write_changelog_and_ruv (Slapi_PBlock *pb)
slapi_pblock_get (pb, SLAPI_ENTRY_PRE_OP, &e);
}
if (NULL == e) {
- return return_value;
+ goto common_return;
}
uniqueid = slapi_entry_get_uniqueid (e);
if (NULL == uniqueid) {
- return return_value;
+ goto common_return;
}
op_params->target_address.uniqueid = slapi_ch_strdup (uniqueid);
}
if( op_params->csn && is_cleaned_rid(csn_get_replicaid(op_params->csn))){
/* this RID has been cleaned */
- object_release (repl_obj);
- return return_value;
+ goto common_return;
}
/* we might have stripped all the mods - in that case we do not
@@ -1152,7 +1153,7 @@ write_changelog_and_ruv (Slapi_PBlock *pb)
{
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
"write_changelog_and_ruv: Skipped due to DISKFULL\n");
- return return_value;
+ goto common_return;
}
slapi_pblock_get(pb, SLAPI_TXN, &txn);
rc = cl5WriteOperationTxn(repl_name, repl_gen, op_params,
@@ -1188,7 +1189,6 @@ write_changelog_and_ruv (Slapi_PBlock *pb)
*/
if (0 == return_value) {
char csn_str[CSN_STRSIZE] = {'\0'};
- CSN *opcsn;
int rc;
const char *dn = op_params ? REPL_GET_DN(&op_params->target_address) : "unknown";
Slapi_DN *sdn = op_params ? (&op_params->target_address)->sdn : NULL;
@@ -1220,7 +1220,15 @@ write_changelog_and_ruv (Slapi_PBlock *pb)
}
}
- object_release (repl_obj);
+common_return:
+ opcsn = operation_get_csn(op);
+ prim_csn = get_thread_primary_csn();
+ if (csn_is_equal(opcsn, prim_csn)) {
+ set_thread_primary_csn(NULL);
+ }
+ if (repl_obj) {
+ object_release (repl_obj);
+ }
return return_value;
}
@@ -1417,7 +1425,7 @@ cancel_opcsn (Slapi_PBlock *pb)
ruv_obj = replica_get_ruv (r);
PR_ASSERT (ruv_obj);
- ruv_cancel_csn_inprogress ((RUV*)object_get_data (ruv_obj), opcsn);
+ ruv_cancel_csn_inprogress ((RUV*)object_get_data (ruv_obj), opcsn, replica_get_rid(r));
object_release (ruv_obj);
}
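Worth spelling out why the early "return return_value" statements in write_changelog_and_ruv() above became "goto common_return": once replica_get_replica_for_op() has handed back repl_obj, every exit path owns a reference that must be released (the old early returns skipped that release), and the new exit label is also where the thread's primary CSN is cleared when the operation finishing is the one that set it. The acquire-then-single-exit shape, reduced to plain, compilable C with generic stand-in names:

    #include <stdlib.h>

    int
    do_work(int fail_early)
    {
        int rc = 0;
        char *res = malloc(16);   /* stands in for replica_get_replica_for_op() */
        if (res == NULL) {
            return -1;            /* nothing acquired yet: a plain return is still safe */
        }
        if (fail_early) {
            rc = -1;
            goto common_return;   /* acquired: must leave through the release path */
        }
        /* ... the changelog/RUV work would happen here ... */
    common_return:
        free(res);                /* stands in for object_release(repl_obj) */
        return rc;
    }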
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index 7360d97..602653a 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -903,7 +903,7 @@ replica_update_ruv(Replica *r, const CSN *updated_csn, const char *replica_purl)
}
}
/* Update max csn for local and remote replicas */
- rc = ruv_update_ruv (ruv, updated_csn, replica_purl, rid == r->repl_rid);
+ rc = ruv_update_ruv (ruv, updated_csn, replica_purl, r->repl_rid);
if (RUV_COVERS_CSN == rc)
{
slapi_log_error(SLAPI_LOG_REPL,
@@ -3626,7 +3626,7 @@ assign_csn_callback(const CSN *csn, void *data)
if (NULL != r->min_csn_pl)
{
- if (csnplInsert(r->min_csn_pl, csn) != 0)
+ if (csnplInsert(r->min_csn_pl, csn, NULL) != 0)
{
char csn_str[CSN_STRSIZE]; /* For logging only */
/* Ack, we can't keep track of min csn. Punt. */
@@ -3674,7 +3674,7 @@ abort_csn_callback(const CSN *csn, void *data)
}
}
- ruv_cancel_csn_inprogress (ruv, csn);
+ ruv_cancel_csn_inprogress (ruv, csn, replica_get_rid(r));
replica_unlock(r->repl_lock);
object_release (ruv_obj);
diff --git a/ldap/servers/plugins/replication/repl5_ruv.c b/ldap/servers/plugins/replication/repl5_ruv.c
index 5d6e1c3..c2d3bb4 100644
--- a/ldap/servers/plugins/replication/repl5_ruv.c
+++ b/ldap/servers/plugins/replication/repl5_ruv.c
@@ -77,6 +77,7 @@ static char *get_replgen_from_berval(const struct berval *bval);
static const char * const prefix_replicageneration = "{replicageneration}";
static const char * const prefix_ruvcsn = "{replica "; /* intentionally missing '}' */
+static int ruv_update_ruv_element (RUV *ruv, RUVElement *replica, const CSN *csn, const char *replica_purl, PRBool isLocal);
/* API implementation */
@@ -1602,6 +1603,7 @@ int ruv_add_csn_inprogress (RUV *ruv, const CSN *csn)
char csn_str[CSN_STRSIZE];
int rc = RUV_SUCCESS;
int rid = csn_get_replicaid (csn);
+ CSN *prim_csn;
PR_ASSERT (ruv && csn);
@@ -1639,8 +1641,12 @@ int ruv_add_csn_inprogress (RUV *ruv, const CSN *csn)
rc = RUV_COVERS_CSN;
goto done;
}
-
- rc = csnplInsert (replica->csnpl, csn);
+ prim_csn = get_thread_primary_csn();
+ if (prim_csn == NULL) {
+ set_thread_primary_csn(csn);
+ prim_csn = get_thread_primary_csn();
+ }
+ rc = csnplInsert (replica->csnpl, csn, prim_csn);
if (rc == 1) /* we already seen this csn */
{
if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
@@ -1648,6 +1654,7 @@ int ruv_add_csn_inprogress (RUV *ruv, const CSN *csn)
"the csn %s has already be seen - ignoring\n",
csn_as_string (csn, PR_FALSE, csn_str));
}
+ set_thread_primary_csn(NULL);
rc = RUV_COVERS_CSN;
}
else if(rc != 0)
@@ -1672,24 +1679,36 @@ done:
return rc;
}
-int ruv_cancel_csn_inprogress (RUV *ruv, const CSN *csn)
+int ruv_cancel_csn_inprogress (RUV *ruv, const CSN *csn, ReplicaId local_rid)
{
RUVElement* replica;
int rc = RUV_SUCCESS;
+ CSN *prim_csn = NULL;
+
PR_ASSERT (ruv && csn);
+ prim_csn = get_thread_primary_csn();
/* locate ruvElement */
slapi_rwlock_wrlock (ruv->lock);
replica = ruvGetReplica (ruv, csn_get_replicaid (csn));
- if (replica == NULL)
- {
+ if (replica == NULL) {
/* ONREPL - log error */
- rc = RUV_NOTFOUND;
- goto done;
- }
-
- rc = csnplRemove (replica->csnpl, csn);
+ rc = RUV_NOTFOUND;
+ goto done;
+ }
+ if (csn_is_equal(csn, prim_csn)) {
+ /* the prim csn is cancelled, lets remove all dependent csns */
+ ReplicaId prim_rid = csn_get_replicaid (csn);
+ replica = ruvGetReplica (ruv, prim_rid);
+ rc = csnplRemoveAll (replica->csnpl, prim_csn);
+ if (prim_rid != local_rid) {
+ replica = ruvGetReplica (ruv, local_rid);
+ rc = csnplRemoveAll (replica->csnpl, prim_csn);
+ }
+ } else {
+ rc = csnplRemove (replica->csnpl, csn);
+ }
if (rc != 0)
rc = RUV_NOTFOUND;
else
@@ -1700,19 +1719,37 @@ done:
return rc;
}
-int ruv_update_ruv (RUV *ruv, const CSN *csn, const char *replica_purl, PRBool isLocal)
+int ruv_update_ruv (RUV *ruv, const CSN *csn, const char *replica_purl, ReplicaId local_rid)
+{
+ int rc=RUV_SUCCESS;
+ RUVElement *replica;
+ ReplicaId prim_rid;
+
+ CSN *prim_csn = get_thread_primary_csn();
+
+ if (! csn_is_equal(csn, prim_csn)) {
+ /* not a primary csn, nothing to do */
+ return rc;
+ }
+ slapi_rwlock_wrlock (ruv->lock);
+ prim_rid = csn_get_replicaid (csn);
+ replica = ruvGetReplica (ruv, local_rid);
+ rc = ruv_update_ruv_element(ruv, replica, csn, replica_purl, PR_TRUE);
+ if ( rc || local_rid == prim_rid) goto done;
+ replica = ruvGetReplica (ruv, prim_rid);
+ rc = ruv_update_ruv_element(ruv, replica, csn, replica_purl, PR_FALSE);
+done:
+ slapi_rwlock_unlock (ruv->lock);
+ return rc;
+}
+static int
+ruv_update_ruv_element (RUV *ruv, RUVElement *replica, const CSN *csn, const char *replica_purl, PRBool isLocal)
{
int rc=RUV_SUCCESS;
char csn_str[CSN_STRSIZE];
CSN *max_csn;
CSN *first_csn = NULL;
- RUVElement *replica;
- PR_ASSERT (ruv && csn);
-
- slapi_rwlock_wrlock (ruv->lock);
-
- replica = ruvGetReplica (ruv, csn_get_replicaid (csn));
if (replica == NULL)
{
/* we should have a ruv element at this point because it would have
@@ -1722,7 +1759,7 @@ int ruv_update_ruv (RUV *ruv, const CSN *csn, const char *replica_purl, PRBool i
goto done;
}
- if (csnplCommit(replica->csnpl, csn) != 0)
+ if (csnplCommitAll(replica->csnpl, csn) != 0)
{
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "ruv_update_ruv: cannot commit csn %s\n",
csn_as_string(csn, PR_FALSE, csn_str));
@@ -1763,7 +1800,6 @@ int ruv_update_ruv (RUV *ruv, const CSN *csn, const char *replica_purl, PRBool i
}
done:
- slapi_rwlock_unlock (ruv->lock);
return rc;
}
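The ruv_cancel_csn_inprogress()/ruv_update_ruv() rework above leans on the pending-list (csnpl) changes from the earlier part of this patch: each pending CSN now remembers the primary CSN of the operation it was generated under, so csnplRemoveAll() can roll back the whole family when the primary operation is aborted, and csnplCommitAll() can commit the family together. Roughly, the data and the walk look like the sketch below (the struct layout and loop are an illustration, not the csnpl.c code itself; CSN, PRBool, csn_is_equal(), csn_free() and slapi_ch_free() are assumed to be in scope via slapi-private.h and NSPR):

    typedef struct csnpldata_sketch {
        PRBool committed;
        CSN *csn;         /* the pending CSN itself, owned by the entry */
        CSN *prim_csn;    /* borrowed pointer to the primary operation's CSN */
    } csnpldata_sketch;

    /* One pass covers both csnplCommitAll() and csnplRemoveAll(): every entry
     * tied to prim_csn is either marked committed or dropped from the list. */
    static void
    csnpl_rollup_sketch(csnpldata_sketch **entries, size_t n,
                        const CSN *prim_csn, PRBool commit)
    {
        for (size_t i = 0; i < n; i++) {
            csnpldata_sketch *d = entries[i];
            if (d == NULL || !csn_is_equal(d->prim_csn, prim_csn)) {
                continue;
            }
            if (commit) {
                d->committed = PR_TRUE;
            } else {
                csn_free(&d->csn);                    /* entry owns its csn */
                slapi_ch_free((void **)&entries[i]);  /* prim_csn is borrowed, left alone */
            }
        }
    }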
diff --git a/ldap/servers/plugins/replication/repl5_ruv.h b/ldap/servers/plugins/replication/repl5_ruv.h
index e9eff5a..c8960fd 100644
--- a/ldap/servers/plugins/replication/repl5_ruv.h
+++ b/ldap/servers/plugins/replication/repl5_ruv.h
@@ -109,8 +109,8 @@ PRInt32 ruv_replica_count (const RUV *ruv);
char **ruv_get_referrals(const RUV *ruv);
void ruv_dump(const RUV *ruv, char *ruv_name, PRFileDesc *prFile);
int ruv_add_csn_inprogress (RUV *ruv, const CSN *csn);
-int ruv_cancel_csn_inprogress (RUV *ruv, const CSN *csn);
-int ruv_update_ruv (RUV *ruv, const CSN *csn, const char *replica_purl, PRBool isLocal);
+int ruv_cancel_csn_inprogress (RUV *ruv, const CSN *csn, ReplicaId rid);
+int ruv_update_ruv (RUV *ruv, const CSN *csn, const char *replica_purl, ReplicaId local_rid);
int ruv_move_local_supplier_to_first(RUV *ruv, ReplicaId rid);
int ruv_get_first_id_and_purl(RUV *ruv, ReplicaId *rid, char **replica_purl );
int ruv_local_contains_supplier(RUV *ruv, ReplicaId rid);
diff --git a/ldap/servers/slapd/csn.c b/ldap/servers/slapd/csn.c
index a3f4815..175f82a 100644
--- a/ldap/servers/slapd/csn.c
+++ b/ldap/servers/slapd/csn.c
@@ -268,6 +268,21 @@ csn_as_attr_option_string(CSNType t,const CSN *csn,char *ss)
return s;
}
+int
+csn_is_equal(const CSN *csn1, const CSN *csn2)
+{
+ int retval = 0;
+ if ((csn1 == NULL && csn2 == NULL) ||
+ (csn1 && csn2 &&
+ csn1->tstamp == csn2->tstamp &&
+ csn1->seqnum == csn2->seqnum &&
+ csn1->rid == csn2->rid &&
+ csn1->subseqnum == csn2->subseqnum)) {
+ retval = 1;
+ }
+ return retval;
+}
+
int
csn_compare_ext(const CSN *csn1, const CSN *csn2, unsigned int flags)
{
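Since both the common_return block and the pending-list handling key on csn_is_equal(), its NULL behaviour is worth spelling out: two NULL pointers compare as equal (return value 1), NULL against a real CSN does not (0), and two real CSNs are equal only when tstamp, seqnum, rid and subseqnum all match. That is exactly what the cleanup path relies on, along the lines of:

    CSN *opcsn = operation_get_csn(op);         /* may be NULL for non-replicated ops */
    CSN *prim_csn = get_thread_primary_csn();   /* may be NULL if nothing was registered */

    if (csn_is_equal(opcsn, prim_csn)) {
        /* either both are NULL, or this operation's CSN is the thread's
         * primary CSN -- in both cases the thread-local slot may be cleared */
        set_thread_primary_csn(NULL);
    }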
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index 52d1c4a..e909e9c 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -166,6 +166,7 @@ time_t csn_get_time(const CSN *csn);
PRUint16 csn_get_seqnum(const CSN *csn);
PRUint16 csn_get_subseqnum(const CSN *csn);
char *csn_as_string(const CSN *csn, PRBool replicaIdOrder, char *ss); /* WARNING: ss must be CSN_STRSIZE bytes, or NULL. */
+int csn_is_equal(const CSN *csn1, const CSN *csn2);
int csn_compare(const CSN *csn1, const CSN *csn2);
int csn_compare_ext(const CSN *csn1, const CSN *csn2, unsigned int flags);
#define CSN_COMPARE_SKIP_SUBSEQ 0x1
@@ -181,6 +182,7 @@ const CSN *csn_max(const CSN *csn1,const CSN *csn2);
a csn from the set.*/
int csn_increment_subsequence (CSN *csn);
+void csnplFreeCSN (void *arg);
/*
* csnset.c
*/