[rhq] modules/core modules/enterprise
by Jay Shaughnessy
modules/core/domain/src/main/java/org/rhq/core/domain/criteria/Criteria.java | 24 +++-
modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java | 60 ++++------
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java | 16 ++
3 files changed, 63 insertions(+), 37 deletions(-)
New commits:
commit 6b83d05c24d61097441e36ddf995428ebb3870df
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Fri Mar 1 17:59:52 2013 -0500
A fix to the Criteria class's sortId support, recently added to provide
default ID sorting in CriteriaQuery.
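For context, a minimal usage sketch of the sortId support this commit fixes (BundleVersionCriteria, setPaging and addSortId are taken from the test diff further below; treat the snippet as illustrative only, not as the commit's own code):

    // Hedged sketch: with the global sortId field on Criteria.Type.SORT, a criteria
    // subclass no longer needs to declare its own sortId field for ID sorting to work.
    BundleVersionCriteria criteria = new BundleVersionCriteria();
    criteria.setPaging(0, 20);             // page 0, 20 records per page
    criteria.addSortId(PageOrdering.DESC); // resolved via the SORT type's global "sortId" field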
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/Criteria.java b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/Criteria.java
index 2ae6859..84c31e2 100644
--- a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/Criteria.java
+++ b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/Criteria.java
@@ -36,6 +36,7 @@ import org.rhq.core.domain.authz.Permission;
import org.rhq.core.domain.util.PageControl;
import org.rhq.core.domain.util.PageList;
import org.rhq.core.domain.util.PageOrdering;
+import org.rhq.enterprise.server.util.CriteriaQueryGenerator;
/**
* @author Joseph Marques
@@ -43,7 +44,28 @@ import org.rhq.core.domain.util.PageOrdering;
@XmlAccessorType(XmlAccessType.FIELD)
public abstract class Criteria implements Serializable, BaseCriteria {
public enum Type {
- FILTER, FETCH, SORT;
+ FILTER(), FETCH(), SORT(new String[] { "sortId" });
+
+ private List<String> globalFields;
+
+ /**
+ * Use this to get the global fields for this Criteria field type. Don't use inspection as the field names
+ * for this abstract base class do not conform (for legacy reasons) to the prefix convention held by the
+ * subclasses. This is likely only relevant to {@link CriteriaQueryGenerator}.
+ *
+ * @return The set of global fields for this Criteria field type. Meaning, usable by all subclasses.
+ */
+ public List<String> getGlobalFields() {
+ return globalFields;
+ }
+
+ private Type() {
+ this.globalFields = new ArrayList(0);
+ }
+
+ private Type(String[] globalFields) {
+ this.globalFields = Arrays.asList(globalFields);
+ }
}
/**
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java
index b1ca9fe..ca80dd7 100644
--- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java
+++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java
@@ -960,7 +960,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
assertNotNull(r);
assertEquals(b.getName(), r.getName());
}
-
+
@Test(enabled = TESTS_ENABLED)
public void testFindBundlesByCriteriaPaging() throws Exception {
Bundle b = null;
@@ -1009,14 +1009,13 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
assertEquals(1, bs.size());
assertEquals(b10, bs.get(0));
}
-
-
+
@Test(enabled = TESTS_ENABLED)
public void testFindAndDeleteBundlesByCriteriaQuery() throws Exception {
// verify that all bundle version objects are actually parsed.
Map<String, Bundle> bundleNames = new HashMap<String, Bundle>();
- final int bundleCount = 50;
-
+ final int bundleCount = 50;
+
Bundle b01 = createBundle("name01");
for (int i = 1; i < bundleCount; i++) {
createBundle("name" + String.format("%02d", i + 1));
@@ -1030,7 +1029,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
final List<Integer> pagesFlipped = new ArrayList<Integer>();
pagesFlipped.add(0);
-
+
// iterate over the results with CriteriaQuery
CriteriaQueryExecutor<Bundle, BundleCriteria> queryExecutor = new CriteriaQueryExecutor<Bundle, BundleCriteria>() {
@Override
@@ -1040,8 +1039,8 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
}
};
- CriteriaQuery<Bundle, BundleCriteria> bundles = new CriteriaQuery<Bundle, BundleCriteria>(
- criteria, queryExecutor);
+ CriteriaQuery<Bundle, BundleCriteria> bundles = new CriteriaQuery<Bundle, BundleCriteria>(criteria,
+ queryExecutor);
List<Integer> toRemove = new ArrayList<Integer>(bundleNames.size());
String prevName = null;
@@ -1053,7 +1052,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
toRemove.add(b.getId());
bundleNames.remove(String.valueOf(b.getName()));
}
-
+
// remove the bundles
for (int id : toRemove) {
bundleManager.deleteBundle(overlord, id);
@@ -1062,14 +1061,14 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
// check if the page was flipped the correct amount of times
assertTrue("While iterating the bundles, the findBundlesByCriteria should be called " + bundleCount / pageSize
+ " times" + pageSize, pagesFlipped.get(0) == bundleCount / pageSize);
-
+
// check if the last name is equal to "name01"
assertEquals("The name should be \"name01\"", b01.getName(), prevName);
// test that entire list parsed spanning multiple pages
- assertTrue("Expected bundleNames to be empty. Still " + bundleNames.size()
- + " bundle(s).", bundleNames.isEmpty());
-
+ assertTrue("Expected bundleNames to be empty. Still " + bundleNames.size() + " bundle(s).",
+ bundleNames.isEmpty());
+
// check if everything is deleted
PageList<Bundle> bvs = null;
criteria = new BundleCriteria();
@@ -1118,7 +1117,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
assertNotNull(bvOut.getBundleDeployments());
assertTrue(bvOut.getBundleDeployments().isEmpty());
}
-
+
@Test(enabled = TESTS_ENABLED)
public void testFindBundleVersionsByCriteriaPaging() throws Exception {
Bundle b1 = createBundle("one");
@@ -1148,16 +1147,12 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
assertFalse(bvs.get(0).equals(bvs.get(1)));
assertEquals(bv60, bvs.get(2));
}
-
- // This test is disabled, because it is failing. The sorting by id, defined on the Criteria class does not work
- // if the sortId field is not defined on the criteria sub-class. This is because of the fact that getPageControl()
- // defined on class CriteriaQueryGenerator calls CriteriaQueryGenerator.getFields() that ignores fields defined
- // on the Criteria class (takes into considerations fields from the sub-classes).
- @Test(enabled = DISABLED)
+
+ @Test(enabled = ENABLED)
public void testFindAndDeleteBundleVersionsByCriteriaQuery() throws Exception {
//verify that all bundle version objects are actually parsed.
Map<String, BundleVersion> bundleVersionVersions = new HashMap<String, BundleVersion>();
-
+
final int bundleVersionCount = 220;
Bundle bundle = createBundle("one");
for (int i = 0; i < bundleVersionCount; i++) {
@@ -1175,7 +1170,7 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
// the List is used because of the access from the anonymous class
final List<Integer> pagesFlipped = new ArrayList<Integer>();
pagesFlipped.add(0);
-
+
// iterate over the results with CriteriaQuery
CriteriaQueryExecutor<BundleVersion, BundleVersionCriteria> queryExecutor = new CriteriaQueryExecutor<BundleVersion, BundleVersionCriteria>() {
@Override
@@ -1199,24 +1194,24 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
toDelete.add(bv.getId());
bundleVersionVersions.remove(String.valueOf(bv.getVersion()));
}
-
+
// check if the page was flipped the correct amount of times (this formula works only for this particular case)
- assertTrue("While iterating the bundle versions, the findBundleVersionsByCriteria() should be called " + bundleVersionCount / pageSize
- + " times" + pageSize, pagesFlipped.get(0) == bundleVersionCount / pageSize);
-
+ assertTrue("While iterating the bundle versions, the findBundleVersionsByCriteria() should be called "
+ + bundleVersionCount / pageSize + " times" + pageSize, pagesFlipped.get(0) == bundleVersionCount / pageSize);
+
// delete all
for (int id : toDelete) {
bundleManager.deleteBundleVersion(overlord, id, true);
}
-
+
// check whether every record was processed when iterating over the bundleVersions
- assertTrue("Expected bundleVersions to be empty. Still " + bundleVersionVersions.size()
- + " version(s).", bundleVersionVersions.isEmpty());
+ assertTrue("Expected bundleVersions to be empty. Still " + bundleVersionVersions.size() + " version(s).",
+ bundleVersionVersions.isEmpty());
// test that entire list parsed spanning multiple pages
- assertTrue("Expected bundleVersions to be empty. Still " + bundleVersionVersions.size()
- + " version(s).", bundleVersionVersions.isEmpty());
-
+ assertTrue("Expected bundleVersions to be empty. Still " + bundleVersionVersions.size() + " version(s).",
+ bundleVersionVersions.isEmpty());
+
// check if everything is deleted
PageList<BundleVersion> bvs = null;
criteria = new BundleVersionCriteria();
@@ -1288,7 +1283,6 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
assertEquals(1, bundles.size());
}
-
// helper methods
private BundleType createBundleType(String name) throws Exception {
final String fullName = TEST_PREFIX + "-type-" + name;
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java
index 54fce8f..67b6316 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/util/CriteriaQueryGenerator.java
@@ -569,15 +569,25 @@ public final class CriteriaQueryGenerator {
List<Field> results = new ArrayList<Field>();
Class<?> currentLevelClass = criteria.getClass();
- while (currentLevelClass.equals(Criteria.class) == false) {
+ boolean isCriteriaClass = false;
+
+ do {
+ isCriteriaClass = currentLevelClass.equals(Criteria.class);
+
for (Field field : currentLevelClass.getDeclaredFields()) {
field.setAccessible(true);
- if (field.getName().startsWith(prefix)) {
+ if (isCriteriaClass) {
+ if (fieldType.getGlobalFields().contains(field.getName()))
+ results.add(field);
+
+ } else if (field.getName().startsWith(prefix)) {
results.add(field);
}
}
+
currentLevelClass = currentLevelClass.getSuperclass();
- }
+
+ } while (!isCriteriaClass);
return results;
}
[rhq] Branch 'feature/cassandra-backend' - 2 commits - modules/enterprise
by snegrea
modules/enterprise/server/server-metrics/pom.xml | 21
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java | 248 ++++++++--
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigratorRunner.java | 243 +++++++++
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java | 4
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/MetricsTable.java | 4
modules/enterprise/server/server-metrics/tools/create_test_data.py | 69 ++
6 files changed, 535 insertions(+), 54 deletions(-)
New commits:
commit 42295b1f952ee8f61071e09ecb0fc1cccd3fb7fe
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Fri Mar 1 16:20:58 2013 -0600
Add dependencies for data migration command line runner.
diff --git a/modules/enterprise/server/server-metrics/pom.xml b/modules/enterprise/server/server-metrics/pom.xml
index 0db7f68..b24f9d4 100644
--- a/modules/enterprise/server/server-metrics/pom.xml
+++ b/modules/enterprise/server/server-metrics/pom.xml
@@ -127,6 +127,27 @@
</dependency>
<dependency>
+ <groupId>commons-cli</groupId>
+ <artifactId>commons-cli</artifactId>
+ <version>1.2</version>
+ <scope>provided</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>postgresql</groupId>
+ <artifactId>postgresql</artifactId>
+ <!-- NOTE: version defined in root pom dependencyManagement section -->
+ </dependency>
+
+
+ <dependency>
+ <groupId>org.hibernate</groupId>
+ <artifactId>hibernate-entitymanager</artifactId>
+ <scope>provided</scope>
+ </dependency>
+
+
+ <dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-testng</artifactId>
<version>${powermock.version}</version>
commit 14ea631efbd17b94c6e5654eff8695ccad4f86a7
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Fri Mar 1 15:46:16 2013 -0600
Add a command-line runner for the data migrator to assist with benchmarking and potential usage within the application. Various updates to the data migrator. Also, update the tool that generates random data to support SQL configuration from command-line options.
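For orientation, a hedged sketch of the programmatic use that DataMigratorRunner.run() (in the diff below) performs; the entityManager and Cassandra session setup are assumed to already exist:

    // Sketch only: keep the SQL data in place and migrate raw metric data only.
    DataMigrator migrator = new DataMigrator(entityManager, session);
    migrator.preserveData();                     // neither delete-per-batch nor delete-at-end
    migrator.run1DAggregateDataMigration(false);
    migrator.run6HAggregateDataMigration(false);
    migrator.run1HAggregateDataMigration(false);
    migrator.migrateData();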
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java
index ff46be8..cb72a07 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java
@@ -20,17 +20,26 @@
package org.rhq.server.metrics;
+import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto;
+import static com.datastax.driver.core.querybuilder.QueryBuilder.ttl;
+
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Date;
+import java.util.LinkedList;
import java.util.List;
+import java.util.Queue;
import javax.persistence.EntityManager;
import javax.persistence.Query;
-import com.datastax.driver.core.BoundStatement;
-import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
+import com.datastax.driver.core.Statement;
+import com.datastax.driver.core.querybuilder.QueryBuilder;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.rhq.core.domain.measurement.MeasurementDataNumeric1D;
import org.rhq.core.domain.measurement.MeasurementDataNumeric1H;
@@ -45,14 +54,21 @@ import org.rhq.server.metrics.domain.MetricsTable;
*/
public class DataMigrator {
- private static final int MAX_RECORDS_TO_MIGRATE = 1000;
+ private final Log log = LogFactory.getLog(DataMigrator.class);
+
+ private static final int MAX_RECORDS_TO_LOAD_FROM_SQL = 30000;
+ private static final int MAX_RECORDS_TO_BATCH_TO_CASSANDRA = 500;
private static final int MAX_NUMBER_OF_FAILURES = 5;
private final EntityManager entityManager;
+
private final Session session;
+ private boolean telemetry;
+
private boolean deleteDataImmediatelyAfterMigration;
private boolean deleteAllDataAtEndOfMigration;
+
private boolean runRawDataMigration;
private boolean run1HAggregateDataMigration;
private boolean run6HAggregateDataMigration;
@@ -68,6 +84,8 @@ public class DataMigrator {
this.run1HAggregateDataMigration = true;
this.run6HAggregateDataMigration = true;
this.run1DAggregateDataMigration = true;
+
+ this.telemetry = false;
}
public void run1HAggregateDataMigration(boolean value) {
@@ -82,14 +100,28 @@ public class DataMigrator {
this.run1DAggregateDataMigration = value;
}
- public void deleteDataImmediatelyAfterMigration(boolean value) {
- this.deleteDataImmediatelyAfterMigration = value;
- this.deleteAllDataAtEndOfMigration = !value;
+
+ public void deleteDataImmediatelyAfterMigration() {
+ this.deleteDataImmediatelyAfterMigration = true;
+ this.deleteAllDataAtEndOfMigration = false;
}
- public void deleteAllDataAtEndOfMigration(boolean value) {
- this.deleteAllDataAtEndOfMigration = value;
- this.deleteDataImmediatelyAfterMigration = !value;
+ public void deleteAllDataAtEndOfMigration() {
+ this.deleteAllDataAtEndOfMigration = true;
+ this.deleteDataImmediatelyAfterMigration = false;
+ }
+
+ public void preserveData() {
+ this.deleteAllDataAtEndOfMigration = false;
+ this.deleteDataImmediatelyAfterMigration = false;
+ }
+
+ public void enableTelemetry() {
+ this.telemetry = true;
+ }
+
+ public void disableTelemetry() {
+ this.telemetry = false;
}
public void migrateData() throws Exception {
@@ -125,11 +157,15 @@ public class DataMigrator {
int numberOfFailures = 0;
Exception caughtException = null;
+ log.info(migrator.getClass());
+
while (numberOfFailures < MAX_NUMBER_OF_FAILURES) {
try {
migrator.work();
return;
} catch (Exception e) {
+ log.error("Migrator " + migrator.getClass() + " failed. Retrying!", e);
+
caughtException = e;
numberOfFailures++;
}
@@ -202,14 +238,20 @@ public class DataMigrator {
while (true) {
Query q = entityManager.createNamedQuery(query);
- q.setMaxResults(MAX_RECORDS_TO_MIGRATE);
+ q.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
existingData = (List<MeasurementDataNumericAggregateInterface>) q.getResultList();
if (existingData.size() == 0) {
break;
}
- insertDataToCassandra(existingData);
+ try {
+ insertDataToCassandra(existingData);
+ } catch (Exception e) {
+ log.error("Failed to insert " + metricsTable.toString()
+ + " data. Attempting to insert the current batch of data one more time");
+ insertDataToCassandra(existingData);
+ }
for (Object entity : existingData) {
entityManager.remove(entity);
@@ -220,33 +262,82 @@ public class DataMigrator {
@SuppressWarnings("unchecked")
private void performFullMigration() throws Exception {
- Query q = entityManager.createNamedQuery(query);
- List<MeasurementDataNumericAggregateInterface> existingData = (List<MeasurementDataNumericAggregateInterface>) q
- .getResultList();
+ List<MeasurementDataNumericAggregateInterface> existingData = null;
+ int lastMigratedRecord = 0;
+
+ while (true) {
+ Query q = entityManager.createNamedQuery(query);
+ q.setFirstResult(lastMigratedRecord + 1);
+ q.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
+
+ existingData = (List<MeasurementDataNumericAggregateInterface>) q.getResultList();
+
+ if (existingData.size() == 0) {
+ break;
+ }
- insertDataToCassandra(existingData);
+ lastMigratedRecord += existingData.size();
+
+ try{
+ insertDataToCassandra(existingData);
+ } catch (Exception e) {
+ log.error("Failed to insert " + metricsTable.toString()
+ + " data. Attempting to insert the current batch of data one more time");
+ insertDataToCassandra(existingData);
+ }
+ }
}
private void insertDataToCassandra(List<MeasurementDataNumericAggregateInterface> existingData)
throws Exception {
- String cql = "INSERT INTO " + metricsTable
- + " (schedule_id, time, type, value) VALUES (?, ?, ?, ?) USING TTL " + metricsTable.getTTL();
- PreparedStatement statement = session.prepare(cql);
+ Statement statement = null;
List<ResultSetFuture> resultSetFutures = new ArrayList<ResultSetFuture>();
+ List<Statement> statementsAccumulator = new ArrayList<Statement>();
- for (MeasurementDataNumericAggregateInterface measurement : existingData) {
- BoundStatement boundStatement = statement.bind(measurement.getScheduleId(),
- new Date(measurement.getTimestamp()), AggregateType.MIN.ordinal(), measurement.getMin());
- resultSetFutures.add(session.executeAsync(boundStatement));
+ //only need approximate TTL to speed up processing
+ //given that each batch is processed within seconds, getting the
+ //system time once per batch has minimal impact on the record retention
+ long currentTimeMillis = System.currentTimeMillis();
+ long expectedTTLMillis = metricsTable.getTTLinMilliseconds() * 10;
+ long itemTTLSeconds = 0;
- boundStatement = statement.bind(measurement.getScheduleId(), new Date(measurement.getTimestamp()),
- AggregateType.MAX.ordinal(), measurement.getMax());
- resultSetFutures.add(session.executeAsync(boundStatement));
+ for (MeasurementDataNumericAggregateInterface measurement : existingData) {
+ itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + measurement.getTimestamp()) / 1000l;
+
+ statement = QueryBuilder.insertInto(metricsTable.toString())
+ .value("schedule_id", measurement.getScheduleId())
+ .value("time", new Date(measurement.getTimestamp()))
+ .value("type", AggregateType.MIN.ordinal())
+ .value("value", measurement.getMin())
+ .using(ttl((int) itemTTLSeconds));;
+ statementsAccumulator.add(statement);
+
+ statement = insertInto(metricsTable.toString())
+ .value("schedule_id", measurement.getScheduleId())
+ .value("time", new Date(measurement.getTimestamp()))
+ .value("type", AggregateType.MAX.ordinal())
+ .value("value", measurement.getMax())
+ .using(ttl((int) itemTTLSeconds));
+ statementsAccumulator.add(statement);
+
+ statement = insertInto(metricsTable.toString()).value("schedule_id", measurement.getScheduleId())
+ .value("time", new Date(measurement.getTimestamp()))
+ .value("type", AggregateType.AVG.ordinal())
+ .value("value", Double.parseDouble(measurement.getValue().toString()))
+ .using(ttl((int) itemTTLSeconds));
+ statementsAccumulator.add(statement);
+
+ if (statementsAccumulator.size() == MAX_RECORDS_TO_BATCH_TO_CASSANDRA) {
+ resultSetFutures.add(session.executeAsync(QueryBuilder.batch((Statement[]) statementsAccumulator
+ .toArray(new Statement[statementsAccumulator.size()]))));
+ statementsAccumulator.clear();
+ }
+ }
- boundStatement = statement.bind(measurement.getScheduleId(), new Date(measurement.getTimestamp()),
- AggregateType.AVG.ordinal(), Double.parseDouble(measurement.getValue().toString()));
- resultSetFutures.add(session.executeAsync(boundStatement));
+ if (statementsAccumulator.size() != 0) {
+ resultSetFutures.add(session.executeAsync(QueryBuilder.batch((Statement[]) statementsAccumulator
+ .toArray(new Statement[statementsAccumulator.size()]))));
}
for (ResultSetFuture future : resultSetFutures) {
@@ -258,6 +349,8 @@ public class DataMigrator {
private class RawDataMigrator implements CallableMigrationWorker {
+ Queue<String> tablesNotProcessed = new LinkedList<String>(Arrays.asList(getRawDataTables()));
+
public void work() throws Exception {
if (deleteDataImmediatelyAfterMigration) {
performBatchedMigration();
@@ -270,20 +363,28 @@ public class DataMigrator {
private void performBatchedMigration() throws Exception {
List<Object[]> existingData = null;
- for (String table : getRawDataTables()) {
- String selectQuery = "SELECT schedule_id, value, time_stamp FROM " + table;
+ while (!tablesNotProcessed.isEmpty()) {
+ String table = tablesNotProcessed.peek();
+
+ String selectQuery = "SELECT schedule_id, time_stamp, value FROM " + table;
String deleteQuery = "DELETE FROM " + table + " WHERE schedule_id = ?";
while (true) {
Query query = entityManager.createNativeQuery(selectQuery);
- query.setMaxResults(MAX_RECORDS_TO_MIGRATE);
+ query.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
existingData = query.getResultList();
if (existingData.size() == 0) {
break;
}
- insertDataToCassandra(existingData);
+ try {
+ insertDataToCassandra(existingData);
+ } catch (Exception e) {
+ log.error("Failed to insert " + MetricsTable.RAW.toString()
+ + " data. Attempting to insert the current batch of data one more time");
+ insertDataToCassandra(existingData);
+ }
query = entityManager.createNativeQuery(deleteQuery);
@@ -292,6 +393,8 @@ public class DataMigrator {
query.executeUpdate();
}
}
+
+ tablesNotProcessed.poll();
}
}
@@ -299,26 +402,81 @@ public class DataMigrator {
private void performFullMigration() throws Exception {
List<Object[]> existingData = null;
- for (String table : getRawDataTables()) {
- String selectQuery = "SELECT schedule_id, value, time_stamp FROM " + table;
- Query query = entityManager.createNativeQuery(selectQuery);
- existingData = query.getResultList();
- insertDataToCassandra(existingData);
+ while (!tablesNotProcessed.isEmpty()) {
+ String table = tablesNotProcessed.peek();
+
+ log.info("Start migrating raw table: " + table);
+
+ int lastMigratedRecord = 0;
+
+ while (true) {
+ String selectQuery = "SELECT schedule_id, time_stamp, value FROM " + table;
+ Query query = entityManager.createNativeQuery(selectQuery);
+ query.setFirstResult(lastMigratedRecord + 1);
+ query.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
+
+ existingData = query.getResultList();
+
+ if (existingData.size() == 0) {
+ break;
+ }
+
+ lastMigratedRecord += existingData.size();
+
+ try {
+ insertDataToCassandra(existingData);
+ } catch (Exception e) {
+ log.error("Failed to insert " + MetricsTable.RAW.toString()
+ + " data. Attempting to insert the current batch of data one more time");
+ insertDataToCassandra(existingData);
+ }
+
+ if (lastMigratedRecord % MAX_RECORDS_TO_LOAD_FROM_SQL == 0) {
+ log.info("------------" + lastMigratedRecord + "---------------------");
+ }
+ }
+
+ log.info("Done migrating raw table" + table + "---------------------");
+ tablesNotProcessed.poll();
}
}
private void insertDataToCassandra(List<Object[]> existingData) throws Exception {
- String cql = "INSERT INTO " + MetricsTable.RAW + " (schedule_id, time, value) VALUES (?, ?, ?) USING TTL "
- + MetricsTable.RAW.getTTL();
- PreparedStatement statement = session.prepare(cql);
-
List<ResultSetFuture> resultSetFutures = new ArrayList<ResultSetFuture>();
+ List<Statement> statementsAccumulator = new ArrayList<Statement>();
+
+ //only need approximate TTL to speed up processing
+ //given that each batch is processed within seconds, getting the
+ //system time once per batch has minimal impact on the record retention
+ long currentTimeMillis = System.currentTimeMillis();
+ long expectedTTLMillis = MetricsTable.RAW.getTTLinMilliseconds() * 10;
+ long creationTimeMillis = 0;
+ long itemTTLSeconds = 0;
for (Object[] rawDataPoint : existingData) {
- BoundStatement boundStatement = statement.bind(Integer.parseInt(rawDataPoint[0].toString()),
- new Date(Long.parseLong(rawDataPoint[1].toString())),
- Double.parseDouble(rawDataPoint[2].toString()));
- resultSetFutures.add(session.executeAsync(boundStatement));
+ creationTimeMillis = Long.parseLong(rawDataPoint[1].toString());
+ itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + creationTimeMillis) / 1000l;
+
+ if (itemTTLSeconds > 0) {
+ Statement boundStatement = QueryBuilder.insertInto(MetricsTable.RAW.toString())
+ .value("schedule_id", Integer.parseInt(rawDataPoint[0].toString()))
+ .value("time", new Date(creationTimeMillis))
+ .value("value", Double.parseDouble(rawDataPoint[2].toString()))
+ .using(ttl((int) itemTTLSeconds));
+
+ statementsAccumulator.add(boundStatement);
+ }
+
+ if (statementsAccumulator.size() == MAX_RECORDS_TO_BATCH_TO_CASSANDRA) {
+ resultSetFutures.add(session.executeAsync(QueryBuilder.batch((Statement[]) statementsAccumulator
+ .toArray(new Statement[statementsAccumulator.size()]))));
+ statementsAccumulator.clear();
+ }
+ }
+
+ if (statementsAccumulator.size() != 0) {
+ resultSetFutures.add(session.executeAsync(QueryBuilder.batch((Statement[]) statementsAccumulator
+ .toArray(new Statement[statementsAccumulator.size()]))));
}
for (ResultSetFuture future : resultSetFutures) {
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigratorRunner.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigratorRunner.java
new file mode 100644
index 0000000..37e941b
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigratorRunner.java
@@ -0,0 +1,243 @@
+/*
+ * RHQ Management Platform
+ * Copyright 2011, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+package org.rhq.server.metrics;
+
+import java.util.Properties;
+
+import javax.persistence.EntityManager;
+import javax.persistence.EntityManagerFactory;
+
+import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.ProtocolOptions.Compression;
+import com.datastax.driver.core.Session;
+import com.datastax.driver.core.SimpleAuthInfoProvider;
+import com.datastax.driver.core.exceptions.NoHostAvailableException;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.hibernate.ejb.Ejb3Configuration;
+
+import org.rhq.cassandra.CassandraNode;
+
+/**
+ * @author Stefan Negrea
+ *
+ * Only postgres is supported by the runner, however the data migrator itself can run
+ * with any database.
+ *
+ * Maven command to run this from the command line:
+ *
+ * mvn install -DskipTests exec:java -Dexec.mainClass="org.rhq.server.metrics.DataMigratorRunner"
+ *
+ *
+ */
+@SuppressWarnings({ "static-access", "deprecation" })
+public class DataMigratorRunner {
+
+ private final Log log = LogFactory.getLog(DataMigratorRunner.class);
+
+ //Cassandra
+ private String cassandraUser;
+ private Option cassandraUserOption = OptionBuilder.withLongOpt("cassandra-user").hasArg().create();
+
+ private String cassandraPassword;
+ private Option cassandraPasswordOption = OptionBuilder.withLongOpt("cassandra-password").hasArg().create();
+
+ private String[] cassandraHosts;
+ private Option cassandraHostsOption = OptionBuilder.withLongOpt("cassandra-hosts").hasArg().create();
+
+ private boolean cassandraCompression;
+ private Option cassandraCompressionOption = OptionBuilder.withLongOpt("cassandra-compression").create();
+
+ //SQL
+ private String sqlUser;
+ private Option sqlUserOption = OptionBuilder.withLongOpt("sql-user").hasArg().create();
+
+ private String sqlPassword;
+ private Option sqlPasswordOption = OptionBuilder.withLongOpt("sql-password").hasArg().create();
+
+ private String sqlHost;
+ private Option sqlHostOption = OptionBuilder.withLongOpt("sql-host").hasArg().create();
+
+ private String sqlPort;
+ private Option sqlPortOption = OptionBuilder.withLongOpt("sql-port").hasArg().create();
+
+ private String sqlDB;
+ private Option sqlDBOption = OptionBuilder.withLongOpt("sql-db").hasArg().create();
+
+ /**
+ * @param args
+ * @throws ParseException
+ */
+ public static void main(String[] args) throws Exception {
+
+ try{
+ DataMigratorRunner runner = new DataMigratorRunner();
+ runner.configure(args);
+ runner.run();
+ } catch (Exception e) {
+ System.out.println(e);
+ }
+
+ System.exit(0);
+ }
+
+ private void configure(String args[]) throws Exception {
+ Options options = new Options();
+ options.addOption(cassandraUserOption);
+ options.addOption(cassandraPasswordOption);
+ options.addOption(cassandraHostsOption);
+ options.addOption(cassandraCompressionOption);
+
+ options.addOption(sqlUserOption);
+ options.addOption(sqlPasswordOption);
+ options.addOption(sqlHostOption);
+ options.addOption(sqlPortOption);
+ options.addOption(sqlDBOption);
+
+ CommandLineParser parser = new PosixParser();
+ CommandLine commandLine = parser.parse(options, args);
+
+ parseCassandraOptionsWithDefault(commandLine);
+ parseSQLOptionsWithDefault(commandLine);
+ }
+
+ private void run() throws Exception {
+ log.info("Creating Entity Manager");
+ EntityManager entityManager = this.createEntityManager();
+ log.info("Done creating Entity Manager");
+
+ log.info("Creating Cassandra session");
+ Session session = this.createCassandraSession();
+ log.info("Done creating Cassandra session");
+
+ DataMigrator migrator = new DataMigrator(entityManager, session);
+
+ migrator.preserveData();
+ migrator.run1DAggregateDataMigration(false);
+ migrator.run6HAggregateDataMigration(false);
+ migrator.run1HAggregateDataMigration(false);
+
+ migrator.migrateData();
+ }
+
+ private Session createCassandraSession() throws Exception {
+ Compression selectedCompression = Compression.NONE;
+ if (cassandraCompression) {
+ selectedCompression = Compression.SNAPPY;
+ }
+
+ Cluster cluster = Cluster
+ .builder()
+ .addContactPoints(cassandraHosts)
+ .withCompression(selectedCompression)
+ .withoutMetrics()
+ .withAuthInfoProvider(
+ new SimpleAuthInfoProvider().add("username", cassandraUser).add("password", cassandraPassword)).build();
+
+ return cluster.connect("rhq");
+ }
+
+ private EntityManager createEntityManager() throws Exception {
+ Properties properties = new Properties();
+ properties.put("javax.persistence.provider", "org.hibernate.ejb.HibernatePersistence");
+ properties.put("hibernate.dialect", "org.hibernate.dialect.PostgreSQLDialect");
+ properties.put("hibernate.driver_class", "org.postgresql.Driver");
+ properties.put("hibernate.connection.username", sqlUser);
+ properties.put("hibernate.connection.password", sqlPassword);
+ properties.put("hibernate.connection.url", "jdbc:postgresql://" + sqlHost + ":" + sqlPort + "/" + sqlDB);
+
+ Ejb3Configuration configuration = new Ejb3Configuration();
+ configuration.setProperties(properties);
+ EntityManagerFactory factory = configuration.buildEntityManagerFactory();
+ return factory.createEntityManager();
+ }
+
+ private void parseCassandraOptionsWithDefault(CommandLine commandLine) throws NoHostAvailableException {
+ if (commandLine.hasOption(cassandraUserOption.getLongOpt())) {
+ cassandraUser = commandLine.getOptionValue(cassandraUserOption.getLongOpt());
+ } else {
+ cassandraUser = "rhqadmin";
+ }
+
+ if (commandLine.hasOption(cassandraPasswordOption.getLongOpt())) {
+ cassandraPassword = commandLine.getOptionValue(cassandraPasswordOption.getLongOpt());
+ } else {
+ cassandraPassword = "rhqadmin";
+ }
+
+ if (commandLine.hasOption(cassandraHostsOption.getLongOpt())) {
+ String[] seeds = commandLine.getOptionValue(cassandraHostsOption.getLongOpt()).split(",");
+ cassandraHosts = new String[seeds.length];
+ for (int i = 0; i < seeds.length; ++i) {
+ CassandraNode node = CassandraNode.parseNode(seeds[i]);
+ cassandraHosts[i] = node.getHostName();
+ }
+ } else {
+ cassandraHosts = new String[] { "127.0.0.1", "127.0.0.2" };
+ }
+
+ if (commandLine.hasOption(cassandraCompressionOption.getLongOpt())) {
+ cassandraCompression = true;
+ } else {
+ cassandraCompression = false;
+ }
+ }
+
+ private void parseSQLOptionsWithDefault(CommandLine commandLine) throws NoHostAvailableException {
+ if (commandLine.hasOption(sqlUserOption.getLongOpt())) {
+ sqlUser = commandLine.getOptionValue(sqlUserOption.getLongOpt());
+ } else {
+ sqlUser = "rhqadmin";
+ }
+
+ if (commandLine.hasOption(sqlPasswordOption.getLongOpt())) {
+ sqlPassword = commandLine.getOptionValue(sqlPasswordOption.getLongOpt());
+ } else {
+ sqlPassword = "rhqadmin";
+ }
+
+ if (commandLine.hasOption(sqlHostOption.getLongOpt())) {
+ sqlHost = commandLine.getOptionValue(sqlHostOption.getLongOpt());
+ } else {
+ sqlHost = "localhost";
+ }
+
+ if (commandLine.hasOption(sqlPortOption.getLongOpt())) {
+ sqlPort = commandLine.getOptionValue(sqlPortOption.getLongOpt());
+ } else {
+ sqlPort = "5432";
+ }
+
+ if (commandLine.hasOption(sqlDBOption.getLongOpt())) {
+ sqlDB = commandLine.getOptionValue(sqlDBOption.getLongOpt());
+ } else {
+ sqlDB = "rhq_db";
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java
index 5373b75..c542225 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java
@@ -118,6 +118,7 @@ public class ListPagedResult<T> implements Iterable<T> {
@SuppressWarnings({ "unchecked", "rawtypes" })
private final List<?> localValuesToBind = new ArrayList(valuesToBind);
private ResultSet resultSet = retrieveNextResultSet(null, localValuesToBind);
+ private T lastRetrievedItem = null;
public boolean hasNext() {
resultSet = retrieveNextResultSet(resultSet, localValuesToBind);
@@ -125,7 +126,8 @@ public class ListPagedResult<T> implements Iterable<T> {
}
public T next() {
- return mapper.mapOne(resultSet);
+ lastRetrievedItem = mapper.mapOne(resultSet);
+ return lastRetrievedItem;
}
public void remove() {
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/MetricsTable.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/MetricsTable.java
index de112d1..07bf927 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/MetricsTable.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/MetricsTable.java
@@ -53,6 +53,10 @@ public enum MetricsTable {
return this.ttl;
}
+ public long getTTLinMilliseconds() {
+ return this.ttl * 1000l;
+ }
+
@Override
public String toString() {
return this.tableName;
diff --git a/modules/enterprise/server/server-metrics/tools/create_test_data.py b/modules/enterprise/server/server-metrics/tools/create_test_data.py
index ba73ebd..4e60f04 100644
--- a/modules/enterprise/server/server-metrics/tools/create_test_data.py
+++ b/modules/enterprise/server/server-metrics/tools/create_test_data.py
@@ -33,6 +33,27 @@ import sys
import random
import time
import cStringIO
+import math
+from optparse import OptionParser
+
+
+def parse_arguments():
+ parser = OptionParser()
+
+ #db settings
+ parser.add_option("--host", default="localhost", action="store", type="string", dest="db_host")
+ parser.add_option("--database", default="rhq_db", action="store", type="string", dest="db_database")
+ parser.add_option("--user", default="rhqadmin", action="store", type="string", dest="db_user")
+ parser.add_option("--password", default="rhqadmin", action="store", type="string", dest="db_password")
+
+ #program settings
+ parser.add_option("--agents", default=10, action="store", type="int", dest="number_of_agents")
+
+
+ (options, args) = parser.parse_args()
+ print options
+
+ return options
class MetricType:
Raw,Aggregate = range(2)
@@ -86,11 +107,14 @@ def generate_random_aggregate_value():
def insert_data(connection,data,table_name,table_columns):
#do the postgres insertion using copy_from functionality since it's the fastest method
#to bulk import data
+ start_time = time.time()
input_data = cStringIO.StringIO(data)
cursor = connection.cursor()
cursor.copy_from(input_data,sep="\t",table = table_name, columns = table_columns)
cursor.close()
connection.commit()
+ end_time = time.time()
+ return end_time - start_time
def delete_table_data(connection, table):
@@ -99,11 +123,24 @@ def delete_table_data(connection, table):
connection.commit()
+def calculate_mean_standard_deviation(numbers):
+
+ average = reduce(lambda x, y: x + y, numbers) / len(numbers)
+ variance = map(lambda x: (x - average)**2, numbers)
+ average_variance = reduce(lambda x, y: x + y, variance) / len(variance)
+ standard_deviation = math.sqrt(average_variance)
+ return average, standard_deviation
-#Main Script
+
+#Main Script
script_start_time = time.time()
+
+#Parse Command Line Arguments
+options = parse_arguments()
+
+
#General Configuration
raw_tables = map(lambda x: "RHQ_MEAS_DATA_NUM_R"+str(x).zfill(2),range(15))
raw_table_columns = ["schedule_id","time_stamp","value"]
@@ -114,14 +151,14 @@ aggregate_table_columns = ["schedule_id","time_stamp","value","minvalue","maxval
# see the estimation guideline at https://docs.jboss.org/author/display/RHQ/Metrics+Data+Migration+-+Design
raw_metrics_per_agent = 900000
aggregate_metrics_per_agent = 700000
-number_of_agents = 10
+
# some constants to be used by the algorithm
batch_increment = 10000
data_start_time = int(time.time() - 2*604800)*1000 # now - 2 week, convert to milliseconds
#establish the db connection, update the settings for your local environment
-connection = psycopg2.connect(database="rhq_db",user="rhqadmin",password="rhqadmin")
+connection = psycopg2.connect(host=options.db_host,database=options.db_database,user=options.db_user,password=options.db_password)
#Delete all data available
@@ -129,9 +166,14 @@ map(lambda x : delete_table_data(connection, x), raw_tables)
map(lambda x : delete_table_data(connection, x), aggregate_tables)
+
+metric_agent_inserting_time = []
+metric_agent_total_time = []
+
#Generate random raw and aggregate data
-for j in range(number_of_agents) :
- agent_generation_start_time = time.time()
+for j in range(options.number_of_agents) :
+ agent_start_time = time.time()
+ agent_inserting_time = 0
#generate and insert random metrics
for i in range(0,raw_metrics_per_agent,batch_increment):
@@ -142,7 +184,7 @@ for j in range(number_of_agents) :
start_of_schedule_id_sequence = j * batch_increment, metricType = MetricType.Raw)
data_to_insert = "\n".join(map(str,map(lambda x: "\t".join((str(s) for s in x)),raw_metrics)))
- insert_data(connection,data_to_insert,random_raw_table,raw_table_columns)
+ agent_inserting_time += insert_data(connection,data_to_insert,random_raw_table,raw_table_columns)
#generate and insert aggregate metrics
for i in range(0,aggregate_metrics_per_agent,batch_increment):
@@ -153,8 +195,19 @@ for j in range(number_of_agents) :
start_of_schedule_id_sequence = j * batch_increment, metricType = MetricType.Aggregate )
data_to_insert = "\n".join(map(str,map(lambda x: "\t".join((str(s) for s in x)),aggregate_metrics)))
- insert_data(connection,data_to_insert,random_aggregate_table,aggregate_table_columns)
+ agent_inserting_time += insert_data(connection,data_to_insert,random_aggregate_table,aggregate_table_columns)
+
+ metric_agent_inserting_time.append(agent_inserting_time)
+ metric_agent_total_time.append( time.time() - agent_start_time )
- print "Data inserted for agent #",j, " - total time:", time.time() - agent_generation_start_time, " seconds"
+ print "Data created for agent #",j, " - total time:", time.time() - agent_start_time, " seconds"
print "Total time: ",time.time() - script_start_time, " seconds"
+
+average_inserting_time, standard_deviation_inserting_time = calculate_mean_standard_deviation(metric_agent_inserting_time)
+print "Time spent inserting data: ", metric_agent_inserting_time, " seconds"
+print "Average time inserting data for 1 agent:", average_inserting_time, " seconds with a standard deviation of ", standard_deviation_inserting_time
+
+average_time, standard_deviation_time = calculate_mean_standard_deviation(metric_agent_total_time)
+print "Time spent for each agent: ", metric_agent_total_time , " seconds"
+print "Average time for 1 agent:", average_time, " seconds with a standard deviation of ", standard_deviation_time
[rhq] modules/enterprise
by Jay Shaughnessy
modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/system/SystemManagerBeanTest.java | 19 ++++++----
1 file changed, 12 insertions(+), 7 deletions(-)
New commits:
commit 379b3039453268e473ed402d4bd317b1544e9af8
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Fri Mar 1 15:35:23 2013 -0500
Rearrange tests so that vacuum comes before analyze. There is no really good reason
for this; we're seeing if it resolves an issue with a Jenkins run.
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/system/SystemManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/system/SystemManagerBeanTest.java
index 104ff85..3d9e15e 100644
--- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/system/SystemManagerBeanTest.java
+++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/system/SystemManagerBeanTest.java
@@ -55,13 +55,17 @@ public class SystemManagerBeanTest extends AbstractEJB3Test {
unprepareServerPluginService();
}
- @SuppressWarnings("deprecation")
- public void testGetSystemConfiguration() {
- assert null != systemManager.getSystemConfiguration(overlord);
+ public void testVacuum() {
+ System.out.println("Starting Vacuum");
+ systemManager.vacuum(overlord);
+ System.out.println("Done with Vacuum");
}
+ @Test(dependsOnMethods = { "testVacuum" })
public void testAnalyze() {
+ System.out.println("Starting Analyze");
systemManager.analyze(overlord);
+ System.out.println("Done with Analyze");
}
public void testEnableHibernateStatistics() {
@@ -76,15 +80,16 @@ public class SystemManagerBeanTest extends AbstractEJB3Test {
systemManager.reindex(overlord);
}
- public void testVacuum() {
- systemManager.vacuum(overlord);
- }
-
public void testVacuumAppdef() {
systemManager.vacuumAppdef(overlord);
}
@SuppressWarnings("deprecation")
+ public void testGetSystemConfiguration() {
+ assert null != systemManager.getSystemConfiguration(overlord);
+ }
+
+ @SuppressWarnings("deprecation")
public void testLegacySystemSettingsInCorrectFormat() throws Exception {
//some of the properties are represented differently
//in the new style settings and the the old style
[rhq] modules/core
by Thomas Segismont
modules/core/native-system/src/test/java/org/rhq/core/system/DeadProcessInfoRefreshTest.java | 8 ++++++++
1 file changed, 8 insertions(+)
New commits:
commit 65393aa5cd02369ec38bd0da674c2f6d37f7a8db
Author: Thomas Segismont <tsegismo(a)redhat.com>
Date: Fri Mar 1 18:48:50 2013 +0100
Don't fail DeadProcessInfoRefreshTest on Mac OSX
It fails because Sigar returns a wrong value for the process state (see the community thread URL)
diff --git a/modules/core/native-system/src/test/java/org/rhq/core/system/DeadProcessInfoRefreshTest.java b/modules/core/native-system/src/test/java/org/rhq/core/system/DeadProcessInfoRefreshTest.java
index 9ee811e..d49e6b4 100644
--- a/modules/core/native-system/src/test/java/org/rhq/core/system/DeadProcessInfoRefreshTest.java
+++ b/modules/core/native-system/src/test/java/org/rhq/core/system/DeadProcessInfoRefreshTest.java
@@ -26,6 +26,7 @@ import static org.testng.Assert.assertTrue;
import java.util.ArrayList;
import java.util.List;
+import org.hyperic.sigar.OperatingSystem;
import org.testng.annotations.AfterTest;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
@@ -96,10 +97,17 @@ public class DeadProcessInfoRefreshTest {
/**
* We want to be sure that once the process has been reported down, subsequent calls to refresh will not report it
* up. See this thread on VMWare forum: http://communities.vmware.com/message/2187972#2187972
+ *
+ * Unfortunately there is no work around for this failure on Mac OSX so the test will silently return on this
+ * platform.
+ *
* @throws Exception
*/
@Test(timeOut = 1000 * 10)
public void testRefreshInterval() throws Exception {
+ if (OperatingSystem.getInstance().getName().equals(OperatingSystem.NAME_MACOSX)) {
+ return;
+ }
// Sigar should see the process running
assertTrue(testProcessInfo.freshSnapshot().isRunning());
// Send kill
[rhq] modules/core
by Thomas Segismont
modules/core/native-system/src/test/resources/log4j.xml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
New commits:
commit eb6e853f45e8ac412cdc38cd9f8428badbf79594
Author: Thomas Segismont <tsegismo(a)redhat.com>
Date: Fri Mar 1 16:05:05 2013 +0100
Higher logging level by default in native system tests
diff --git a/modules/core/native-system/src/test/resources/log4j.xml b/modules/core/native-system/src/test/resources/log4j.xml
index 05d0951..014dc36 100644
--- a/modules/core/native-system/src/test/resources/log4j.xml
+++ b/modules/core/native-system/src/test/resources/log4j.xml
@@ -1,9 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
-<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/" debug="true">
+<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/" debug="false">
<appender name="CONSOLE" class="org.apache.log4j.ConsoleAppender">
<param name="Target" value="System.out"/>
- <param name="Threshold" value="DEBUG"/>
+ <param name="Threshold" value="INFO"/>
<layout class="org.apache.log4j.PatternLayout">
<param name="ConversionPattern" value="%-5p %d{dd-MM HH:mm:ss,SSS} (%F:%M:%L) -%m%n"/>
</layout>
[rhq] modules/enterprise
by Jiri Kremser
modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java | 150 ++++++++++
1 file changed, 150 insertions(+)
New commits:
commit fa74bbe0d6c89363a63b580f28946c182d3a157c
Author: Jirka Kremser <jkremser(a)redhat.com>
Date: Fri Mar 1 14:32:16 2013 +0100
Added two more tests exercising the CriteriaQuery API and BundleManagerBean.
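For orientation, a hedged sketch of the CriteriaQuery iteration idiom the new tests exercise; bundleManager, overlord and TEST_PREFIX are assumed to be available as fields of the test class shown in the diff below:

    BundleCriteria criteria = new BundleCriteria();
    criteria.addFilterName(TEST_PREFIX);
    criteria.setPaging(0, 10);
    CriteriaQueryExecutor<Bundle, BundleCriteria> executor = new CriteriaQueryExecutor<Bundle, BundleCriteria>() {
        @Override
        public PageList<Bundle> execute(BundleCriteria c) {
            return bundleManager.findBundlesByCriteria(overlord, c);
        }
    };
    for (Bundle b : new CriteriaQuery<Bundle, BundleCriteria>(criteria, executor)) {
        // each iteration transparently flips to the next page when the current one is exhausted
    }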
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java
index dfe2c72..b1ca9fe 100644
--- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java
+++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/bundle/BundleManagerBeanTest.java
@@ -83,6 +83,8 @@ import org.rhq.enterprise.server.resource.ResourceManagerLocal;
import org.rhq.enterprise.server.test.AbstractEJB3Test;
import org.rhq.enterprise.server.test.TestAgentClient;
import org.rhq.enterprise.server.test.TestServerCommunicationsService;
+import org.rhq.enterprise.server.util.CriteriaQuery;
+import org.rhq.enterprise.server.util.CriteriaQueryExecutor;
import org.rhq.enterprise.server.util.LookupUtil;
import org.rhq.enterprise.server.util.ResourceTreeHelper;
@@ -1007,6 +1009,76 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
assertEquals(1, bs.size());
assertEquals(b10, bs.get(0));
}
+
+
+ @Test(enabled = TESTS_ENABLED)
+ public void testFindAndDeleteBundlesByCriteriaQuery() throws Exception {
+ // verify that all bundle version objects are actually parsed.
+ Map<String, Bundle> bundleNames = new HashMap<String, Bundle>();
+ final int bundleCount = 50;
+
+ Bundle b01 = createBundle("name01");
+ for (int i = 1; i < bundleCount; i++) {
+ createBundle("name" + String.format("%02d", i + 1));
+ }
+
+ BundleCriteria criteria = new BundleCriteria();
+ criteria.addFilterName(TEST_PREFIX);
+ final int pageSize = 10;
+ criteria.setPaging(0, pageSize);
+ criteria.addSortName(PageOrdering.DESC);
+
+ final List<Integer> pagesFlipped = new ArrayList<Integer>();
+ pagesFlipped.add(0);
+
+ // iterate over the results with CriteriaQuery
+ CriteriaQueryExecutor<Bundle, BundleCriteria> queryExecutor = new CriteriaQueryExecutor<Bundle, BundleCriteria>() {
+ @Override
+ public PageList<Bundle> execute(BundleCriteria criteria) {
+ pagesFlipped.set(0, pagesFlipped.get(0) + 1);
+ return bundleManager.findBundlesByCriteria(overlord, criteria);
+ }
+ };
+
+ CriteriaQuery<Bundle, BundleCriteria> bundles = new CriteriaQuery<Bundle, BundleCriteria>(
+ criteria, queryExecutor);
+
+ List<Integer> toRemove = new ArrayList<Integer>(bundleNames.size());
+ String prevName = null;
+ // iterate over the entire result set efficiently
+ String errMsg = "Results should be sorted by names, something is out of order";
+ for (Bundle b : bundles) {
+ assertTrue(errMsg, null == prevName || prevName.compareTo(b.getName()) > 0);
+ prevName = b.getName();
+ toRemove.add(b.getId());
+ bundleNames.remove(String.valueOf(b.getName()));
+ }
+
+ // remove the bundles
+ for (int id : toRemove) {
+ bundleManager.deleteBundle(overlord, id);
+ }
+
+ // check if the page was flipped the correct amount of times
+ assertTrue("While iterating the bundles, the findBundlesByCriteria should be called " + bundleCount / pageSize
+ + " times" + pageSize, pagesFlipped.get(0) == bundleCount / pageSize);
+
+ // check if the last name is equal to "name01"
+ assertEquals("The name should be \"name01\"", b01.getName(), prevName);
+
+ // test that entire list parsed spanning multiple pages
+ assertTrue("Expected bundleNames to be empty. Still " + bundleNames.size()
+ + " bundle(s).", bundleNames.isEmpty());
+
+ // check if everything is deleted
+ PageList<Bundle> bvs = null;
+ criteria = new BundleCriteria();
+ criteria.addFilterName(TEST_PREFIX);
+ criteria.clearPaging(); // fetch all
+ bvs = bundleManager.findBundlesByCriteria(overlord, criteria);
+ assertNotNull(bvs);
+ assertTrue(bvs.isEmpty());
+ }
@Test(enabled = TESTS_ENABLED)
public void testFindBundleVersionsByCriteria() throws Exception {
@@ -1076,6 +1148,84 @@ public class BundleManagerBeanTest extends AbstractEJB3Test {
assertFalse(bvs.get(0).equals(bvs.get(1)));
assertEquals(bv60, bvs.get(2));
}
+
+ // This test is disabled, because it is failing. The sorting by id, defined on the Criteria class does not work
+ // if the sortId field is not defined on the criteria sub-class. This is because of the fact that getPageControl()
+ // defined on class CriteriaQueryGenerator calls CriteriaQueryGenerator.getFields() that ignores fields defined
+ // on the Criteria class (takes into considerations fields from the sub-classes).
+ @Test(enabled = DISABLED)
+ public void testFindAndDeleteBundleVersionsByCriteriaQuery() throws Exception {
+ //verify that all bundle version objects are actually parsed.
+ Map<String, BundleVersion> bundleVersionVersions = new HashMap<String, BundleVersion>();
+
+ final int bundleVersionCount = 220;
+ Bundle bundle = createBundle("one");
+ for (int i = 0; i < bundleVersionCount; i++) {
+ String version = "1." + String.format("%03d", i + 1);
+ BundleVersion bundleVersion = createBundleVersion(bundle.getName(), version, bundle);
+ bundleVersionVersions.put(version, bundleVersion);
+ }
+
+ final int pageSize = 20;
+ BundleVersionCriteria criteria = new BundleVersionCriteria();
+ criteria.addFilterName(TEST_PREFIX);
+ criteria.setPaging(0, pageSize);
+ criteria.addSortId(PageOrdering.DESC);
+
+ // the List is used because of the access from the anonymous class
+ final List<Integer> pagesFlipped = new ArrayList<Integer>();
+ pagesFlipped.add(0);
+
+ // iterate over the results with CriteriaQuery
+ CriteriaQueryExecutor<BundleVersion, BundleVersionCriteria> queryExecutor = new CriteriaQueryExecutor<BundleVersion, BundleVersionCriteria>() {
+ @Override
+ public PageList<BundleVersion> execute(BundleVersionCriteria criteria) {
+ pagesFlipped.set(0, pagesFlipped.get(0) + 1);
+ return bundleManager.findBundleVersionsByCriteria(overlord, criteria);
+ }
+ };
+
+ // initiate first/(total depending on page size) request.
+ CriteriaQuery<BundleVersion, BundleVersionCriteria> bundleVersions = new CriteriaQuery<BundleVersion, BundleVersionCriteria>(
+ criteria, queryExecutor);
+
+ List<Integer> toDelete = new ArrayList<Integer>(bundleVersionVersions.size());
+ Integer prevId = null;
+ // iterate over the entire result set efficiently
+ String errMsg = "Results should be sorted by id, something is out of order";
+ for (BundleVersion bv : bundleVersions) {
+ assertTrue(errMsg, null == prevId || prevId > bv.getId());
+ prevId = bv.getId();
+ toDelete.add(bv.getId());
+ bundleVersionVersions.remove(String.valueOf(bv.getVersion()));
+ }
+
+ // check if the page was flipped the correct amount of times (this formula works only for this particular case)
+ assertTrue("While iterating the bundle versions, the findBundleVersionsByCriteria() should be called " + bundleVersionCount / pageSize
+ + " times" + pageSize, pagesFlipped.get(0) == bundleVersionCount / pageSize);
+
+ // delete all
+ for (int id : toDelete) {
+ bundleManager.deleteBundleVersion(overlord, id, true);
+ }
+
+ // check whether every record was processed when iterating over the bundleVersions
+ assertTrue("Expected bundleVersions to be empty. Still " + bundleVersionVersions.size()
+ + " version(s).", bundleVersionVersions.isEmpty());
+
+ // test that entire list parsed spanning multiple pages
+ assertTrue("Expected bundleVersions to be empty. Still " + bundleVersionVersions.size()
+ + " version(s).", bundleVersionVersions.isEmpty());
+
+ // check if everything is deleted
+ PageList<BundleVersion> bvs = null;
+ criteria = new BundleVersionCriteria();
+ criteria.addFilterName(TEST_PREFIX);
+ criteria.clearPaging(); // fetch all
+ bvs = bundleManager.findBundleVersionsByCriteria(overlord, criteria);
+ assertNotNull(bvs);
+ assertTrue(bvs.isEmpty());
+ }
@Test(enabled = TESTS_ENABLED)
public void testGetAllBundleVersionFilenames() throws Exception {
[rhq] modules/enterprise pom.xml
by lkrejci
modules/enterprise/server/jar/pom.xml | 120 ++--------------------------------
pom.xml | 31 +++++---
2 files changed, 28 insertions(+), 123 deletions(-)
New commits:
commit 61803672ded5471c17b57a6d1d7d93646acf0462
Author: Lukas Krejci <lkrejci(a)redhat.com>
Date: Thu Feb 28 15:01:32 2013 +0100
Getting the API check ready for separately versioned parts of RHQ.
To execute an API check build, you only need to add -Psignature-check. There is no need
to specify the base version anymore because it is now resolved per module
from Maven as the latest RELEASED and published version (as opposed to
LATEST).
diff --git a/modules/enterprise/server/jar/pom.xml b/modules/enterprise/server/jar/pom.xml
index d59460f..bd5437a 100644
--- a/modules/enterprise/server/jar/pom.xml
+++ b/modules/enterprise/server/jar/pom.xml
@@ -621,6 +621,16 @@
</execution>
</executions>
</plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>clirr-maven-plugin</artifactId>
+ <configuration>
+ <includes>
+ <include>**/*Remote</include>
+ <include>**/ServerVersion</include>
+ </includes>
+ </configuration>
+ </plugin>
</plugins>
</build>
@@ -987,116 +997,6 @@
</plugins>
</build>
</profile>
-
- <profile>
- <id>cobertura</id>
- <build>
- <plugins>
- <plugin>
- <artifactId>maven-antrun-plugin</artifactId>
- <dependencies>
- <dependency>
- <groupId>net.sourceforge.cobertura</groupId>
- <artifactId>cobertura</artifactId>
- <version>${cobertura.version}</version>
- </dependency>
- </dependencies>
- <executions>
- <execution>
- <id>cobertura-instrument</id>
- <phase>process-test-classes</phase>
- <configuration>
- <target>
- <!-- prepare directory structure for cobertura-->
- <mkdir dir="target/cobertura"/>
- <mkdir dir="target/cobertura/backup"/>
- <!-- backup all classes so that we can instrument the original classes-->
- <copy toDir="target/cobertura/backup" verbose="true" overwrite="true">
- <fileset dir="target/classes">
- <include name="**/*.class"/>
- </fileset>
- </copy>
- <!-- create a properties file and save there location of cobertura data file-->
- <touch file="target/classes/cobertura.properties"/>
- <echo file="target/classes/cobertura.properties">net.sourceforge.cobertura.datafile=${project.build.directory}/cobertura/cobertura.ser</echo>
- <taskdef classpathref="maven.plugin.classpath" resource="tasks.properties"/>
- <!-- instrument all classes in target/classes directory -->
- <cobertura-instrument datafile="${project.build.directory}/cobertura/cobertura.ser"
- todir="${project.build.directory}/classes">
- <fileset dir="${project.build.directory}/classes">
- <include name="**/*.class"/>
- <exclude name="**/DynamicConfigurationPropertyLocal.class"/>
- <exclude name="**/DynamicConfigurationPropertyBean.class"/>
- </fileset>
- </cobertura-instrument>
- </target>
- </configuration>
- <goals>
- <goal>run</goal>
- </goals>
- </execution>
- <execution>
- <id>cobertura-report</id>
- <phase>prepare-package</phase>
- <configuration>
- <target>
- <taskdef classpathref="maven.plugin.classpath" resource="tasks.properties"/>
- <!-- prepare directory structure for cobertura-->
- <mkdir dir="target/cobertura"/>
- <mkdir dir="target/site/cobertura"/>
- <!-- restore classes from backup folder to classes folder -->
- <copy toDir="target/classes" verbose="true" overwrite="true">
- <fileset dir="target/cobertura/backup">
- <include name="**/*.class"/>
- </fileset>
- </copy>
- <!-- delete backup folder-->
- <delete dir="target/cobertura/backup"/>
- <!-- create a code coverage report -->
- <cobertura-report format="html" datafile="${project.build.directory}/cobertura/cobertura.ser"
- destdir="${project.build.directory}/site/cobertura">
- <fileset dir="${basedir}/src/main/java">
- <include name="**/*.java"/>
- </fileset>
- </cobertura-report>
- <!-- delete cobertura.properties file -->
- <delete file="target/classes/cobertura.properties"/>
- </target>
- </configuration>
- <goals>
- <goal>run</goal>
- </goals>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
- </profile>
-
- <profile>
- <id>signature-check</id>
- <activation>
- <property>
- <name>signature-check-base-version</name>
- </property>
- </activation>
- <build>
- <plugins>
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>clirr-maven-plugin</artifactId>
- <version>${clirr.version}</version>
- <configuration>
- <includes>
- <include>**/*Remote</include>
- <include>**/ServerVersion</include>
- </includes>
- </configuration>
- </plugin>
- </plugins>
- </build>
- </profile>
-
</profiles>
<reporting>
diff --git a/pom.xml b/pom.xml
index afd1a61..b049493 100644
--- a/pom.xml
+++ b/pom.xml
@@ -230,10 +230,6 @@
part of our public API and is therefore not API-checked. -->
<rhq.internal>true</rhq.internal>
- <!-- The name of the file in the module's root dir that contains the intentional
- api changes to be ignored by Clirr api checks -->
- <intentional-api-changes-file>intentional-api-changes-since-${signature-check-base-version}.xml</intentional-api-changes-file>
-
<jacoco.version>0.6.0.201210061924</jacoco.version>
<jacoco-arquillian-extension.version>1.0.0.Alpha5</jacoco-arquillian-extension.version>
</properties>
@@ -831,7 +827,7 @@
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>build-helper-maven-plugin</artifactId>
- <version>1.5</version>
+ <version>1.7</version>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
@@ -1689,17 +1685,26 @@
<profile>
<id>signature-check</id>
- <activation>
- <property>
- <name>signature-check-base-version</name>
- </property>
- </activation>
<build>
<plugins>
<plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>version-resolver</id>
+ <goals>
+ <goal>released-version</goal>
+ </goals>
+ <configuration>
+ <propertyPrefix>signature-check-base</propertyPrefix>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>clirr-maven-plugin</artifactId>
- <version>${clirr.version}</version>
<executions>
<execution>
<id>signature-check</id>
@@ -1711,7 +1716,7 @@
<comparisonArtifact>
<groupId>${project.groupId}</groupId>
<artifactId>${project.artifactId}</artifactId>
- <version>${signature-check-base-version}</version>
+ <version>${signature-check-base.version}</version>
</comparisonArtifact>
</comparisonArtifacts>
<logResults>true</logResults>
@@ -1719,7 +1724,7 @@
<failOnError>true</failOnError>
<failOnWarning>true</failOnWarning>
<skip>${rhq.internal}</skip>
- <ignoredDifferencesFile>${intentional-api-changes-file}</ignoredDifferencesFile>
+ <ignoredDifferencesFile>intentional-api-changes-since-${signature-check-base.version}.xml</ignoredDifferencesFile>
</configuration>
</execution>
</executions>