modules/enterprise/server/server-metrics/pom.xml | 21
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java | 248 ++++++++--
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigratorRunner.java | 243 +++++++++
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java | 4
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/MetricsTable.java | 4
modules/enterprise/server/server-metrics/tools/create_test_data.py | 69 ++
6 files changed, 535 insertions(+), 54 deletions(-)
New commits:
commit 42295b1f952ee8f61071e09ecb0fc1cccd3fb7fe
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Fri Mar 1 16:20:58 2013 -0600
Add dependencies for data migration command line runner.
diff --git a/modules/enterprise/server/server-metrics/pom.xml b/modules/enterprise/server/server-metrics/pom.xml
index 0db7f68..b24f9d4 100644
--- a/modules/enterprise/server/server-metrics/pom.xml
+++ b/modules/enterprise/server/server-metrics/pom.xml
@@ -127,6 +127,27 @@
</dependency>
<dependency>
+ <groupId>commons-cli</groupId>
+ <artifactId>commons-cli</artifactId>
+ <version>1.2</version>
+ <scope>provided</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>postgresql</groupId>
+ <artifactId>postgresql</artifactId>
+ <!-- NOTE: version defined in root pom dependencyManagement section -->
+ </dependency>
+
+
+ <dependency>
+ <groupId>org.hibernate</groupId>
+ <artifactId>hibernate-entitymanager</artifactId>
+ <scope>provided</scope>
+ </dependency>
+
+
+ <dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-testng</artifactId>
<version>${powermock.version}</version>
commit 14ea631efbd17b94c6e5654eff8695ccad4f86a7
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Fri Mar 1 15:46:16 2013 -0600
Add a command line runner for the data migrator to assist with benchmarking and
potential use within the application. Also includes various updates to the data
migrator itself, and updates the tool that generates random test data to accept
SQL connection settings as command line options.
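The migrator's reworked configuration surface is easiest to see as a usage sketch. The following is hypothetical driver code (it mirrors what DataMigratorRunner.run() does further down; entity manager and session construction are elided):

    // Hypothetical usage of the DataMigrator API introduced in this commit.
    void migrateRawDataOnly(EntityManager entityManager, Session session) throws Exception {
        DataMigrator migrator = new DataMigrator(entityManager, session);
        migrator.preserveData();                      // keep the SQL data (replaces the old boolean setters)
        migrator.run1HAggregateDataMigration(false);  // disable the aggregate migrations,
        migrator.run6HAggregateDataMigration(false);  // leaving only the raw data migration enabled
        migrator.run1DAggregateDataMigration(false);
        migrator.migrateData();
    }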
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java
index ff46be8..cb72a07 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java
@@ -20,17 +20,26 @@
package org.rhq.server.metrics;
+import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto;
+import static com.datastax.driver.core.querybuilder.QueryBuilder.ttl;
+
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Date;
+import java.util.LinkedList;
import java.util.List;
+import java.util.Queue;
import javax.persistence.EntityManager;
import javax.persistence.Query;
-import com.datastax.driver.core.BoundStatement;
-import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
+import com.datastax.driver.core.Statement;
+import com.datastax.driver.core.querybuilder.QueryBuilder;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.rhq.core.domain.measurement.MeasurementDataNumeric1D;
import org.rhq.core.domain.measurement.MeasurementDataNumeric1H;
@@ -45,14 +54,21 @@ import org.rhq.server.metrics.domain.MetricsTable;
*/
public class DataMigrator {
- private static final int MAX_RECORDS_TO_MIGRATE = 1000;
+ private final Log log = LogFactory.getLog(DataMigrator.class);
+
+ private static final int MAX_RECORDS_TO_LOAD_FROM_SQL = 30000;
+ private static final int MAX_RECORDS_TO_BATCH_TO_CASSANDRA = 500;
private static final int MAX_NUMBER_OF_FAILURES = 5;
private final EntityManager entityManager;
+
private final Session session;
+ private boolean telemetry;
+
private boolean deleteDataImmediatelyAfterMigration;
private boolean deleteAllDataAtEndOfMigration;
+
private boolean runRawDataMigration;
private boolean run1HAggregateDataMigration;
private boolean run6HAggregateDataMigration;
@@ -68,6 +84,8 @@ public class DataMigrator {
this.run1HAggregateDataMigration = true;
this.run6HAggregateDataMigration = true;
this.run1DAggregateDataMigration = true;
+
+ this.telemetry = false;
}
public void run1HAggregateDataMigration(boolean value) {
@@ -82,14 +100,28 @@ public class DataMigrator {
this.run1DAggregateDataMigration = value;
}
- public void deleteDataImmediatelyAfterMigration(boolean value) {
- this.deleteDataImmediatelyAfterMigration = value;
- this.deleteAllDataAtEndOfMigration = !value;
+
+ public void deleteDataImmediatelyAfterMigration() {
+ this.deleteDataImmediatelyAfterMigration = true;
+ this.deleteAllDataAtEndOfMigration = false;
}
- public void deleteAllDataAtEndOfMigration(boolean value) {
- this.deleteAllDataAtEndOfMigration = value;
- this.deleteDataImmediatelyAfterMigration = !value;
+ public void deleteAllDataAtEndOfMigration() {
+ this.deleteAllDataAtEndOfMigration = true;
+ this.deleteDataImmediatelyAfterMigration = false;
+ }
+
+ public void preserveData() {
+ this.deleteAllDataAtEndOfMigration = false;
+ this.deleteDataImmediatelyAfterMigration = false;
+ }
+
+ public void enableTelemetry() {
+ this.telemetry = true;
+ }
+
+ public void disableTelemetry() {
+ this.telemetry = false;
}
public void migrateData() throws Exception {
@@ -125,11 +157,15 @@ public class DataMigrator {
int numberOfFailures = 0;
Exception caughtException = null;
+ log.info(migrator.getClass());
+
while (numberOfFailures < MAX_NUMBER_OF_FAILURES) {
try {
migrator.work();
return;
} catch (Exception e) {
+ log.error("Migrator " + migrator.getClass() + " failed.
Retrying!", e);
+
caughtException = e;
numberOfFailures++;
}
@@ -202,14 +238,20 @@ public class DataMigrator {
while (true) {
Query q = entityManager.createNamedQuery(query);
- q.setMaxResults(MAX_RECORDS_TO_MIGRATE);
+ q.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
existingData = (List<MeasurementDataNumericAggregateInterface>) q.getResultList();
if (existingData.size() == 0) {
break;
}
- insertDataToCassandra(existingData);
+ try {
+ insertDataToCassandra(existingData);
+ } catch (Exception e) {
+ log.error("Failed to insert " + metricsTable.toString()
+ + " data. Attempting to insert the current batch of data one
more time");
+ insertDataToCassandra(existingData);
+ }
for (Object entity : existingData) {
entityManager.remove(entity);
@@ -220,33 +262,82 @@ public class DataMigrator {
@SuppressWarnings("unchecked")
private void performFullMigration() throws Exception {
- Query q = entityManager.createNamedQuery(query);
- List<MeasurementDataNumericAggregateInterface> existingData = (List<MeasurementDataNumericAggregateInterface>) q
- .getResultList();
+ List<MeasurementDataNumericAggregateInterface> existingData = null;
+ int lastMigratedRecord = 0;
+
+ while (true) {
+ Query q = entityManager.createNamedQuery(query);
+ q.setFirstResult(lastMigratedRecord + 1);
+ q.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
+
+ existingData = (List<MeasurementDataNumericAggregateInterface>) q.getResultList();
+
+ if (existingData.size() == 0) {
+ break;
+ }
- insertDataToCassandra(existingData);
+ lastMigratedRecord += existingData.size();
+
+ try{
+ insertDataToCassandra(existingData);
+ } catch (Exception e) {
+ log.error("Failed to insert " + metricsTable.toString()
+ + " data. Attempting to insert the current batch of data one
more time");
+ insertDataToCassandra(existingData);
+ }
+ }
}
private void insertDataToCassandra(List<MeasurementDataNumericAggregateInterface> existingData) throws Exception {
- String cql = "INSERT INTO " + metricsTable
- + " (schedule_id, time, type, value) VALUES (?, ?, ?, ?) USING TTL
" + metricsTable.getTTL();
- PreparedStatement statement = session.prepare(cql);
+ Statement statement = null;
List<ResultSetFuture> resultSetFutures = new
ArrayList<ResultSetFuture>();
+ List<Statement> statementsAccumulator = new
ArrayList<Statement>();
- for (MeasurementDataNumericAggregateInterface measurement : existingData) {
- BoundStatement boundStatement = statement.bind(measurement.getScheduleId(),
- new Date(measurement.getTimestamp()), AggregateType.MIN.ordinal(), measurement.getMin());
- resultSetFutures.add(session.executeAsync(boundStatement));
+ //only need approximate TTL to speed up processing
+ //given that each batch is processed within seconds, getting the
+ //system time once per batch has minimal impact on the record retention
+ long currentTimeMillis = System.currentTimeMillis();
+ long expectedTTLMillis = metricsTable.getTTLinMilliseconds() * 10;
+ long itemTTLSeconds = 0;
- boundStatement = statement.bind(measurement.getScheduleId(), new Date(measurement.getTimestamp()),
- AggregateType.MAX.ordinal(), measurement.getMax());
- resultSetFutures.add(session.executeAsync(boundStatement));
+ for (MeasurementDataNumericAggregateInterface measurement : existingData) {
+ itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + measurement.getTimestamp()) / 1000l;
+
+ statement = QueryBuilder.insertInto(metricsTable.toString())
+ .value("schedule_id", measurement.getScheduleId())
+ .value("time", new Date(measurement.getTimestamp()))
+ .value("type", AggregateType.MIN.ordinal())
+ .value("value", measurement.getMin())
+ .using(ttl((int) itemTTLSeconds));
+ statementsAccumulator.add(statement);
+
+ statement = insertInto(metricsTable.toString())
+ .value("schedule_id", measurement.getScheduleId())
+ .value("time", new Date(measurement.getTimestamp()))
+ .value("type", AggregateType.MAX.ordinal())
+ .value("value", measurement.getMax())
+ .using(ttl((int) itemTTLSeconds));
+ statementsAccumulator.add(statement);
+
+ statement = insertInto(metricsTable.toString()).value("schedule_id", measurement.getScheduleId())
+ .value("time", new Date(measurement.getTimestamp()))
+ .value("type", AggregateType.AVG.ordinal())
+ .value("value",
Double.parseDouble(measurement.getValue().toString()))
+ .using(ttl((int) itemTTLSeconds));
+ statementsAccumulator.add(statement);
+
+ if (statementsAccumulator.size() == MAX_RECORDS_TO_BATCH_TO_CASSANDRA) {
+ resultSetFutures.add(session.executeAsync(QueryBuilder.batch((Statement[]) statementsAccumulator
+ .toArray(new Statement[statementsAccumulator.size()]))));
+ statementsAccumulator.clear();
+ }
+ }
- boundStatement = statement.bind(measurement.getScheduleId(), new Date(measurement.getTimestamp()),
- AggregateType.AVG.ordinal(), Double.parseDouble(measurement.getValue().toString()));
- resultSetFutures.add(session.executeAsync(boundStatement));
+ if (statementsAccumulator.size() != 0) {
+ resultSetFutures.add(session.executeAsync(QueryBuilder.batch((Statement[]) statementsAccumulator
+ .toArray(new Statement[statementsAccumulator.size()]))));
}
for (ResultSetFuture future : resultSetFutures) {
@@ -258,6 +349,8 @@ public class DataMigrator {
private class RawDataMigrator implements CallableMigrationWorker {
+ Queue<String> tablesNotProcessed = new LinkedList<String>(Arrays.asList(getRawDataTables()));
+
public void work() throws Exception {
if (deleteDataImmediatelyAfterMigration) {
performBatchedMigration();
@@ -270,20 +363,28 @@ public class DataMigrator {
private void performBatchedMigration() throws Exception {
List<Object[]> existingData = null;
- for (String table : getRawDataTables()) {
- String selectQuery = "SELECT schedule_id, value, time_stamp FROM " + table;
+ while (!tablesNotProcessed.isEmpty()) {
+ String table = tablesNotProcessed.peek();
+
+ String selectQuery = "SELECT schedule_id, time_stamp, value FROM " + table;
String deleteQuery = "DELETE FROM " + table + " WHERE schedule_id = ?";
while (true) {
Query query = entityManager.createNativeQuery(selectQuery);
- query.setMaxResults(MAX_RECORDS_TO_MIGRATE);
+ query.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
existingData = query.getResultList();
if (existingData.size() == 0) {
break;
}
- insertDataToCassandra(existingData);
+ try {
+ insertDataToCassandra(existingData);
+ } catch (Exception e) {
+ log.error("Failed to insert " +
MetricsTable.RAW.toString()
+ + " data. Attempting to insert the current batch of data
one more time");
+ insertDataToCassandra(existingData);
+ }
query = entityManager.createNativeQuery(deleteQuery);
@@ -292,6 +393,8 @@ public class DataMigrator {
query.executeUpdate();
}
}
+
+ tablesNotProcessed.poll();
}
}
@@ -299,26 +402,81 @@ public class DataMigrator {
private void performFullMigration() throws Exception {
List<Object[]> existingData = null;
- for (String table : getRawDataTables()) {
- String selectQuery = "SELECT schedule_id, value, time_stamp FROM " + table;
- Query query = entityManager.createNativeQuery(selectQuery);
- existingData = query.getResultList();
- insertDataToCassandra(existingData);
+ while (!tablesNotProcessed.isEmpty()) {
+ String table = tablesNotProcessed.peek();
+
+ log.info("Start migrating raw table: " + table);
+
+ int lastMigratedRecord = 0;
+
+ while (true) {
+ String selectQuery = "SELECT schedule_id, time_stamp, value FROM " + table;
+ Query query = entityManager.createNativeQuery(selectQuery);
+ query.setFirstResult(lastMigratedRecord + 1);
+ query.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
+
+ existingData = query.getResultList();
+
+ if (existingData.size() == 0) {
+ break;
+ }
+
+ lastMigratedRecord += existingData.size();
+
+ try {
+ insertDataToCassandra(existingData);
+ } catch (Exception e) {
+ log.error("Failed to insert " +
MetricsTable.RAW.toString()
+ + " data. Attempting to insert the current batch of data
one more time");
+ insertDataToCassandra(existingData);
+ }
+
+ if (lastMigratedRecord % MAX_RECORDS_TO_LOAD_FROM_SQL == 0) {
+ log.info("------------" + lastMigratedRecord +
"---------------------");
+ }
+ }
+
+ log.info("Done migrating raw table" + table +
"---------------------");
+ tablesNotProcessed.poll();
}
}
private void insertDataToCassandra(List<Object[]> existingData) throws Exception {
- String cql = "INSERT INTO " + MetricsTable.RAW + " (schedule_id, time, value) VALUES (?, ?, ?) USING TTL "
- + MetricsTable.RAW.getTTL();
- PreparedStatement statement = session.prepare(cql);
-
List<ResultSetFuture> resultSetFutures = new ArrayList<ResultSetFuture>();
+ List<Statement> statementsAccumulator = new ArrayList<Statement>();
+
+ //only need approximate TTL to speed up processing
+ //given that each batch is processed within seconds, getting the
+ //system time once per batch has minimal impact on the record retention
+ long currentTimeMillis = System.currentTimeMillis();
+ long expectedTTLMillis = MetricsTable.RAW.getTTLinMilliseconds() * 10;
+ long creationTimeMillis = 0;
+ long itemTTLSeconds = 0;
for (Object[] rawDataPoint : existingData) {
- BoundStatement boundStatement = statement.bind(Integer.parseInt(rawDataPoint[0].toString()),
- new Date(Long.parseLong(rawDataPoint[1].toString())),
- Double.parseDouble(rawDataPoint[2].toString()));
- resultSetFutures.add(session.executeAsync(boundStatement));
+ creationTimeMillis = Long.parseLong(rawDataPoint[1].toString());
+ itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + creationTimeMillis) / 1000l;
+
+ if (itemTTLSeconds > 0) {
+ Statement boundStatement = QueryBuilder.insertInto(MetricsTable.RAW.toString())
+ .value("schedule_id", Integer.parseInt(rawDataPoint[0].toString()))
+ .value("time", new Date(creationTimeMillis))
+ .value("value", Double.parseDouble(rawDataPoint[2].toString()))
+ .using(ttl((int) itemTTLSeconds));
+
+ statementsAccumulator.add(boundStatement);
+ }
+
+ if (statementsAccumulator.size() == MAX_RECORDS_TO_BATCH_TO_CASSANDRA) {
+ resultSetFutures.add(session.executeAsync(QueryBuilder.batch((Statement[]) statementsAccumulator
+ .toArray(new Statement[statementsAccumulator.size()]))));
+ statementsAccumulator.clear();
+ }
+ }
+
+ if (statementsAccumulator.size() != 0) {
+ resultSetFutures.add(session.executeAsync(QueryBuilder.batch((Statement[]) statementsAccumulator
+ .toArray(new Statement[statementsAccumulator.size()]))));
}
for (ResultSetFuture future : resultSetFutures) {
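Before moving on, the per-item TTL arithmetic used by both insertDataToCassandra() methods deserves a concrete example. A minimal sketch with illustrative numbers (only the formula, the skip of non-positive TTLs, and the batch-of-500 flush come from the patch; the 7-day TTL and 2-day age are made up):

    // Worked example of the approximate per-item TTL computed in the patch:
    // remaining TTL = expected lifetime of the row - age of the data point.
    public class TtlSketch {
        public static void main(String[] args) {
            long ttlSeconds = 7 * 24 * 3600L;                  // assume a table TTL of 7 days
            long expectedTTLMillis = ttlSeconds * 1000L * 10;  // the patch scales getTTLinMilliseconds() by 10
            long currentTimeMillis = System.currentTimeMillis();
            long creationTimeMillis = currentTimeMillis - 2L * 24 * 3600 * 1000; // a 2-day-old data point
            long itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + creationTimeMillis) / 1000L;
            // (70 days - 2 days) in seconds = 5875200; raw rows with a TTL <= 0 are skipped entirely
            System.out.println("item TTL in seconds: " + itemTTLSeconds);
        }
    }

The system clock is read once per batch because each batch completes within seconds, so the approximation costs at most a few seconds of retention per row; statements accumulate until MAX_RECORDS_TO_BATCH_TO_CASSANDRA (500) and are flushed as one batch, with a final partial batch flushed after the loop.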
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigratorRunner.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigratorRunner.java
new file mode 100644
index 0000000..37e941b
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigratorRunner.java
@@ -0,0 +1,243 @@
+/*
+ * RHQ Management Platform
+ * Copyright 2011, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+package org.rhq.server.metrics;
+
+import java.util.Properties;
+
+import javax.persistence.EntityManager;
+import javax.persistence.EntityManagerFactory;
+
+import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.ProtocolOptions.Compression;
+import com.datastax.driver.core.Session;
+import com.datastax.driver.core.SimpleAuthInfoProvider;
+import com.datastax.driver.core.exceptions.NoHostAvailableException;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.hibernate.ejb.Ejb3Configuration;
+
+import org.rhq.cassandra.CassandraNode;
+
+/**
+ * @author Stefan Negrea
+ *
+ * Only Postgres is supported by the runner; the data migrator itself can run
+ * against any database.
+ *
+ * Maven command to run this from the command line:
+ *
+ * mvn install -DskipTests exec:java -Dexec.mainClass="org.rhq.server.metrics.DataMigratorRunner"
+ *
+ */
+@SuppressWarnings({ "static-access", "deprecation" })
+public class DataMigratorRunner {
+
+ private final Log log = LogFactory.getLog(DataMigratorRunner.class);
+
+ //Cassandra
+ private String cassandraUser;
+ private Option cassandraUserOption = OptionBuilder.withLongOpt("cassandra-user").hasArg().create();
+
+ private String cassandraPassword;
+ private Option cassandraPasswordOption = OptionBuilder.withLongOpt("cassandra-password").hasArg().create();
+
+ private String[] cassandraHosts;
+ private Option cassandraHostsOption = OptionBuilder.withLongOpt("cassandra-hosts").hasArg().create();
+
+ private boolean cassandraCompression;
+ private Option cassandraCompressionOption = OptionBuilder.withLongOpt("cassandra-compression").create();
+
+ //SQL
+ private String sqlUser;
+ private Option sqlUserOption = OptionBuilder.withLongOpt("sql-user").hasArg().create();
+
+ private String sqlPassword;
+ private Option sqlPasswordOption = OptionBuilder.withLongOpt("sql-password").hasArg().create();
+
+ private String sqlHost;
+ private Option sqlHostOption = OptionBuilder.withLongOpt("sql-host").hasArg().create();
+
+ private String sqlPort;
+ private Option sqlPortOption = OptionBuilder.withLongOpt("sql-port").hasArg().create();
+
+ private String sqlDB;
+ private Option sqlDBOption = OptionBuilder.withLongOpt("sql-db").hasArg().create();
+
+ /**
+ * @param args
+ * @throws ParseException
+ */
+ public static void main(String[] args) throws Exception {
+
+ try{
+ DataMigratorRunner runner = new DataMigratorRunner();
+ runner.configure(args);
+ runner.run();
+ } catch (Exception e) {
+ System.out.println(e);
+ }
+
+ System.exit(0);
+ }
+
+ private void configure(String args[]) throws Exception {
+ Options options = new Options();
+ options.addOption(cassandraUserOption);
+ options.addOption(cassandraPasswordOption);
+ options.addOption(cassandraHostsOption);
+ options.addOption(cassandraCompressionOption);
+
+ options.addOption(sqlUserOption);
+ options.addOption(sqlPasswordOption);
+ options.addOption(sqlHostOption);
+ options.addOption(sqlPortOption);
+ options.addOption(sqlDBOption);
+
+ CommandLineParser parser = new PosixParser();
+ CommandLine commandLine = parser.parse(options, args);
+
+ parseCassandraOptionsWithDefault(commandLine);
+ parseSQLOptionsWithDefault(commandLine);
+ }
+
+ private void run() throws Exception {
+ log.info("Creating Entity Manager");
+ EntityManager entityManager = this.createEntityManager();
+ log.info("Done creating Entity Manager");
+
+ log.info("Creating Cassandra session");
+ Session session = this.createCassandraSession();
+ log.info("Done creating Cassandra session");
+
+ DataMigrator migrator = new DataMigrator(entityManager, session);
+
+ migrator.preserveData();
+ migrator.run1DAggregateDataMigration(false);
+ migrator.run6HAggregateDataMigration(false);
+ migrator.run1HAggregateDataMigration(false);
+
+ migrator.migrateData();
+ }
+
+ private Session createCassandraSession() throws Exception {
+ Compression selectedCompression = Compression.NONE;
+ if (cassandraCompression) {
+ selectedCompression = Compression.SNAPPY;
+ }
+
+ Cluster cluster = Cluster
+ .builder()
+ .addContactPoints(cassandraHosts)
+ .withCompression(selectedCompression)
+ .withoutMetrics()
+ .withAuthInfoProvider(
+ new SimpleAuthInfoProvider().add("username", cassandraUser).add("password", cassandraPassword)).build();
+
+ return cluster.connect("rhq");
+ }
+
+ private EntityManager createEntityManager() throws Exception {
+ Properties properties = new Properties();
+ properties.put("javax.persistence.provider",
"org.hibernate.ejb.HibernatePersistence");
+ properties.put("hibernate.dialect",
"org.hibernate.dialect.PostgreSQLDialect");
+ properties.put("hibernate.driver_class",
"org.postgresql.Driver");
+ properties.put("hibernate.connection.username", sqlUser);
+ properties.put("hibernate.connection.password", sqlPassword);
+ properties.put("hibernate.connection.url",
"jdbc:postgresql://" + sqlHost + ":" + sqlPort + "/" +
sqlDB);
+
+ Ejb3Configuration configuration = new Ejb3Configuration();
+ configuration.setProperties(properties);
+ EntityManagerFactory factory = configuration.buildEntityManagerFactory();
+ return factory.createEntityManager();
+ }
+
+ private void parseCassandraOptionsWithDefault(CommandLine commandLine) throws NoHostAvailableException {
+ if (commandLine.hasOption(cassandraUserOption.getLongOpt())) {
+ cassandraUser = commandLine.getOptionValue(cassandraUserOption.getLongOpt());
+ } else {
+ cassandraUser = "rhqadmin";
+ }
+
+ if (commandLine.hasOption(cassandraPasswordOption.getLongOpt())) {
+ cassandraPassword = commandLine.getOptionValue(cassandraPasswordOption.getLongOpt());
+ } else {
+ cassandraPassword = "rhqadmin";
+ }
+
+ if (commandLine.hasOption(cassandraHostsOption.getLongOpt())) {
+ String[] seeds = commandLine.getOptionValue(cassandraHostsOption.getLongOpt()).split(",");
+ cassandraHosts = new String[seeds.length];
+ for (int i = 0; i < seeds.length; ++i) {
+ CassandraNode node = CassandraNode.parseNode(seeds[i]);
+ cassandraHosts[i] = node.getHostName();
+ }
+ } else {
+ cassandraHosts = new String[] { "127.0.0.1", "127.0.0.2" };
+ }
+
+ if (commandLine.hasOption(cassandraCompressionOption.getLongOpt())) {
+ cassandraCompression = true;
+ } else {
+ cassandraCompression = false;
+ }
+ }
+
+ private void parseSQLOptionsWithDefault(CommandLine commandLine) throws NoHostAvailableException {
+ if (commandLine.hasOption(sqlUserOption.getLongOpt())) {
+ sqlUser = commandLine.getOptionValue(sqlUserOption.getLongOpt());
+ } else {
+ sqlUser = "rhqadmin";
+ }
+
+ if (commandLine.hasOption(sqlPasswordOption.getLongOpt())) {
+ sqlPassword = commandLine.getOptionValue(sqlPasswordOption.getLongOpt());
+ } else {
+ sqlPassword = "rhqadmin";
+ }
+
+ if (commandLine.hasOption(sqlHostOption.getLongOpt())) {
+ sqlHost = commandLine.getOptionValue(sqlHostOption.getLongOpt());
+ } else {
+ sqlHost = "localhost";
+ }
+
+ if (commandLine.hasOption(sqlPortOption.getLongOpt())) {
+ sqlPort = commandLine.getOptionValue(sqlPortOption.getLongOpt());
+ } else {
+ sqlPort = "5432";
+ }
+
+ if (commandLine.hasOption(sqlDBOption.getLongOpt())) {
+ sqlDB = commandLine.getOptionValue(sqlDBOption.getLongOpt());
+ } else {
+ sqlDB = "rhq_db";
+ }
+ }
+}
\ No newline at end of file
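For reference, a hypothetical invocation of the runner with explicit options (the option names and default values come straight from the patch; anything left out falls back to the defaults in parseCassandraOptionsWithDefault()/parseSQLOptionsWithDefault()):

    mvn install -DskipTests exec:java \
        -Dexec.mainClass="org.rhq.server.metrics.DataMigratorRunner" \
        -Dexec.args="--sql-host localhost --sql-port 5432 --sql-db rhq_db \
                     --sql-user rhqadmin --sql-password rhqadmin --cassandra-compression"

Note that --cassandra-hosts takes a comma-separated list whose entries are parsed by CassandraNode.parseNode(), so the expected element format depends on that class.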
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java
index 5373b75..c542225 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/ListPagedResult.java
@@ -118,6 +118,7 @@ public class ListPagedResult<T> implements Iterable<T> {
@SuppressWarnings({ "unchecked", "rawtypes" })
private final List<?> localValuesToBind = new ArrayList(valuesToBind);
private ResultSet resultSet = retrieveNextResultSet(null, localValuesToBind);
+ private T lastRetrievedItem = null;
public boolean hasNext() {
resultSet = retrieveNextResultSet(resultSet, localValuesToBind);
@@ -125,7 +126,8 @@ public class ListPagedResult<T> implements Iterable<T> {
}
public T next() {
- return mapper.mapOne(resultSet);
+ lastRetrievedItem = mapper.mapOne(resultSet);
+ return lastRetrievedItem;
}
public void remove() {
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/MetricsTable.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/MetricsTable.java
index de112d1..07bf927 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/MetricsTable.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/domain/MetricsTable.java
@@ -53,6 +53,10 @@ public enum MetricsTable {
return this.ttl;
}
+ public long getTTLinMilliseconds() {
+ return this.ttl * 1000l;
+ }
+
@Override
public String toString() {
return this.tableName;
diff --git a/modules/enterprise/server/server-metrics/tools/create_test_data.py b/modules/enterprise/server/server-metrics/tools/create_test_data.py
index ba73ebd..4e60f04 100644
--- a/modules/enterprise/server/server-metrics/tools/create_test_data.py
+++ b/modules/enterprise/server/server-metrics/tools/create_test_data.py
@@ -33,6 +33,27 @@ import sys
import random
import time
import cStringIO
+import math
+from optparse import OptionParser
+
+
+def parse_arguments():
+ parser = OptionParser()
+
+ #db settings
+ parser.add_option("--host", default="localhost",
action="store", type="string", dest="db_host")
+ parser.add_option("--database", default="rhq_db",
action="store", type="string", dest="db_database")
+ parser.add_option("--user", default="rhqadmin",
action="store", type="string", dest="db_user")
+ parser.add_option("--password", default="rhqadmin",
action="store", type="string", dest="db_password")
+
+ #program settings
+ parser.add_option("--agents", default=10, action="store",
type="int", dest="number_of_agents")
+
+
+ (options, args) = parser.parse_args()
+ print options
+
+ return options
class MetricType:
Raw,Aggregate = range(2)
@@ -86,11 +107,14 @@ def generate_random_aggregate_value():
def insert_data(connection,data,table_name,table_columns):
#do the postgres insertion using copy_from functionality since it's the fastest method
#to bulk import data
+ start_time = time.time()
input_data = cStringIO.StringIO(data)
cursor = connection.cursor()
cursor.copy_from(input_data,sep="\t",table = table_name, columns = table_columns)
cursor.close()
connection.commit()
+ end_time = time.time()
+ return end_time - start_time
def delete_table_data(connection, table):
@@ -99,11 +123,24 @@ def delete_table_data(connection, table):
connection.commit()
+def calculate_mean_standard_deviation(numbers):
+
+ average = reduce(lambda x, y: x + y, numbers) / len(numbers)
+ variance = map(lambda x: (x - average)**2, numbers)
+ average_variance = reduce(lambda x, y: x + y, variance) / len(variance)
+ standard_deviation = math.sqrt(average_variance)
+ return average, standard_deviation
-#Main Script
+
+#Main Script
script_start_time = time.time()
+
+#Parse Command Line Arguments
+options = parse_arguments()
+
+
#General Configuration
raw_tables = map(lambda x: "RHQ_MEAS_DATA_NUM_R"+str(x).zfill(2),range(15))
raw_table_columns = ["schedule_id","time_stamp","value"]
@@ -114,14 +151,14 @@ aggregate_table_columns = ["schedule_id","time_stamp","value","minvalue","maxval
# see the estimation guideline at https://docs.jboss.org/author/display/RHQ/Metrics+Data+Migration+-+Design
raw_metrics_per_agent = 900000
aggregate_metrics_per_agent = 700000
-number_of_agents = 10
+
# some constants to be used by the algorithm
batch_increment = 10000
data_start_time = int(time.time() - 2*604800)*1000 # now - 2 weeks, converted to milliseconds
#establish the db connection, update the settings for your local environment
-connection = psycopg2.connect(database="rhq_db",user="rhqadmin",password="rhqadmin")
+connection = psycopg2.connect(host=options.db_host,database=options.db_database,user=options.db_user,password=options.db_password)
#Delete all data available
@@ -129,9 +166,14 @@ map(lambda x : delete_table_data(connection, x), raw_tables)
map(lambda x : delete_table_data(connection, x), aggregate_tables)
+
+metric_agent_inserting_time = []
+metric_agent_total_time = []
+
#Generate random raw and aggregate data
-for j in range(number_of_agents) :
- agent_generation_start_time = time.time()
+for j in range(options.number_of_agents) :
+ agent_start_time = time.time()
+ agent_inserting_time = 0
#generate and insert random metrics
for i in range(0,raw_metrics_per_agent,batch_increment):
@@ -142,7 +184,7 @@ for j in range(number_of_agents) :
start_of_schedule_id_sequence = j * batch_increment, metricType = MetricType.Raw)
data_to_insert = "\n".join(map(str,map(lambda x: "\t".join((str(s) for s in x)),raw_metrics)))
- insert_data(connection,data_to_insert,random_raw_table,raw_table_columns)
+ agent_inserting_time += insert_data(connection,data_to_insert,random_raw_table,raw_table_columns)
#generate and insert aggregate metrics
for i in range(0,aggregate_metrics_per_agent,batch_increment):
@@ -153,8 +195,19 @@ for j in range(number_of_agents) :
start_of_schedule_id_sequence = j * batch_increment, metricType = MetricType.Aggregate )
data_to_insert = "\n".join(map(str,map(lambda x: "\t".join((str(s) for s in x)),aggregate_metrics)))
- insert_data(connection,data_to_insert,random_aggregate_table,aggregate_table_columns)
+ agent_inserting_time += insert_data(connection,data_to_insert,random_aggregate_table,aggregate_table_columns)
+
+ metric_agent_inserting_time.append(agent_inserting_time)
+ metric_agent_total_time.append( time.time() - agent_start_time )
- print "Data inserted for agent #",j, " - total time:", time.time()
- agent_generation_start_time, " seconds"
+ print "Data created for agent #",j, " - total time:", time.time()
- agent_start_time, " seconds"
print "Total time: ",time.time() - script_start_time, " seconds"
+
+average_inserting_time, standard_deviation_inserting_time = calculate_mean_standard_deviation(metric_agent_inserting_time)
+print "Time spent inserting data: ", metric_agent_inserting_time, " seconds"
+print "Average time inserting data for 1 agent:", average_inserting_time, " seconds with a standard deviation of ", standard_deviation_inserting_time
+
+average_time, standard_deviation_time = calculate_mean_standard_deviation(metric_agent_total_time)
+print "Time spent for each agent: ", metric_agent_total_time , " seconds"
+print "Average time for 1 agent:", average_time, " seconds with a standard deviation of ", standard_deviation_time