modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java       | 311 ++++++----
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigratorRunner.java |  58 +
2 files changed, 246 insertions(+), 123 deletions(-)
New commits:
commit 051044cdf6862863e2f64b76b17fbdaed911359a
Author: Stefan Negrea <snegrea@redhat.com>
Date: Wed Mar 6 13:42:03 2013 -0600
Avoid using named queries to speed up processing, since the raw data is sufficient. Run
the migration process in a separate thread. Use the Cassandra batch directly instead
of accumulating statements in an array before sending them to Cassandra.
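The batching change replaces the intermediate Statement array with a single Batch that is flushed asynchronously whenever it reaches a size threshold. Below is a minimal sketch of that pattern, assuming the DataStax Java driver API already used in this file (QueryBuilder.batch(), Batch.add(), Session.executeAsync()); the table name "raw_metrics", the class, and the helper name insertAll() are illustrative only, not part of the commit:

import java.util.ArrayList;
import java.util.Date;
import java.util.List;

import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Batch;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class BatchInsertSketch {

    // Illustrative threshold; the commit itself uses 100 for raw data and 50
    // for aggregate data (which inserts three rows per measurement).
    private static final int MAX_BATCH_SIZE = 100;

    // Build one Batch incrementally and flush it asynchronously when full,
    // rather than accumulating Statement objects in an intermediate array.
    static List<ResultSetFuture> insertAll(Session session, List<Object[]> rows) {
        List<ResultSetFuture> futures = new ArrayList<ResultSetFuture>();
        Batch batch = QueryBuilder.batch();
        int batchSize = 0;

        for (Object[] row : rows) {
            // Column layout mirrors the raw-data SELECT: schedule_id, time_stamp, value.
            batch.add(QueryBuilder.insertInto("raw_metrics")
                .value("schedule_id", row[0])
                .value("time", new Date(Long.parseLong(row[1].toString())))
                .value("value", row[2]));
            batchSize++;

            if (batchSize >= MAX_BATCH_SIZE) {
                futures.add(session.executeAsync(batch));
                batch = QueryBuilder.batch(); // start a fresh batch
                batchSize = 0;
            }
        }

        if (batchSize != 0) {
            futures.add(session.executeAsync(batch)); // flush the remainder
        }

        return futures;
    }
}

Flushing by statement count keeps each Cassandra batch small enough to process quickly, while the collected futures are waited on after the loop.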
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java
index 8f07098..049b177 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java
@@ -20,7 +20,6 @@
package org.rhq.server.metrics;
-import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto;
import static com.datastax.driver.core.querybuilder.QueryBuilder.ttl;
import java.util.ArrayList;
@@ -35,16 +34,12 @@ import javax.persistence.Query;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
-import com.datastax.driver.core.Statement;
+import com.datastax.driver.core.querybuilder.Batch;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.rhq.core.domain.measurement.MeasurementDataNumeric1D;
-import org.rhq.core.domain.measurement.MeasurementDataNumeric1H;
-import org.rhq.core.domain.measurement.MeasurementDataNumeric6H;
-import org.rhq.core.domain.measurement.MeasurementDataNumericAggregateInterface;
import org.rhq.server.metrics.domain.AggregateType;
import org.rhq.server.metrics.domain.MetricsTable;
@@ -56,10 +51,55 @@ public class DataMigrator {
private final Log log = LogFactory.getLog(DataMigrator.class);
+
private static final int MAX_RECORDS_TO_LOAD_FROM_SQL = 30000;
- private static final int MAX_RECORDS_TO_BATCH_TO_CASSANDRA = 500;
+ private static final int MAX_RAW_BATCH_TO_CASSANDRA = 100;
+ private static final int MAX_AGGREGATE_BATCH_TO_CASSANDRA = 50;
private static final int MAX_NUMBER_OF_FAILURES = 5;
+
+ private enum MigrationQuery {
+        SELECT_1H_DATA("SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1H"),
+        SELECT_6H_DATA("SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_6H"),
+        SELECT_1D_DATA("SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1D"),
+
+ DELETE_1H_ALL_DATA("DELETE FROM RHQ_MEASUREMENT_DATA_NUM_1H"),
+ DELETE_6H_ALL_DATA("DELETE FROM RHQ_MEASUREMENT_DATA_NUM_6H"),
+ DELETE_1D_ALL_DATA("DELETE FROM RHQ_MEASUREMENT_DATA_NUM_1D"),
+
+        DELETE_1H_ENTRY("DELETE FROM RHQ_MEASUREMENT_DATA_NUM_1H WHERE schedule_id = ?"),
+        DELETE_6H_ENTRY("DELETE FROM RHQ_MEASUREMENT_DATA_NUM_6H WHERE schedule_id = ?"),
+        DELETE_1D_ENTRY("DELETE FROM RHQ_MEASUREMENT_DATA_NUM_1D WHERE schedule_id = ?"),
+
+ SELECT_RAW_DATA("SELECT schedule_id, time_stamp, value FROM %s"),
+ DELETE_RAW_ALL_DATA("DELETE FROM %s"),
+ DELETE_RAW_ENTRY("DELETE FROM %s WHERE schedule_id = ?");
+
+ public static final int SCHEDULE_INDEX = 0;
+ public static final int TIMESTAMP_INDEX = 1;
+ public static final int VALUE_INDEX = 2;
+ public static final int MIN_VALUE_INDEX = 3;
+ public static final int MAX_VALUE_INDEX = 4;
+
+ private String query;
+
+ private MigrationQuery(String query){
+ this.query = query;
+ }
+
+ /**
+ * @return the query
+ */
+ public String getQuery() {
+ return query;
+ }
+
+ @Override
+ public String toString() {
+ return query;
+ }
+ }
+
private final EntityManager entityManager;
private final Session session;
@@ -88,6 +128,10 @@ public class DataMigrator {
this.telemetry = false;
}
+ public void runRawDataMigration(boolean value) {
+ this.runRawDataMigration = value;
+ }
+
public void run1HAggregateDataMigration(boolean value) {
this.run1HAggregateDataMigration = value;
}
@@ -153,25 +197,48 @@ public class DataMigrator {
* @param migrator
* @throws Exception
*/
- private void retryOnFailure(CallableMigrationWorker migrator) throws Exception {
- int numberOfFailures = 0;
- Exception caughtException = null;
+    private Thread retryOnFailure(final CallableMigrationWorker migrator) throws Exception {
+
+ RunnableWithException runnable = new RunnableWithException() {
+ private Exception exception;
- log.info(migrator.getClass());
+ @Override
+ public void run() {
+ int numberOfFailures = 0;
+ Exception caughtException = null;
- while (numberOfFailures < MAX_NUMBER_OF_FAILURES) {
- try {
- migrator.work();
- return;
- } catch (Exception e) {
-                log.error("Migrator " + migrator.getClass() + " failed. Retrying!", e);
+ log.info(migrator.getClass());
- caughtException = e;
- numberOfFailures++;
+ while (numberOfFailures < MAX_NUMBER_OF_FAILURES) {
+ try {
+ migrator.work();
+ return;
+ } catch (Exception e) {
+                        log.error("Migrator " + migrator.getClass() + " failed. Retrying!", e);
+
+ caughtException = e;
+ numberOfFailures++;
+ }
+ }
+
+ this.exception = caughtException;
}
+
+ @Override
+ public Exception getException() {
+ return this.exception;
+ }
+ };
+
+ Thread localThread = new Thread(runnable);
+ localThread.start();
+ localThread.join();
+
+ if (runnable.getException() != null) {
+ throw runnable.getException();
}
- throw caughtException;
+ return localThread;
}
/**
@@ -200,6 +267,9 @@ public class DataMigrator {
void work() throws Exception;
}
+ private interface RunnableWithException extends Runnable {
+ Exception getException();
+ }
private class AggregateDataMigrator implements CallableMigrationWorker {
@@ -210,17 +280,17 @@ public class DataMigrator {
* @param query
* @param metricsTable
*/
- public AggregateDataMigrator(MetricsTable metricsTable) {
+ public AggregateDataMigrator(MetricsTable metricsTable) throws Exception {
this.metricsTable = metricsTable;
if (MetricsTable.ONE_HOUR.equals(this.metricsTable)) {
- this.query = MeasurementDataNumeric1H.QUERY_FIND_ALL;
- } else if (MetricsTable.ONE_HOUR.equals(this.metricsTable)) {
- this.query = MeasurementDataNumeric6H.QUERY_FIND_ALL;
+ this.query = MigrationQuery.SELECT_1H_DATA.toString();
+ } else if (MetricsTable.SIX_HOUR.equals(this.metricsTable)) {
+ this.query = MigrationQuery.SELECT_6H_DATA.toString();
} else if (MetricsTable.TWENTY_FOUR_HOUR.equals(this.metricsTable)) {
- this.query = MeasurementDataNumeric1D.QUERY_FIND_ALL;
+ this.query = MigrationQuery.SELECT_1D_DATA.toString();
} else {
- this.query = null;
+                throw new Exception("MetricsTable " + metricsTable.toString() + " not supported by this migrator.");
}
}
@@ -234,12 +304,12 @@ public class DataMigrator {
@SuppressWarnings("unchecked")
private void performedBatchedMigration() throws Exception {
- List<MeasurementDataNumericAggregateInterface> existingData;
+ List<Object[]> existingData;
while (true) {
- Query q = entityManager.createNamedQuery(query);
- q.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
-                existingData = (List<MeasurementDataNumericAggregateInterface>) q.getResultList();
+ Query nativeQuery = entityManager.createNativeQuery(query);
+ nativeQuery.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
+ existingData = nativeQuery.getResultList();
if (existingData.size() == 0) {
break;
@@ -262,15 +332,16 @@ public class DataMigrator {
@SuppressWarnings("unchecked")
private void performFullMigration() throws Exception {
- List<MeasurementDataNumericAggregateInterface> existingData = null;
+ List<Object[]> existingData = null;
int lastMigratedRecord = 0;
+ Query nativeQuery;
while (true) {
- Query q = entityManager.createNamedQuery(query);
- q.setFirstResult(lastMigratedRecord + 1);
- q.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
+ nativeQuery = entityManager.createNativeQuery(query);
+ nativeQuery.setFirstResult(lastMigratedRecord + 1);
+ nativeQuery.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
-                existingData = (List<MeasurementDataNumericAggregateInterface>) q.getResultList();
+ existingData = nativeQuery.getResultList();
if (existingData.size() == 0) {
break;
@@ -285,59 +356,64 @@ public class DataMigrator {
                        + " data. Attempting to insert the current batch of data one more time");
insertDataToCassandra(existingData);
}
+
+                    log.info("- " + metricsTable + " - " + lastMigratedRecord + " -");
}
}
-    private void insertDataToCassandra(List<MeasurementDataNumericAggregateInterface> existingData)
+    private void insertDataToCassandra(List<Object[]> existingData)
            throws Exception {
- Statement statement = null;
-
            List<ResultSetFuture> resultSetFutures = new ArrayList<ResultSetFuture>();
-            List<Statement> statementsAccumulator = new ArrayList<Statement>();
+ Batch batch = QueryBuilder.batch();
+ int batchSize = 0;
//only need approximate TTL to speed up processing
//given that each batch is processed within seconds, getting the
//system time once per batch has minimal impact on the record retention
+ long creationTimeMillis;
+ long itemTTLSeconds;
long currentTimeMillis = System.currentTimeMillis();
long expectedTTLMillis = metricsTable.getTTLinMilliseconds() * 10;
- long itemTTLSeconds = 0;
-
- for (MeasurementDataNumericAggregateInterface measurement : existingData) {
-            itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + measurement.getTimestamp()) / 1000l;
-
- statement = QueryBuilder.insertInto(metricsTable.toString())
- .value("schedule_id", measurement.getScheduleId())
- .value("time", new Date(measurement.getTimestamp()))
- .value("type", AggregateType.MIN.ordinal())
- .value("value", measurement.getMin())
- .using(ttl((int) itemTTLSeconds));;
- statementsAccumulator.add(statement);
-
- statement = insertInto(metricsTable.toString())
- .value("schedule_id", measurement.getScheduleId())
- .value("time", new Date(measurement.getTimestamp()))
- .value("type", AggregateType.MAX.ordinal())
- .value("value", measurement.getMax())
- .using(ttl((int) itemTTLSeconds));
- statementsAccumulator.add(statement);
-
-            statement = insertInto(metricsTable.toString()).value("schedule_id", measurement.getScheduleId())
- .value("time", new Date(measurement.getTimestamp()))
- .value("type", AggregateType.AVG.ordinal())
-                .value("value", Double.parseDouble(measurement.getValue().toString()))
- .using(ttl((int) itemTTLSeconds));
- statementsAccumulator.add(statement);
-
-            if (statementsAccumulator.size() >= MAX_RECORDS_TO_BATCH_TO_CASSANDRA) {
-                resultSetFutures.add(session.executeAsync(QueryBuilder.batch((Statement[]) statementsAccumulator
-                    .toArray(new Statement[statementsAccumulator.size()]))));
-                statementsAccumulator.clear();
+
+
+ for (Object[] rawMeasurement : existingData) {
+                creationTimeMillis = Long.parseLong(rawMeasurement[MigrationQuery.TIMESTAMP_INDEX].toString());
+                itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + creationTimeMillis) / 1000l;
+
+                if(itemTTLSeconds > 0 ){
+                    batch.add(QueryBuilder.insertInto(metricsTable.toString())
+                        .value("schedule_id", rawMeasurement[MigrationQuery.SCHEDULE_INDEX])
+                        .value("time", new Date(creationTimeMillis))
+                        .value("type", AggregateType.AVG.ordinal())
+                        .value("value", rawMeasurement[MigrationQuery.VALUE_INDEX])
+                        .using(ttl((int) itemTTLSeconds)));
+
+                    batch.add(QueryBuilder.insertInto(metricsTable.toString())
+                        .value("schedule_id", rawMeasurement[MigrationQuery.SCHEDULE_INDEX])
+                        .value("time", new Date(creationTimeMillis))
+                        .value("type", AggregateType.MIN.ordinal())
+                        .value("value", rawMeasurement[MigrationQuery.MIN_VALUE_INDEX])
+                        .using(ttl((int) itemTTLSeconds)));
+
+                    batch.add(QueryBuilder.insertInto(metricsTable.toString())
+                        .value("schedule_id", rawMeasurement[MigrationQuery.SCHEDULE_INDEX])
+                        .value("time", new Date(creationTimeMillis))
+                        .value("type", AggregateType.MAX.ordinal())
+                        .value("value", rawMeasurement[MigrationQuery.MAX_VALUE_INDEX])
+                        .using(ttl((int) itemTTLSeconds)));
+
+ batchSize += 3;
+ }
+
+ if (batchSize >= MAX_AGGREGATE_BATCH_TO_CASSANDRA) {
+ resultSetFutures.add(session.executeAsync(batch));
+ batch = QueryBuilder.batch();
+ batchSize = 0;
}
}
- if (statementsAccumulator.size() != 0) {
-                resultSetFutures.add(session.executeAsync(QueryBuilder.batch((Statement[]) statementsAccumulator
-                    .toArray(new Statement[statementsAccumulator.size()]))));
+ if (batchSize != 0) {
+ resultSetFutures.add(session.executeAsync(batch));
}
for (ResultSetFuture future : resultSetFutures) {
@@ -366,13 +442,14 @@ public class DataMigrator {
while (!tablesNotProcessed.isEmpty()) {
String table = tablesNotProcessed.peek();
-            String selectQuery = "SELECT schedule_id, time_stamp, value FROM " + table;
-            String deleteQuery = "DELETE FROM " + table + " WHERE schedule_id = ?";
+            String selectQuery = String.format(MigrationQuery.SELECT_RAW_DATA.toString(), table);
+            String deleteQuery = String.format(MigrationQuery.DELETE_RAW_ENTRY.toString(), table);
+ Query nativeQuery;
while (true) {
- Query query = entityManager.createNativeQuery(selectQuery);
- query.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
- existingData = query.getResultList();
+ nativeQuery = entityManager.createNativeQuery(selectQuery);
+ nativeQuery.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
+ existingData = nativeQuery.getResultList();
if (existingData.size() == 0) {
break;
@@ -386,11 +463,10 @@ public class DataMigrator {
insertDataToCassandra(existingData);
}
- query = entityManager.createNativeQuery(deleteQuery);
-
+ nativeQuery = entityManager.createNativeQuery(deleteQuery);
for (Object[] rawDataPoint : existingData) {
-                query.setParameter(0, Integer.parseInt(rawDataPoint[0].toString()));
-                query.executeUpdate();
+                nativeQuery.setParameter(0, Integer.parseInt(rawDataPoint[0].toString()));
+                nativeQuery.executeUpdate();
}
}
@@ -405,17 +481,17 @@ public class DataMigrator {
while (!tablesNotProcessed.isEmpty()) {
String table = tablesNotProcessed.peek();
+            String selectQuery = String.format(MigrationQuery.SELECT_RAW_DATA.toString(), table);
+ Query nativeSelectQuery = entityManager.createNativeQuery(selectQuery);
+
log.info("Start migrating raw table: " + table);
int lastMigratedRecord = 0;
-
while (true) {
-                String selectQuery = "SELECT schedule_id, time_stamp, value FROM " + table;
- Query query = entityManager.createNativeQuery(selectQuery);
- query.setFirstResult(lastMigratedRecord + 1);
- query.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
+ nativeSelectQuery.setFirstResult(lastMigratedRecord + 1);
+ nativeSelectQuery.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
- existingData = query.getResultList();
+ existingData = nativeSelectQuery.getResultList();
if (existingData.size() == 0) {
break;
@@ -431,9 +507,7 @@ public class DataMigrator {
insertDataToCassandra(existingData);
}
- if (lastMigratedRecord % MAX_RECORDS_TO_LOAD_FROM_SQL == 0) {
-                    log.info("------------" + lastMigratedRecord + "---------------------");
- }
+                log.info("- " + table + " - " + lastMigratedRecord + " -");
}
            log.info("Done migrating raw table" + table + "---------------------");
@@ -443,40 +517,40 @@ public class DataMigrator {
        private void insertDataToCassandra(List<Object[]> existingData) throws Exception {
            List<ResultSetFuture> resultSetFutures = new ArrayList<ResultSetFuture>();
-            List<Statement> statementsAccumulator = new ArrayList<Statement>();
+ Batch batch = QueryBuilder.batch();
+ int batchSize = 0;
//only need approximate TTL to speed up processing
//given that each batch is processed within seconds, getting the
//system time once per batch has minimal impact on the record retention
+ long creationTimeMillis;
+ long itemTTLSeconds;
long currentTimeMillis = System.currentTimeMillis();
long expectedTTLMillis = MetricsTable.RAW.getTTLinMilliseconds() * 10;
- long creationTimeMillis = 0;
- long itemTTLSeconds = 0;
+
for (Object[] rawDataPoint : existingData) {
- creationTimeMillis = Long.parseLong(rawDataPoint[1].toString());
+                creationTimeMillis = Long.parseLong(rawDataPoint[MigrationQuery.TIMESTAMP_INDEX].toString());
                itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + creationTimeMillis) / 1000l;
if (itemTTLSeconds > 0) {
-                    Statement boundStatement = QueryBuilder.insertInto(MetricsTable.RAW.toString())
-                        .value("schedule_id", Integer.parseInt(rawDataPoint[0].toString()))
+                    batch.add(QueryBuilder.insertInto(MetricsTable.RAW.toString())
+                        .value("schedule_id", rawDataPoint[MigrationQuery.SCHEDULE_INDEX])
                        .value("time", new Date(creationTimeMillis))
-                        .value("value", Double.parseDouble(rawDataPoint[2].toString()))
-                        .using(ttl((int) itemTTLSeconds));
-
-                    statementsAccumulator.add(boundStatement);
+                        .value("value", rawDataPoint[MigrationQuery.VALUE_INDEX])
+                        .using(ttl((int) itemTTLSeconds)));
+ batchSize++;
}
-            if (statementsAccumulator.size() >= MAX_RECORDS_TO_BATCH_TO_CASSANDRA) {
-                resultSetFutures.add(session.executeAsync(QueryBuilder.batch((Statement[]) statementsAccumulator
-                    .toArray(new Statement[statementsAccumulator.size()]))));
-                statementsAccumulator.clear();
+ if (batchSize >= MAX_RAW_BATCH_TO_CASSANDRA) {
+ resultSetFutures.add(session.executeAsync(batch));
+ batch = QueryBuilder.batch();
+ batchSize = 0;
}
}
- if (statementsAccumulator.size() != 0) {
-                resultSetFutures.add(session.executeAsync(QueryBuilder.batch((Statement[]) statementsAccumulator
-                    .toArray(new Statement[statementsAccumulator.size()]))));
+ if (batchSize != 0) {
+ resultSetFutures.add(session.executeAsync(batch));
}
for (ResultSetFuture future : resultSetFutures) {
@@ -489,20 +563,21 @@ public class DataMigrator {
private class DeleteAllData implements CallableMigrationWorker {
public void work() {
-            Query q = entityManager.createNamedQuery(MeasurementDataNumeric1H.QUERY_DELETE_ALL);
-            q.executeUpdate();
+            Query nativeQuery = entityManager.createNativeQuery(MigrationQuery.DELETE_1H_ALL_DATA.toString());
+            nativeQuery.executeUpdate();
-            q = entityManager.createNamedQuery(MeasurementDataNumeric6H.QUERY_DELETE_ALL);
-            q.executeUpdate();
+            nativeQuery = entityManager.createNativeQuery(MigrationQuery.DELETE_6H_ALL_DATA.toString());
+            nativeQuery.executeUpdate();
-            q = entityManager.createNamedQuery(MeasurementDataNumeric1D.QUERY_DELETE_ALL);
-            q.executeUpdate();
+            nativeQuery = entityManager.createNativeQuery(MigrationQuery.DELETE_1D_ALL_DATA.toString());
+            nativeQuery.executeUpdate();
for (String table : getRawDataTables()) {
- String deleteAllData = "DELETE FROM " + table;
- q = entityManager.createNativeQuery(deleteAllData);
- q.executeUpdate();
+                String deleteAllData = String.format(MigrationQuery.DELETE_RAW_ALL_DATA.toString(), table);
+ nativeQuery = entityManager.createNativeQuery(deleteAllData);
+ nativeQuery.executeUpdate();
}
}
}
}
+
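Before the second diff, it is worth spelling out the retry pattern that retryOnFailure() now uses: the worker runs on its own thread, the thread is joined, and any exception captured once the retries are exhausted is rethrown on the calling thread. A condensed, self-contained sketch follows; the interfaces mirror those in the diff above, the class and method names are illustrative, and the real method also returns the joined Thread (dropped here for brevity):

interface CallableMigrationWorker {
    void work() throws Exception;
}

interface RunnableWithException extends Runnable {
    Exception getException();
}

class RetryOnFailureSketch {

    private static final int MAX_NUMBER_OF_FAILURES = 5;

    static void runWithRetry(final CallableMigrationWorker migrator) throws Exception {
        RunnableWithException runnable = new RunnableWithException() {
            private Exception exception;

            public void run() {
                int numberOfFailures = 0;
                Exception caughtException = null;

                while (numberOfFailures < MAX_NUMBER_OF_FAILURES) {
                    try {
                        migrator.work();
                        return; // success, no exception recorded
                    } catch (Exception e) {
                        caughtException = e;
                        numberOfFailures++;
                    }
                }
                this.exception = caughtException; // retries exhausted
            }

            public Exception getException() {
                return exception;
            }
        };

        Thread thread = new Thread(runnable);
        thread.start();
        thread.join(); // block until the worker thread finishes

        if (runnable.getException() != null) {
            throw runnable.getException(); // rethrow on the calling thread
        }
    }
}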
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigratorRunner.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigratorRunner.java
index 37e941b..41c5969 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigratorRunner.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigratorRunner.java
@@ -44,6 +44,7 @@ import org.hibernate.ejb.Ejb3Configuration;
import org.rhq.cassandra.CassandraNode;
+
/**
* @author Stefan Negrea
*
@@ -90,18 +91,31 @@ public class DataMigratorRunner {
private String sqlDB;
    private Option sqlDBOption = OptionBuilder.withLongOpt("sql-db").hasArg().create();
+    //Migration
+    private boolean disableRaw;
+    private Option disableRawOption = OptionBuilder.withLongOpt("disable-raw-migration").create();
+
+    private boolean disable1H;
+    private Option disable1HOption = OptionBuilder.withLongOpt("disable-1h-migration").create();
+
+    private boolean disable6H;
+    private Option disable6HOption = OptionBuilder.withLongOpt("disable-6h-migration").create();
+
+    private boolean disable1D;
+    private Option disable1DOption = OptionBuilder.withLongOpt("disable-1d-migration").create();
+
/**
* @param args
* @throws ParseException
*/
public static void main(String[] args) throws Exception {
-
try{
DataMigratorRunner runner = new DataMigratorRunner();
runner.configure(args);
runner.run();
} catch (Exception e) {
System.out.println(e);
+ e.printStackTrace();
}
System.exit(0);
@@ -120,11 +134,17 @@ public class DataMigratorRunner {
options.addOption(sqlPortOption);
options.addOption(sqlDBOption);
+ options.addOption(disableRawOption);
+ options.addOption(disable1HOption);
+ options.addOption(disable6HOption);
+ options.addOption(disable1DOption);
+
CommandLineParser parser = new PosixParser();
CommandLine commandLine = parser.parse(options, args);
parseCassandraOptionsWithDefault(commandLine);
parseSQLOptionsWithDefault(commandLine);
+ parseMigrationOptionsWithDefault(commandLine);
}
private void run() throws Exception {
@@ -139,9 +159,10 @@ public class DataMigratorRunner {
DataMigrator migrator = new DataMigrator(entityManager, session);
migrator.preserveData();
- migrator.run1DAggregateDataMigration(false);
- migrator.run6HAggregateDataMigration(false);
- migrator.run1HAggregateDataMigration(false);
+ migrator.runRawDataMigration(!disableRaw);
+ migrator.run1HAggregateDataMigration(!disable1H);
+ migrator.run6HAggregateDataMigration(!disable6H);
+ migrator.run1DAggregateDataMigration(!disable1D);
migrator.migrateData();
}
@@ -240,4 +261,31 @@ public class DataMigratorRunner {
sqlDB = "rhq_db";
}
}
-}
\ No newline at end of file
+
+ private void parseMigrationOptionsWithDefault(CommandLine commandLine) {
+ if (commandLine.hasOption(disableRawOption.getLongOpt())) {
+ disableRaw = true;
+ } else {
+ disableRaw = false;
+ }
+
+ if (commandLine.hasOption(disable1HOption.getLongOpt())) {
+ disable1H = true;
+ } else {
+ disable1H = false;
+ }
+
+ if (commandLine.hasOption(disable6HOption.getLongOpt())) {
+ disable6H = true;
+ } else {
+ disable6H = false;
+ }
+
+ if (commandLine.hasOption(disable1DOption.getLongOpt())) {
+ disable1D = true;
+ } else {
+ disable1D = false;
+ }
+ }
+}
+
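For reference, the four long options registered above can be combined to skip individual migration stages. A hypothetical invocation (the launcher script and classpath are not part of this diff):

    java -cp <classpath> org.rhq.server.metrics.DataMigratorRunner --disable-raw-migration --disable-6h-migration

With those two flags the runner passes runRawDataMigration(false) and run6HAggregateDataMigration(false) to the migrator, so only the 1-hour and 1-day aggregate tables are migrated.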