[rhq] Branch 'feature/cassandra-backend' - 5 commits - .classpath modules/enterprise modules/plugins
by snegrea
.classpath | 1
modules/enterprise/server/server-metrics/pom.xml | 4
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java | 705 ---------
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigratorRunner.java | 644 --------
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigrator.java | 730 ++++++++++
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigratorRunner.java | 668 +++++++++
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingDataBulkExportSource.java | 124 +
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingDataJPABulkExportSource.java | 58
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingDataJPASource.java | 64
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingDataSource.java | 34
modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingPostgresDataBulkExportSource.java | 68
modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/DataSourceTest.java | 169 ++
modules/plugins/cassandra/pom.xml | 56
modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java | 74 -
modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java | 86 +
modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml | 11
16 files changed, 2047 insertions(+), 1449 deletions(-)
New commits:
commit 4703c030ca9d63510824bf0544aa855361049633
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Tue Apr 30 17:15:30 2013 -0500
Add configurable authenticator property to the plugin. The authenticator is detected on discovery and then used for CQL connections.
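For reference, a minimal standalone sketch of the connection logic this commit introduces, using the same DataStax Java driver 1.x API that appears in the diff below (Cluster.builder(), SimpleAuthInfoProvider); the class and method names here are illustrative only:

    import com.datastax.driver.core.Cluster;
    import com.datastax.driver.core.Cluster.Builder;
    import com.datastax.driver.core.Session;
    import com.datastax.driver.core.SimpleAuthInfoProvider;

    public class CqlConnectExample {
        // Credentials are attached only when the node is configured with
        // PasswordAuthenticator; the default AllowAllAuthenticator needs none.
        static Session buildSession(String host, int nativePort, String clusterName,
            String authenticatorClassName, String username, String password) {
            Builder clusterBuilder = Cluster.builder()
                .addContactPoints(host)
                .withoutMetrics()
                .withPort(nativePort);
            if (authenticatorClassName.endsWith("PasswordAuthenticator")) {
                clusterBuilder = clusterBuilder.withAuthInfoProvider(
                    new SimpleAuthInfoProvider().add("username", username).add("password", password));
            }
            return clusterBuilder.build().connect(clusterName);
        }
    }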
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
index e16fd88..0dd282e 100644
--- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
+++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
@@ -30,7 +30,7 @@ import static org.rhq.core.system.OperatingSystemType.WINDOWS;
import java.io.File;
import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.ProtocolOptions.Compression;
+import com.datastax.driver.core.Cluster.Builder;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleAuthInfoProvider;
@@ -73,7 +73,9 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
String host = context.getPluginConfiguration().getSimpleValue("host", "localhost");
String clusterName = context.getPluginConfiguration().getSimpleValue("clusterName", "unknown");
String username = context.getPluginConfiguration().getSimpleValue("username", "cassandra");
- String password = context.getPluginConfiguration().getSimpleValue("password", "password");
+ String password = context.getPluginConfiguration().getSimpleValue("password", "cassandra");
+ String authenticatorClassName = context.getPluginConfiguration().getSimpleValue("authenticator",
+ "org.apache.cassandra.auth.AllowAllAuthenticator");
Integer nativePort = 9042;
try {
@@ -83,16 +85,20 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
log.debug("Native transport port parsing failed...", e);
}
+
try {
- Cluster cluster = Cluster
+ Builder clusterBuilder = Cluster
.builder()
.addContactPoints(new String[] { host })
.withoutMetrics()
- .withPort(nativePort)
- .withCompression(Compression.NONE)
- .withAuthInfoProvider(new SimpleAuthInfoProvider().add("username", username).add("password", password))
- .build();
- this.cassandraSession = cluster.connect(clusterName);
+ .withPort(nativePort);
+
+ if (authenticatorClassName.endsWith("PasswordAuthenticator")) {
+ clusterBuilder = clusterBuilder.withAuthInfoProvider(new SimpleAuthInfoProvider().add("username",
+ username).add("password", password));
+ }
+
+ this.cassandraSession = clusterBuilder.build().connect(clusterName);
} catch (Exception e) {
log.error("Connect to Cassandra " + host + ":" + nativePort, e);
throw e;
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java
index febddf1..afb5cd9 100644
--- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java
+++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java
@@ -120,6 +120,10 @@ public class CassandraNodeDiscoveryComponent extends JMXDiscoveryComponent {
pluginConfig.put(new PropertySimple("nativeTransportPort", parsedProperties
.get("native_transport_port")));
}
+
+ if (parsedProperties.get("authenticator") != null) {
+ pluginConfig.put(new PropertySimple("authenticator", parsedProperties.get("authenticator")));
+ }
} catch (Exception e) {
log.error("YAML Configuration load exception ", e);
} finally {
diff --git a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
index 905d4b8..66f867d 100644
--- a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
+++ b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
@@ -47,6 +47,12 @@
<c:simple-property name="nativeTransportPort" description="The port on which Cassandra listens for CQL client connections." default="9042" type="integer"/>
<c:simple-property name="host" description="The host on which cassandra listens to CQL client connections" default="localhost"/>
<c:simple-property name="clusterName" description="Cluster name" default="localhost"/>
+ <c:simple-property name="authenticator" required="true" default="org.apache.cassandra.auth.AllowAllAuthenticator">
+ <c:property-options>
+ <c:option name="org.apache.cassandra.auth.AllowAllAuthenticator" value="org.apache.cassandra.auth.AllowAllAuthenticator"/>
+ <c:option name="org.apache.cassandra.auth.PasswordAuthenticator" value="org.apache.cassandra.auth.PasswordAuthenticator"/>
+ </c:property-options>
+ </c:simple-property>
</plugin-configuration>
<process-scan name="CassandraDaemon" query="process|basename|match=^java.*,arg|org.apache.cassandra.service.CassandraDaemon|match=.*"/>
commit befa92e41115183f94bda35f1a50c3d2a9e67042
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Tue Apr 30 15:50:00 2013 -0500
Integrate the experimental bulk export for Postgres into the data migrator code. Also, move all the migrator-related code to a sub-package.
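The diffstat shows the new ExistingPostgresDataBulkExportSource.java, whose implementation is not shown in this excerpt. A plausible sketch of the Postgres-specific export step, assuming the PostgreSQL JDBC driver's CopyManager API and using the "|" delimiter that ExistingDataBulkExportSource expects; the class and method names are hypothetical, not the actual implementation:

    import java.io.BufferedWriter;
    import java.io.File;
    import java.io.FileWriter;
    import java.sql.Connection;

    import org.postgresql.PGConnection;
    import org.postgresql.copy.CopyManager;

    public class PostgresCopyExportSketch {
        // Streams a SELECT straight to a delimited file with COPY,
        // avoiding per-row JPA overhead during migration.
        public static void exportToFile(Connection connection, String selectQuery, File target)
            throws Exception {
            CopyManager copyManager = ((PGConnection) connection).getCopyAPI();
            BufferedWriter writer = new BufferedWriter(new FileWriter(target), 64 * 1024);
            try {
                // COPY (query) TO STDOUT streams the rows to the supplied Writer.
                copyManager.copyOut("COPY (" + selectQuery + ") TO STDOUT WITH DELIMITER '|'", writer);
            } finally {
                writer.close();
            }
        }
    }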
diff --git a/modules/enterprise/server/server-metrics/pom.xml b/modules/enterprise/server/server-metrics/pom.xml
index 5220281..42f6216 100644
--- a/modules/enterprise/server/server-metrics/pom.xml
+++ b/modules/enterprise/server/server-metrics/pom.xml
@@ -188,7 +188,7 @@
</descriptorRefs>
<archive>
<manifest>
- <mainClass>org.rhq.server.metrics.DataMigratorRunner</mainClass>
+ <mainClass>org.rhq.server.metrics.migrator.DataMigratorRunner</mainClass>
<addClasspath>true</addClasspath>
</manifest>
</archive>
@@ -293,7 +293,7 @@
</descriptorRefs>
<archive>
<manifest>
- <mainClass>org.rhq.server.metrics.DataMigratorRunner</mainClass>
+ <mainClass>org.rhq.server.metrics.migrator.DataMigratorRunner</mainClass>
<addClasspath>true</addClasspath>
</manifest>
</archive>
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java
deleted file mode 100644
index 523e4d4..0000000
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigrator.java
+++ /dev/null
@@ -1,705 +0,0 @@
-/*
- * RHQ Management Platform
- * Copyright 2011, Red Hat Middleware LLC, and individual contributors
- * as indicated by the @author tags. See the copyright.txt file in the
- * distribution for a full listing of individual contributors.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-package org.rhq.server.metrics;
-
-import static com.datastax.driver.core.querybuilder.QueryBuilder.ttl;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Date;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Queue;
-
-import javax.persistence.EntityManager;
-import javax.persistence.Query;
-
-import com.datastax.driver.core.ResultSetFuture;
-import com.datastax.driver.core.Session;
-import com.datastax.driver.core.querybuilder.Batch;
-import com.datastax.driver.core.querybuilder.QueryBuilder;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.rhq.server.metrics.domain.AggregateType;
-import org.rhq.server.metrics.domain.MetricsTable;
-
-/**
- * @author Stefan Negrea
- *
- */
-public class DataMigrator {
-
- private final Log log = LogFactory.getLog(DataMigrator.class);
-
-
- private static final int MAX_RECORDS_TO_LOAD_FROM_SQL = 30000;
- private static final int MAX_RAW_BATCH_TO_CASSANDRA = 100;
- private static final int MAX_AGGREGATE_BATCH_TO_CASSANDRA = 50;
- private static final int MAX_NUMBER_OF_FAILURES = 5;
- private static final long NUMBER_OF_BATCHES_FOR_ESTIMATION = 4;
- private static final double UNDER_ESTIMATION_FACTOR = .10;
-
-
- private enum MigrationQuery {
- SELECT_1H_DATA("SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1H"),
- SELECT_6H_DATA("SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_6H"),
- SELECT_1D_DATA("SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1D"),
-
- DELETE_1H_DATA("DELETE FROM RHQ_MEASUREMENT_DATA_NUM_1H"),
- DELETE_6H_DATA("DELETE FROM RHQ_MEASUREMENT_DATA_NUM_6H"),
- DELETE_1D_DATA("DELETE FROM RHQ_MEASUREMENT_DATA_NUM_1D"),
-
-
- COUNT_1H_DATA("SELECT COUNT(*) FROM RHQ_MEASUREMENT_DATA_NUM_1H"),
- COUNT_6H_DATA("SELECT COUNT(*) FROM RHQ_MEASUREMENT_DATA_NUM_6H"),
- COUNT_1D_DATA("SELECT COUNT(*) FROM RHQ_MEASUREMENT_DATA_NUM_1D"),
-
- COUNT_RAW("SELECT COUNT(*) FROM %s"),
- SELECT_RAW_DATA("SELECT schedule_id, time_stamp, value FROM %s"),
- DELETE_RAW_ALL_DATA("DELETE FROM %s"),
- DELETE_RAW_ENTRY("DELETE FROM %s WHERE schedule_id = ?");
-
- public static final int SCHEDULE_INDEX = 0;
- public static final int TIMESTAMP_INDEX = 1;
- public static final int VALUE_INDEX = 2;
- public static final int MIN_VALUE_INDEX = 3;
- public static final int MAX_VALUE_INDEX = 4;
-
- private String query;
-
- private MigrationQuery(String query){
- this.query = query;
- }
-
- /**
- * @return the query
- */
- public String getQuery() {
- return query;
- }
-
- @Override
- public String toString() {
- return query;
- }
- }
-
- private final EntityManager entityManager;
-
- private final Session session;
-
- private boolean deleteDataImmediatelyAfterMigration;
- private boolean deleteAllDataAtEndOfMigration;
-
- private boolean runRawDataMigration;
- private boolean run1HAggregateDataMigration;
- private boolean run6HAggregateDataMigration;
- private boolean run1DAggregateDataMigration;
-
- private long estimation;
-
- public DataMigrator(EntityManager entityManager, Session session) {
- this.entityManager = entityManager;
- this.session = session;
-
- this.deleteDataImmediatelyAfterMigration = false;
- this.deleteAllDataAtEndOfMigration = false;
- this.runRawDataMigration = true;
- this.run1HAggregateDataMigration = true;
- this.run6HAggregateDataMigration = true;
- this.run1DAggregateDataMigration = true;
- }
-
- public void runRawDataMigration(boolean value) {
- this.runRawDataMigration = value;
- }
-
- public void run1HAggregateDataMigration(boolean value) {
- this.run1HAggregateDataMigration = value;
- }
-
- public void run6HAggregateDataMigration(boolean value) {
- this.run6HAggregateDataMigration = value;
- }
-
- public void run1DAggregateDataMigration(boolean value) {
- this.run1DAggregateDataMigration = value;
- }
-
-
- public void deleteDataImmediatelyAfterMigration() {
- this.deleteDataImmediatelyAfterMigration = true;
- this.deleteAllDataAtEndOfMigration = false;
- }
-
- public void deleteAllDataAtEndOfMigration() {
- this.deleteAllDataAtEndOfMigration = true;
- this.deleteDataImmediatelyAfterMigration = false;
- }
-
- public void preserveData() {
- this.deleteAllDataAtEndOfMigration = false;
- this.deleteDataImmediatelyAfterMigration = false;
- }
-
- public long estimate() throws Exception {
- this.estimation = 0;
- if (runRawDataMigration) {
- retryOnFailure(new RawDataMigrator(), Task.Estimate);
- }
-
- if (run1HAggregateDataMigration) {
- retryOnFailure(new AggregateDataMigrator(MetricsTable.ONE_HOUR), Task.Estimate);
- }
-
- if (run6HAggregateDataMigration) {
- retryOnFailure(new AggregateDataMigrator(MetricsTable.SIX_HOUR), Task.Estimate);
- }
-
- if (run1DAggregateDataMigration) {
- retryOnFailure(new AggregateDataMigrator(MetricsTable.TWENTY_FOUR_HOUR), Task.Estimate);
- }
-
- if (deleteAllDataAtEndOfMigration) {
- retryOnFailure(new DeleteAllData(), Task.Estimate);
- }
-
- estimation = (long) (estimation + estimation * UNDER_ESTIMATION_FACTOR);
-
- return estimation;
- }
-
- public void migrateData() throws Exception {
- if (runRawDataMigration) {
- retryOnFailure(new RawDataMigrator(), Task.Migrate);
- }
-
- if (run1HAggregateDataMigration) {
- retryOnFailure(new AggregateDataMigrator(MetricsTable.ONE_HOUR), Task.Migrate);
- }
-
- if (run6HAggregateDataMigration) {
- retryOnFailure(new AggregateDataMigrator(MetricsTable.SIX_HOUR), Task.Migrate);
- }
-
- if (run1DAggregateDataMigration) {
- retryOnFailure(new AggregateDataMigrator(MetricsTable.TWENTY_FOUR_HOUR), Task.Migrate);
- }
-
- if (deleteAllDataAtEndOfMigration) {
- retryOnFailure(new DeleteAllData(), Task.Migrate);
- }
- }
-
- public void deleteOldData() throws Exception {
- if (deleteAllDataAtEndOfMigration) {
- retryOnFailure(new DeleteAllData(), Task.Migrate);
- }
- }
-
- /**
- * Retries the migration {@link #MAX_NUMBER_OF_FAILURES} times before
- * failing the migration operation.
- *
- * @param migrator
- * @throws Exception
- */
- private Thread retryOnFailure(final CallableMigrationWorker migrator, final Task task)
- throws Exception {
-
- RunnableWithException runnable = new RunnableWithException() {
- private Exception exception;
-
- @Override
- public void run() {
- int numberOfFailures = 0;
- Exception caughtException = null;
-
- log.info(migrator.getClass());
-
- while (numberOfFailures < MAX_NUMBER_OF_FAILURES) {
- try {
- if (task == Task.Estimate) {
- estimation += migrator.estimate();
- } else {
- migrator.migrate();
- }
- return;
- } catch (Exception e) {
- log.error("Migrator " + migrator.getClass() + " failed. Retrying!", e);
-
- caughtException = e;
- numberOfFailures++;
- }
- }
-
- this.exception = caughtException;
- }
-
- @Override
- public Exception getException() {
- return this.exception;
- }
- };
-
- Thread localThread = new Thread(runnable);
- localThread.start();
- localThread.join();
-
- if (runnable.getException() != null) {
- throw runnable.getException();
- }
-
- return localThread;
- }
-
- /**
- * Returns a list of all the raw SQL metric tables.
- * There is no equivalent in Cassandra; all raw data is stored in a single column family.
- *
- * @return SQL raw metric tables
- */
- private String[] getRawDataTables() {
- int tableCount = 15;
- String tablePrefix = "RHQ_MEAS_DATA_NUM_R";
-
- String[] tables = new String[tableCount];
- for (int i = 0; i < tableCount; i++) {
- if (i < 10) {
- tables[i] = tablePrefix + "0" + i;
- } else {
- tables[i] = tablePrefix + i;
- }
- }
-
- return tables;
- }
-
- enum Task {
- Migrate, Estimate
- }
-
- private interface CallableMigrationWorker {
-
-
- long estimate() throws Exception;
-
- void migrate() throws Exception;
- }
-
- private interface RunnableWithException extends Runnable {
- Exception getException();
- }
-
- private class AggregateDataMigrator implements CallableMigrationWorker {
-
- private final String selectQuery;
- private final String deleteQuery;
- private final String countQuery;
- private final MetricsTable metricsTable;
-
- /**
- * @param query
- * @param metricsTable
- */
- public AggregateDataMigrator(MetricsTable metricsTable) throws Exception {
- this.metricsTable = metricsTable;
-
- if (MetricsTable.ONE_HOUR.equals(this.metricsTable)) {
- this.selectQuery = MigrationQuery.SELECT_1H_DATA.toString();
- this.deleteQuery = MigrationQuery.DELETE_1H_DATA.toString();
- this.countQuery = MigrationQuery.COUNT_1H_DATA.toString();
- } else if (MetricsTable.SIX_HOUR.equals(this.metricsTable)) {
- this.selectQuery = MigrationQuery.SELECT_6H_DATA.toString();
- this.deleteQuery = MigrationQuery.DELETE_6H_DATA.toString();
- this.countQuery = MigrationQuery.COUNT_6H_DATA.toString();
- } else if (MetricsTable.TWENTY_FOUR_HOUR.equals(this.metricsTable)) {
- this.selectQuery = MigrationQuery.SELECT_1D_DATA.toString();
- this.deleteQuery = MigrationQuery.DELETE_1D_DATA.toString();
- this.countQuery = MigrationQuery.COUNT_1D_DATA.toString();
- } else {
- throw new Exception("MetricsTable " + metricsTable.toString() + " not supported by this migrator.");
- }
- }
-
- @Override
- public long estimate() throws Exception {
- Query nativeQuery = entityManager.createNativeQuery(this.countQuery);
- long recordCount = Long.parseLong(nativeQuery.getSingleResult().toString());
- long estimatedTimeToMigrate = this.performMigration(Task.Estimate);
-
- long estimation = (recordCount / (long) MAX_RECORDS_TO_LOAD_FROM_SQL / NUMBER_OF_BATCHES_FOR_ESTIMATION)
- * estimatedTimeToMigrate;
- return estimation;
- }
-
- public void migrate() throws Exception {
- performMigration(Task.Migrate);
- if (deleteDataImmediatelyAfterMigration) {
- deleteTableData();
- }
- }
-
- private void deleteTableData() throws Exception {
- int failureCount = 0;
- while (failureCount < MAX_NUMBER_OF_FAILURES) {
- try {
- entityManager.getTransaction().begin();
- Query nativeQuery = entityManager.createNativeQuery(this.deleteQuery);
- nativeQuery.executeUpdate();
- entityManager.getTransaction().commit();
- log.info("- " + metricsTable.toString() + " - Cleaned -");
- } catch (Exception e) {
- log.error("Failed to delete " + metricsTable.toString()
- + " data. Attempting to delete data one more time...");
-
- failureCount++;
- if (failureCount == MAX_NUMBER_OF_FAILURES) {
- throw e;
- }
- }
- }
- }
-
- @SuppressWarnings("unchecked")
- private long performMigration(Task task) throws Exception {
- long migrationStartTime = System.currentTimeMillis();
- long numberOfBatchesMigrated = 0;
-
- List<Object[]> existingData;
- int failureCount;
- Query nativeQuery;
-
- int lastMigratedRecord = 0;
-
- while (true) {
- nativeQuery = entityManager.createNativeQuery(this.selectQuery);
- nativeQuery.setFirstResult(lastMigratedRecord);
- nativeQuery.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
-
- existingData = nativeQuery.getResultList();
-
- if (existingData.size() == 0) {
- break;
- }
-
- lastMigratedRecord += existingData.size();
-
- failureCount = 0;
- while (failureCount < MAX_NUMBER_OF_FAILURES) {
- try {
- insertDataToCassandra(existingData);
- break;
- } catch (Exception e) {
- log.error("Failed to insert " + metricsTable.toString()
- + " data. Attempting to insert the current batch of data one more time");
- log.error(e);
-
- failureCount++;
- if (failureCount == MAX_NUMBER_OF_FAILURES) {
- throw e;
- }
- }
- }
-
- log.info("- " + metricsTable + " - " + lastMigratedRecord + " -");
-
- numberOfBatchesMigrated++;
- if (Task.Estimate.equals(task) && numberOfBatchesMigrated >= NUMBER_OF_BATCHES_FOR_ESTIMATION) {
- break;
- }
- }
-
- return System.currentTimeMillis() - migrationStartTime;
- }
-
- private void insertDataToCassandra(List<Object[]> existingData)
- throws Exception {
- List<ResultSetFuture> resultSetFutures = new ArrayList<ResultSetFuture>();
- Batch batch = QueryBuilder.batch();
- int batchSize = 0;
-
- //only need approximate TTL to speed up processing
- //given that each batch is processed within seconds, getting the
- //system time once per batch has minimal impact on the record retention
- long creationTimeMillis;
- long itemTTLSeconds;
- long currentTimeMillis = System.currentTimeMillis();
- long expectedTTLMillis = metricsTable.getTTLinMilliseconds() * 10;
-
-
- for (Object[] rawMeasurement : existingData) {
- creationTimeMillis = Long.parseLong(rawMeasurement[MigrationQuery.TIMESTAMP_INDEX].toString());
- itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + creationTimeMillis) / 1000l;
-
- if(itemTTLSeconds > 0 ){
- batch.add(QueryBuilder.insertInto(metricsTable.toString())
- .value("schedule_id",Integer.parseInt(rawMeasurement[MigrationQuery.SCHEDULE_INDEX].toString()))
- .value("time", new Date(creationTimeMillis))
- .value("type", AggregateType.AVG.ordinal())
- .value("value", Double.parseDouble(rawMeasurement[MigrationQuery.VALUE_INDEX].toString()))
- .using(ttl((int) itemTTLSeconds)));
-
- batch.add(QueryBuilder.insertInto(metricsTable.toString())
- .value("schedule_id", Integer.parseInt(rawMeasurement[MigrationQuery.SCHEDULE_INDEX].toString()))
- .value("time", new Date(creationTimeMillis))
- .value("type", AggregateType.MIN.ordinal())
- .value("value", Double.parseDouble(rawMeasurement[MigrationQuery.MIN_VALUE_INDEX].toString()))
- .using(ttl((int) itemTTLSeconds)));
-
- batch.add(QueryBuilder.insertInto(metricsTable.toString())
- .value("schedule_id", Integer.parseInt(rawMeasurement[MigrationQuery.SCHEDULE_INDEX].toString()))
- .value("time", new Date(creationTimeMillis))
- .value("type", AggregateType.MAX.ordinal())
- .value("value", Double.parseDouble(rawMeasurement[MigrationQuery.MAX_VALUE_INDEX].toString()))
- .using(ttl((int) itemTTLSeconds)));
-
- batchSize += 3;
- }
-
- if (batchSize >= MAX_AGGREGATE_BATCH_TO_CASSANDRA) {
- resultSetFutures.add(session.executeAsync(batch));
- batch = QueryBuilder.batch();
- batchSize = 0;
- }
- }
-
- if (batchSize != 0) {
- resultSetFutures.add(session.executeAsync(batch));
- }
-
- for (ResultSetFuture future : resultSetFutures) {
- future.get();
- }
- }
- }
-
-
- private class RawDataMigrator implements CallableMigrationWorker {
-
- Queue<String> tablesNotProcessed = new LinkedList<String>(Arrays.asList(getRawDataTables()));
-
- public long estimate() throws Exception {
- long recordCount = 0;
- for (String table : getRawDataTables()) {
- String countQuery = String.format(MigrationQuery.COUNT_RAW.toString(), table);
- Query nativeQuery = entityManager.createNativeQuery(countQuery);
- recordCount += Long.parseLong(nativeQuery.getSingleResult().toString());
- }
-
- long estimatedTimeToMigrate = this.performMigration(Task.Estimate);
- long estimation = (recordCount / (long) MAX_RECORDS_TO_LOAD_FROM_SQL / NUMBER_OF_BATCHES_FOR_ESTIMATION)
- * estimatedTimeToMigrate;
- return estimation;
- }
-
- public void migrate() throws Exception {
- performMigration(Task.Migrate);
- }
-
- @SuppressWarnings("unchecked")
- private long performMigration(Task task) throws Exception {
- long migrationStartTime = System.currentTimeMillis();
- long numberOfBatchesMigrated = 0;
-
- List<Object[]> existingData;
- int failureCount;
-
- while (!tablesNotProcessed.isEmpty()) {
- String table = tablesNotProcessed.peek();
-
- String selectQuery = String.format(MigrationQuery.SELECT_RAW_DATA.toString(), table);
- Query nativeSelectQuery = entityManager.createNativeQuery(selectQuery);
-
- log.info("Start migrating raw table: " + table);
-
- int lastMigratedRecord = 0;
- while (true) {
- nativeSelectQuery.setFirstResult(lastMigratedRecord);
- nativeSelectQuery.setMaxResults(MAX_RECORDS_TO_LOAD_FROM_SQL);
-
- existingData = nativeSelectQuery.getResultList();
-
- if (existingData.size() == 0) {
- break;
- }
-
- lastMigratedRecord += existingData.size();
-
- failureCount = 0;
- while (failureCount < MAX_NUMBER_OF_FAILURES) {
- try {
- insertDataToCassandra(existingData);
- break;
- } catch (Exception e) {
- log.error("Failed to insert " + MetricsTable.RAW.toString()
- + " data. Attempting to insert the current batch of data one more time");
- log.error(e);
-
-
- failureCount++;
- if (failureCount == MAX_NUMBER_OF_FAILURES) {
- throw e;
- }
- }
- }
-
- log.info("- " + table + " - " + lastMigratedRecord + " -");
-
- numberOfBatchesMigrated++;
- if (Task.Estimate.equals(task) && numberOfBatchesMigrated >= NUMBER_OF_BATCHES_FOR_ESTIMATION) {
- break;
- }
- }
-
- if (Task.Migrate.equals(task)) {
- log.info("Done migrating raw table" + table + "---------------------");
-
- if (deleteDataImmediatelyAfterMigration) {
- deleteTableData(table);
- }
- } else if (numberOfBatchesMigrated >= NUMBER_OF_BATCHES_FOR_ESTIMATION) {
- break;
- }
-
- tablesNotProcessed.poll();
- }
-
- return System.currentTimeMillis() - migrationStartTime;
- }
-
- private void deleteTableData(String table) throws Exception {
- String deleteQuery = String.format(MigrationQuery.DELETE_RAW_ENTRY.toString(), table);
- int failureCount = 0;
- while (failureCount < MAX_NUMBER_OF_FAILURES) {
- try {
- entityManager.getTransaction().begin();
- Query nativeQuery = entityManager.createNativeQuery(deleteQuery);
- nativeQuery.executeUpdate();
- entityManager.getTransaction().commit();
- log.info("- " + table + " - Cleaned -");
- } catch (Exception e) {
- log.error("Failed to delete " + table + " data. Attempting to delete data one more time...");
-
- failureCount++;
- if (failureCount == MAX_NUMBER_OF_FAILURES) {
- throw e;
- }
- }
- }
- }
-
- private void insertDataToCassandra(List<Object[]> existingData) throws Exception {
- List<ResultSetFuture> resultSetFutures = new ArrayList<ResultSetFuture>();
- Batch batch = QueryBuilder.batch();
- int batchSize = 0;
-
- //only need approximate TTL to speed up processing
- //given that each batch is processed within seconds, getting the
- //system time once per batch has minimal impact on the record retention
- long creationTimeMillis;
- long itemTTLSeconds;
- long currentTimeMillis = System.currentTimeMillis();
- long expectedTTLMillis = MetricsTable.RAW.getTTLinMilliseconds() * 10;
-
-
- for (Object[] rawDataPoint : existingData) {
- creationTimeMillis = Long.parseLong(rawDataPoint[MigrationQuery.TIMESTAMP_INDEX].toString());
- itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + creationTimeMillis) / 1000l;
-
- if (itemTTLSeconds > 0) {
- batch.add(QueryBuilder.insertInto(MetricsTable.RAW.toString())
- .value("schedule_id", Integer.parseInt(rawDataPoint[MigrationQuery.SCHEDULE_INDEX].toString()))
- .value("time", new Date(creationTimeMillis))
- .value("value", Double.parseDouble(rawDataPoint[MigrationQuery.VALUE_INDEX].toString()))
- .using(ttl((int) itemTTLSeconds)));
- batchSize++;
- }
-
- if (batchSize >= MAX_RAW_BATCH_TO_CASSANDRA) {
- resultSetFutures.add(session.executeAsync(batch));
- batch = QueryBuilder.batch();
- batchSize = 0;
- }
- }
-
- if (batchSize != 0) {
- resultSetFutures.add(session.executeAsync(batch));
- }
-
- for (ResultSetFuture future : resultSetFutures) {
- future.get();
- }
- }
- }
-
-
- private class DeleteAllData implements CallableMigrationWorker {
-
- public void migrate() {
- Query nativeQuery;
-
- if (run1HAggregateDataMigration) {
- entityManager.getTransaction().begin();
- nativeQuery = entityManager.createNativeQuery(MigrationQuery.DELETE_1H_DATA.toString());
- nativeQuery.executeUpdate();
- entityManager.getTransaction().commit();
- log.info("- RHQ_MEASUREMENT_DATA_NUM_1H - Cleaned -");
- }
-
- if (run6HAggregateDataMigration) {
- entityManager.getTransaction().begin();
- nativeQuery = entityManager.createNativeQuery(MigrationQuery.DELETE_6H_DATA.toString());
- nativeQuery.executeUpdate();
- entityManager.getTransaction().commit();
- log.info("- RHQ_MEASUREMENT_DATA_NUM_6H - Cleaned -");
- }
-
- if (run1DAggregateDataMigration) {
- entityManager.getTransaction().begin();
- nativeQuery = entityManager.createNativeQuery(MigrationQuery.DELETE_1D_DATA.toString());
- nativeQuery.executeUpdate();
- entityManager.getTransaction().commit();
- log.info("- RHQ_MEASUREMENT_DATA_NUM_1D - Cleaned -");
- }
-
- if (runRawDataMigration) {
- for (String table : getRawDataTables()) {
- entityManager.getTransaction().begin();
- String deleteAllData = String.format(MigrationQuery.DELETE_RAW_ALL_DATA.toString(), table);
- nativeQuery = entityManager.createNativeQuery(deleteAllData);
- nativeQuery.executeUpdate();
- entityManager.getTransaction().commit();
- log.info("- " + table + " - Cleaned -");
- }
- }
- }
-
- @Override
- public long estimate() throws Exception {
- return 300000; // return 5 minutes for now, without any database-side checks.
- }
- }
-}
-
-
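The TTL arithmetic in both insertDataToCassandra variants above is easy to misread, so here is a self-contained worked example; the 7-day raw TTL is an assumed figure, not the project's actual MetricsTable value:

    public class TtlExample {
        public static void main(String[] args) {
            long dayMillis = 24L * 60 * 60 * 1000;
            long rawTTLMillis = 7 * dayMillis;           // assumed nominal raw TTL
            long expectedTTLMillis = rawTTLMillis * 10;  // the migrator multiplies the table TTL by 10
            long currentTimeMillis = System.currentTimeMillis();
            long creationTimeMillis = currentTimeMillis - 30 * dayMillis; // a 30-day-old sample
            long itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + creationTimeMillis) / 1000L;
            // 70 days of effective TTL minus 30 days already elapsed leaves ~40 days;
            // samples older than 70 days get a non-positive TTL and are skipped.
            System.out.println(itemTTLSeconds + " seconds of TTL remain");
        }
    }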
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigratorRunner.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigratorRunner.java
deleted file mode 100644
index dc3aaf4..0000000
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/DataMigratorRunner.java
+++ /dev/null
@@ -1,644 +0,0 @@
-/*
- * RHQ Management Platform
- * Copyright 2011, Red Hat Middleware LLC, and individual contributors
- * as indicated by the @author tags. See the copyright.txt file in the
- * distribution for a full listing of individual contributors.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-package org.rhq.server.metrics;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-import java.util.concurrent.TimeUnit;
-
-import javax.persistence.EntityManager;
-import javax.persistence.EntityManagerFactory;
-
-import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.ProtocolOptions.Compression;
-import com.datastax.driver.core.Session;
-import com.datastax.driver.core.SimpleAuthInfoProvider;
-import com.datastax.driver.core.exceptions.NoHostAvailableException;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.OptionBuilder;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.PosixParser;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.log4j.ConsoleAppender;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PatternLayout;
-import org.hibernate.ejb.Ejb3Configuration;
-
-
-/**
- * @author Stefan Negrea
- *
- * Only Postgres is supported by the runner; however, the data migrator itself can run
- * with any database.
- *
- * Maven command to run this from the command line:
- *
- * mvn install -DskipTests exec:java -Dexec.mainClass="org.rhq.server.metrics.DataMigratorRunner"
- *
- *
- */
-@SuppressWarnings({ "static-access", "deprecation" })
-public class DataMigratorRunner {
-
- private final Log log = LogFactory.getLog(DataMigratorRunner.class);
-
- //Cassandra
- private Option cassandraUserOption = OptionBuilder.withLongOpt("cassandra-user").hasArg().withType(String.class)
- .withDescription("Cassandra user (default: rhqadmin)").create();
- private Option cassandraPasswordOption = OptionBuilder.withLongOpt("cassandra-password").hasArg()
- .withDescription("Cassandra password (default: rhqadmin)").withType(String.class).create();
- private Option cassandraHostsOption = OptionBuilder.withLongOpt("cassandra-hosts").hasArg().withType(String.class)
- .withDescription("Cassandra hosts, format host_ip_1,host_ip_2,... (default: 127.0.0.1")
- .create();
- private Option cassandraPortOption = OptionBuilder.withLongOpt("cassandra-port").hasArg().withType(Integer.class)
- .withDescription("Cassandra native binary protocol port (default: 9142)").create();
- private Option cassandraCompressionOption = OptionBuilder.withLongOpt("cassandra-compression").hasOptionalArg()
- .withType(String.class).withDescription("Enable compression for communication with Cassandra (default: true)")
- .create();
-
- //SQL
- private Option sqlUserOption = OptionBuilder.withLongOpt("sql-user").hasArg().withType(String.class)
- .withDescription("SQL server user (default: rhqadmin)").create();
- private Option sqlPasswordOption = OptionBuilder.withLongOpt("sql-password").hasArg().withType(String.class)
- .withDescription("SQL server password (default: rhqadmin)").create();
- private Option sqlHostOption = OptionBuilder.withLongOpt("sql-host").hasArg().withType(String.class)
- .withDescription("SQL server host address (default: localhost)").create();
- private Option sqlPortOption = OptionBuilder.withLongOpt("sql-port").hasArg().withType(String.class)
- .withDescription("SQL server port (default: 5432)").create();
- private Option sqlDBOption = OptionBuilder.withLongOpt("sql-db").hasArg().withType(String.class)
- .withDescription("SQL database (default: rhq)").create();
-
- private Option sqlServerType = OptionBuilder.withLongOpt("sql-server-type").hasArg().withType(String.class)
- .withDescription("SQL server type, only postgres and oracle are supported (default: postgres)").create();
- private Option sqlPostgresServer = OptionBuilder.withLongOpt("sql-server-postgres").hasOptionalArg()
- .withType(Boolean.class).withDescription("Postgres SQL server.").create();
- private Option sqlOracleServer = OptionBuilder.withLongOpt("sql-server-oracle").hasOptionalArg()
- .withType(Boolean.class)
- .withDescription("Oracle SQL server.").create();
-
- //Migration
- private Option disableRawOption = OptionBuilder.withLongOpt("disable-raw-migration").hasOptionalArg().withType(Boolean.class)
- .withDescription("Disable raw table migration (default: false)").create();
- private Option disable1HOption = OptionBuilder.withLongOpt("disable-1h-migration").hasOptionalArg().withType(Boolean.class)
- .withDescription("Disable 1 hour aggregates table migration (default: false)").create();
- private Option disable6HOption = OptionBuilder.withLongOpt("disable-6h-migration").hasOptionalArg().withType(Boolean.class)
- .withDescription("Disable 6 hours aggregates table migration (default: false)").create();
- private Option disable1DOption = OptionBuilder.withLongOpt("disable-1d-migration").hasOptionalArg().withType(Boolean.class)
- .withDescription("Disable 24 hours aggregates table migration (default: false)").create();
- private Option preserveDataOption = OptionBuilder.withLongOpt("preserve-data").hasOptionalArg().withType(Boolean.class)
- .withDescription("Preserve SQL data post migration (default: true)").create();
- private Option deleteDataOption = OptionBuilder.withLongOpt("delete-data").hasOptionalArg().withType(Boolean.class)
- .withDescription("Delete SQL data at the end of migration (default: false)").create();
- private Option estimateOnlyOption = OptionBuilder.withLongOpt("estimate-only").hasOptionalArg().withType(Boolean.class)
- .withDescription("Only estimate how long the migration will take (default: false)").create();
- private Option deleteOnlyOption = OptionBuilder.withLongOpt("delete-only").hasOptionalArg().withType(Boolean.class)
- .withDescription("Only delete data from the old SQL server, no migration will be performed (default: false)")
- .create();
-
- //Runner
- private Option helpOption = OptionBuilder.withLongOpt("help").create("h");
- private Option debugLogOption = OptionBuilder.withLongOpt("debugLog")
- .withDescription("Enable debug level logs for the communication with Cassandra and SQL Server (default: false)")
- .create("X");
- private Option configFileOption = OptionBuilder.withLongOpt("config-file").hasArg()
- .withDescription("Configuration file. All the command line options can be set in a typical properties file. " +
- "Command line arguments take precedence over default and configuration file options.")
- .create();
-
- private Map<Object, Object> configuration = new HashMap<Object, Object>();
- private Options options;
-
- /**
- * @param args
- * @throws ParseException
- */
- public static void main(String[] args) throws Exception {
- initLogging();
- try{
- DataMigratorRunner runner = new DataMigratorRunner();
- runner.configure(args);
- runner.run();
- } catch (HelpRequestedException h) {
- //do nothing
- } catch (Exception e) {
- e.printStackTrace();
- System.exit(1);
- }
-
- System.exit(0);
- }
-
- private static void initLogging() {
- Logger root = Logger.getRootLogger();
- if (!root.getAllAppenders().hasMoreElements()) {
- root.addAppender(new ConsoleAppender(new PatternLayout(PatternLayout.TTCC_CONVERSION_PATTERN)));
- setLogLevel(Level.ERROR);
- }
- }
-
- private static void setLogLevel(Level level) {
- Logger root = Logger.getRootLogger();
- root.setLevel(level);
-
- Logger cassandraLogging = root.getLoggerRepository().getLogger("org.apache.cassandra.cql.jdbc");
- cassandraLogging.setLevel(level);
-
- Logger cassandraDriverLogging = root.getLoggerRepository().getLogger("com.datastax.driver");
- cassandraDriverLogging.setLevel(level);
-
- Logger hibernateLogging = root.getLoggerRepository().getLogger("org.hibernate");
- hibernateLogging.setLevel(Level.ERROR);
-
- Logger migratorLogging = root.getLoggerRepository().getLogger("org.rhq");
- if (Level.DEBUG.equals(level)) {
- migratorLogging.setLevel(Level.ALL);
- } else {
- migratorLogging.setLevel(level);
- }
- }
-
- private void configure(String args[]) throws Exception {
- options = new Options();
-
- options.addOption(cassandraUserOption);
- options.addOption(cassandraPasswordOption);
- options.addOption(cassandraHostsOption);
- options.addOption(cassandraPortOption);
- options.addOption(cassandraCompressionOption);
-
- options.addOption(sqlUserOption);
- options.addOption(sqlPasswordOption);
- options.addOption(sqlHostOption);
- options.addOption(sqlPortOption);
- options.addOption(sqlDBOption);
- options.addOption(sqlServerType);
- options.addOption(sqlPostgresServer);
- options.addOption(sqlOracleServer);
-
- options.addOption(disableRawOption);
- options.addOption(disable1HOption);
- options.addOption(disable6HOption);
- options.addOption(disable1DOption);
- options.addOption(preserveDataOption);
- options.addOption(deleteDataOption);
- options.addOption(estimateOnlyOption);
- options.addOption(deleteOnlyOption);
-
- options.addOption(helpOption);
- options.addOption(debugLogOption);
- options.addOption(configFileOption);
-
- CommandLine commandLine;
- try {
- CommandLineParser parser = new PosixParser();
- commandLine = parser.parse(options, args);
- } catch (Exception e) {
- HelpFormatter formatter = new HelpFormatter();
- formatter.setWidth(120);
- formatter.printHelp("DataMigrationRunner", options);
- throw new Exception("Error parsing command line arguments");
- }
-
- if (commandLine.hasOption(helpOption.getLongOpt()) || commandLine.hasOption(helpOption.getOpt())) {
- HelpFormatter formatter = new HelpFormatter();
- formatter.printHelp("DataMigrationRunner", options);
- throw new HelpRequestedException();
- }
-
- if (commandLine.hasOption(debugLogOption.getLongOpt()) || commandLine.hasOption(debugLogOption.getOpt())) {
- DataMigratorRunner.setLogLevel(Level.DEBUG);
- }
-
- loadDefaultConfiguration();
- if (commandLine.hasOption(configFileOption.getLongOpt())) {
- loadConfigFile(commandLine.getOptionValue(configFileOption.getLongOpt()));
- }
-
- parseCassandraOptions(commandLine);
- parseSQLOptions(commandLine);
- parseMigrationOptions(commandLine);
- }
-
- /**
- * Add default configuration options to the configuration store.
- */
- private void loadDefaultConfiguration() {
- //default Cassandra configuration
- configuration.put(cassandraUserOption, "rhqadmin");
- configuration.put(cassandraPasswordOption, "rhqadmin");
- configuration.put(cassandraHostsOption, new String[] { "127.0.0.1" });
- configuration.put(cassandraPortOption, 9042);
- configuration.put(cassandraCompressionOption, true);
-
- //default SQL configuration
- configuration.put(sqlUserOption, "rhqadmin");
- configuration.put(sqlPasswordOption, "rhqadmin");
- configuration.put(sqlHostOption, "localhost");
- configuration.put(sqlPortOption, "5432");
- configuration.put(sqlDBOption, "rhq");
- configuration.put(sqlServerType, "postgres");
-
- //default runner options
- configuration.put(disableRawOption, false);
- configuration.put(disable1HOption, false);
- configuration.put(disable6HOption, false);
- configuration.put(disable1DOption, false);
- configuration.put(preserveDataOption, true);
- configuration.put(estimateOnlyOption, false);
- configuration.put(deleteOnlyOption, false);
- }
-
- /**
- * Load the configuration options from file and overlay them on top of the default
- * options.
- *
- * @param file config file
- */
- private void loadConfigFile(String file) {
- try {
- File configFile = new File(file);
- if (!configFile.exists()) {
- throw new FileNotFoundException("Configuration file not found!");
- }
-
- Properties configProperties = new Properties();
- FileInputStream stream = new FileInputStream(configFile);
- configProperties.load(stream);
- stream.close();
-
- for (Object optionObject : options.getOptions()) {
- Option option = (Option) optionObject;
- Object optionValue;
-
- if ((optionValue = configProperties.get(option.getLongOpt())) != null) {
- log.debug("Configuration option loaded: " + option.getLongOpt() + " (" + option.getType() + ") -> "
- + optionValue);
-
- if (option.equals(cassandraHostsOption)) {
- String[] cassandraHosts = parseCassandraHosts(optionValue.toString());
- configuration.put(option, cassandraHosts);
- } else if (option.equals(sqlServerType)) {
- if ("oracle".equals(optionValue)) {
- configuration.put(option, "oracle");
- } else {
- configuration.put(option, "postgres");
- }
- } else if (option.equals(sqlPostgresServer)) {
- boolean value = tryParseBoolean(optionValue.toString(), true);
- if (value == true) {
- configuration.put(sqlServerType, "postgres");
- }
- } else if (option.equals(sqlOracleServer)) {
- boolean value = tryParseBoolean(optionValue.toString(), true);
- if (value == true) {
- configuration.put(sqlServerType, "oracle");
- }
- } else if (option.getType().equals(Boolean.class)) {
- configuration.put(option, tryParseBoolean(optionValue.toString(), true));
- } else if (option.getType().equals(Integer.class)) {
- configuration.put(option, tryParseInteger(optionValue.toString(), 0));
- } else {
- configuration.put(option, optionValue.toString());
- }
- }
- }
- } catch (Exception e) {
- log.error("Unable to load or process the configuration file.", e);
- System.exit(1);
- }
-
- log.debug(configuration.toString());
- }
-
- /**
- * Parse command line options for Cassandra.
- *
- * @param commandLine command line
- * @throws NoHostAvailableException
- */
- private void parseCassandraOptions(CommandLine commandLine) throws Exception {
- if (commandLine.hasOption(cassandraUserOption.getLongOpt())) {
- configuration.put(cassandraUserOption, commandLine.getOptionValue(cassandraUserOption.getLongOpt()));
- }
-
- if (commandLine.hasOption(cassandraPasswordOption.getLongOpt())) {
- configuration
- .put(cassandraPasswordOption, commandLine.getOptionValue(cassandraPasswordOption.getLongOpt()));
- }
-
- if (commandLine.hasOption(cassandraHostsOption.getLongOpt())) {
- String[] cassandraHosts = parseCassandraHosts(commandLine.getOptionValue(cassandraHostsOption.getLongOpt()));
- configuration.put(cassandraHostsOption, cassandraHosts);
- }
-
- if (commandLine.hasOption(cassandraPortOption.getLongOpt())) {
- Integer cassandraPort = tryParseInteger(commandLine.getOptionValue(cassandraPortOption.getLongOpt()), 9142);
- configuration.put(cassandraPortOption, cassandraPort);
- }
-
- if (commandLine.hasOption(cassandraCompressionOption.getLongOpt())) {
- boolean value = tryParseBoolean(commandLine.getOptionValue(cassandraCompressionOption.getLongOpt()), true);
- configuration.put(cassandraCompressionOption, value);
- }
- }
-
- /**
- * Parse command line options for SQL.
- *
- * @param commandLine command line
- * @throws NoHostAvailableException
- */
- private void parseSQLOptions(CommandLine commandLine) throws NoHostAvailableException {
- if (commandLine.hasOption(sqlUserOption.getLongOpt())) {
- configuration.put(sqlUserOption, commandLine.getOptionValue(sqlUserOption.getLongOpt()));
- }
-
- if (commandLine.hasOption(sqlPasswordOption.getLongOpt())) {
- configuration.put(sqlPasswordOption, commandLine.getOptionValue(sqlPasswordOption.getLongOpt()));
- }
-
- if (commandLine.hasOption(sqlHostOption.getLongOpt())) {
- configuration.put(sqlHostOption, commandLine.getOptionValue(sqlHostOption.getLongOpt()));
- }
-
- if (commandLine.hasOption(sqlPortOption.getLongOpt())) {
- configuration.put(sqlPortOption, commandLine.getOptionValue(sqlPortOption.getLongOpt()));
- }
-
- if (commandLine.hasOption(sqlDBOption.getLongOpt())) {
- configuration.put(sqlDBOption, commandLine.getOptionValue(sqlDBOption.getLongOpt()));
- }
-
- if (commandLine.hasOption(sqlServerType.getLongOpt())) {
- if ("oracle".equals(commandLine.getOptionValue(sqlServerType.getLongOpt()))) {
- configuration.put(sqlServerType, "oracle");
- } else {
- configuration.put(sqlServerType, "postgres");
- }
- } else if (commandLine.hasOption(sqlPostgresServer.getLongOpt())) {
- configuration.put(sqlServerType, "postgres");
- } else if (commandLine.hasOption(sqlOracleServer.getLongOpt())) {
- configuration.put(sqlServerType, "oracle");
- }
- }
-
- /**
- * Parse command line options for the actual migration progress.
- *
- * @param commandLine
- */
- private void parseMigrationOptions(CommandLine commandLine) {
- boolean value;
-
- if (commandLine.hasOption(disableRawOption.getLongOpt())) {
- value = tryParseBoolean(commandLine.getOptionValue(disableRawOption.getLongOpt()), true);
- configuration.put(disableRawOption, value);
- }
-
- if (commandLine.hasOption(disable1HOption.getLongOpt())) {
- value = tryParseBoolean(commandLine.getOptionValue(disable1HOption.getLongOpt()), true);
- configuration.put(disable1HOption, value);
- }
-
- if (commandLine.hasOption(disable6HOption.getLongOpt())) {
- value = tryParseBoolean(commandLine.getOptionValue(disable6HOption.getLongOpt()), true);
- configuration.put(disable6HOption, value);
- }
-
- if (commandLine.hasOption(disable1DOption.getLongOpt())) {
- value = tryParseBoolean(commandLine.getOptionValue(disable1DOption.getLongOpt()), true);
- configuration.put(disable1DOption, value);
- }
-
- if (commandLine.hasOption(preserveDataOption.getLongOpt())) {
- value = tryParseBoolean(commandLine.getOptionValue(preserveDataOption.getLongOpt()), true);
- configuration.put(preserveDataOption, value);
- } else if (commandLine.hasOption(deleteDataOption.getLongOpt())) {
- value = tryParseBoolean(commandLine.getOptionValue(deleteDataOption.getLongOpt()), true);
- configuration.put(preserveDataOption, !value);
- }
-
- if (commandLine.hasOption(estimateOnlyOption.getLongOpt())) {
- value = tryParseBoolean(commandLine.getOptionValue(estimateOnlyOption.getLongOpt()), true);
- configuration.put(estimateOnlyOption, value);
- }
- }
-
- private void run() throws Exception {
- log.debug("Creating Entity Manager");
- EntityManager entityManager = this.createEntityManager();
- log.debug("Done creating Entity Manager");
-
- log.debug("Creating Cassandra session");
- Session session = this.createCassandraSession();
- log.debug("Done creating Cassandra session");
-
- DataMigrator migrator = new DataMigrator(entityManager, session);
-
- if (!(Boolean) configuration.get(deleteOnlyOption)) {
- if ((Boolean) configuration.get(preserveDataOption)) {
- migrator.preserveData();
- } else {
- migrator.deleteAllDataAtEndOfMigration();
- }
-
- migrator.runRawDataMigration(!(Boolean) configuration.get(disableRawOption));
- migrator.run1HAggregateDataMigration(!(Boolean) configuration.get(disable1HOption));
- migrator.run6HAggregateDataMigration(!(Boolean) configuration.get(disable6HOption));
- migrator.run1DAggregateDataMigration(!(Boolean) configuration.get(disable1DOption));
-
- System.out.println("Estimation process - starting\n");
- long estimate = migrator.estimate();
- System.out.println("The migration process will take approximately: "
- + TimeUnit.MILLISECONDS.toMinutes(estimate) + " minutes (or " + estimate + " milliseconds)\n");
- System.out.println("Estimation process - ended\n\n");
-
- if (!(Boolean) configuration.get(estimateOnlyOption)) {
- System.out.println("Migration process - starting\n");
- long startTime = System.currentTimeMillis();
- migrator.migrateData();
- long duration = System.currentTimeMillis() - startTime;
- System.out.println("The migration process took: " + TimeUnit.MILLISECONDS.toMinutes(duration)
- + " minutes (or " + duration + " milliseconds)\n");
- System.out.println("Migration process - ended\n");
- }
- } else {
- migrator.deleteAllDataAtEndOfMigration();
- migrator.runRawDataMigration(true);
- migrator.run1HAggregateDataMigration(true);
- migrator.run6HAggregateDataMigration(true);
- migrator.run1DAggregateDataMigration(true);
-
- System.out.println("Estimation process - starting\n");
- long estimate = migrator.estimate();
- System.out.println("The deletion of old data will take approximately: "
- + TimeUnit.MILLISECONDS.toMinutes(estimate) + " minutes (or " + estimate + " milliseconds)\n");
- System.out.println("Estimation process - ended\n\n");
-
- if (!(Boolean) configuration.get(estimateOnlyOption)) {
- System.out.println("Old data deletion process - starting\n");
- long startTime = System.currentTimeMillis();
- migrator.deleteOldData();
- long duration = System.currentTimeMillis() - startTime;
- System.out.println("The deletion process took: " + TimeUnit.MILLISECONDS.toMinutes(duration)
- + " minutes (or " + duration + " milliseconds)\n");
- System.out.println("Old data deletion process - ended\n");
- }
- }
- }
-
- /**
- * Create a Cassandra session based on configuration options.
- *
- * @return Cassandra session
- * @throws Exception
- */
- private Session createCassandraSession() throws Exception {
- Compression selectedCompression = Compression.NONE;
- if ((Boolean) configuration.get(cassandraCompressionOption)) {
- selectedCompression = Compression.SNAPPY;
- }
-
- Cluster cluster = Cluster
- .builder()
- .addContactPoints((String[]) configuration.get(cassandraHostsOption))
- .withPort((Integer) configuration.get(cassandraPortOption))
- .withCompression(selectedCompression)
- .withoutMetrics()
- .withAuthInfoProvider(
- new SimpleAuthInfoProvider().add("username", (String) configuration.get(cassandraUserOption)).add(
- "password", (String) configuration.get(cassandraPasswordOption))).build();
-
- return cluster.connect("rhq");
- }
-
- /**
- * Create a hibernate session to the SQL server.
- *
- * @return
- * @throws Exception
- */
- private EntityManager createEntityManager() throws Exception {
- Properties properties = new Properties();
- properties.put("javax.persistence.provider", "org.hibernate.ejb.HibernatePersistence");
- properties.put("hibernate.connection.username", (String) configuration.get(sqlUserOption));
- properties.put("hibernate.connection.password", (String) configuration.get(sqlPasswordOption));
-
- if ("oracle".equals(configuration.get(sqlServerType))) {
- String driverClassName = "oracle.jdbc.driver.OracleDriver";
-
- try {
- //Required to preload the driver manually.
- //Without this the driver load will fail due to the packaging.
- Class.forName(driverClassName);
- } catch (ClassNotFoundException e) {
- log.debug(e);
- throw new Exception("Oracle SQL Driver class could not be loaded. Missing class: " + driverClassName);
- }
-
- properties.put("hibernate.dialect", "org.hibernate.dialect.Oracle10gDialect");
- properties.put("hibernate.driver_class", driverClassName);
- properties.put("hibernate.connection.url", "jdbc:oracle:thin:@" + (String) configuration.get(sqlHostOption)
- + ":" + (String) configuration.get(sqlPortOption) + ":" + (String) configuration.get(sqlDBOption));
- properties.put("hibernate.default_schema", (String) configuration.get(sqlDBOption));
- } else {
- String driverClassName = "org.postgresql.Driver";
-
- try {
- //Required to preload the driver manually.
- //Without this the driver load will fail due to the packaging.
- Class.forName(driverClassName);
- } catch (ClassNotFoundException e) {
- log.debug(e);
- throw new Exception("Postgres SQL Driver class could not be loaded. Missing class: " + driverClassName);
- }
-
- properties.put("hibernate.dialect", "org.hibernate.dialect.PostgreSQLDialect");
- properties.put("hibernate.driver_class", driverClassName);
- properties.put("hibernate.connection.url", "jdbc:postgresql://" + (String) configuration.get(sqlHostOption)
- + ":" + (String) configuration.get(sqlPortOption) + "/" + (String) configuration.get(sqlDBOption));
- }
-
- log.debug("Creating entity manager with the following configuration:");
- log.debug(properties);
-
- Ejb3Configuration configuration = new Ejb3Configuration();
- configuration.setProperties(properties);
- EntityManagerFactory factory = configuration.buildEntityManagerFactory();
- return factory.createEntityManager();
- }
-
- /**
- * Parse Cassandra host information submitted as a comma-separated list:
- * host_address_1,host_address_2,...
- *
- * @param stringValue
- * @return
- */
- private String[] parseCassandraHosts(String stringValue) {
- String[] seeds = stringValue.split(",");
- return seeds;
- }
-
- /**
- * @param value object value to parse
- * @param defaultValue default value
- * @return
- */
- private boolean tryParseBoolean(Object value, boolean defaultValue) {
- try {
- return Boolean.parseBoolean(value.toString());
- } catch (Exception e) {
- return defaultValue;
- }
- }
-
- /**
- * @param value object value to parse
- * @param defaultValue default value
- * @return
- */
- private Integer tryParseInteger(Object value, int defaultValue) {
- try {
- return Integer.parseInt(value.toString());
- } catch (Exception e) {
- return defaultValue;
- }
- }
-
- @SuppressWarnings("serial")
- private class HelpRequestedException extends Exception {
- public HelpRequestedException() {
- super("Help Requested");
- }
- }
-}
\ No newline at end of file
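The runner above layers its settings: hard-coded defaults first, then the optional --config-file properties, then command-line options, with later layers winning. A minimal self-contained sketch of that precedence, using illustrative key names rather than the runner's real options:

    import java.io.FileInputStream;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Properties;

    public class LayeredConfigSketch {
        public static Map<String, String> load(String configFile, Map<String, String> cliValues)
            throws Exception {
            Map<String, String> config = new HashMap<String, String>();
            config.put("cassandra-port", "9042");           // 1. defaults
            if (configFile != null) {                       // 2. config file overlays defaults
                Properties props = new Properties();
                FileInputStream in = new FileInputStream(configFile);
                try {
                    props.load(in);
                } finally {
                    in.close();
                }
                for (String name : props.stringPropertyNames()) {
                    config.put(name, props.getProperty(name));
                }
            }
            config.putAll(cliValues);                       // 3. command line wins
            return config;
        }
    }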
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataBulkExportSource.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataBulkExportSource.java
deleted file mode 100644
index d817002..0000000
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataBulkExportSource.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * RHQ Management Platform
- * Copyright (C) 2005-2013 Red Hat, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
- */
-
-package org.rhq.server.metrics;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.StringTokenizer;
-
-import javax.persistence.EntityManager;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.hibernate.Session;
-import org.hibernate.engine.spi.SessionFactoryImplementor;
-import org.hibernate.service.jdbc.connections.spi.ConnectionProvider;
-
-import org.rhq.core.util.stream.StreamUtil;
-
-/**
- * @author Thomas Segismont
- */
-public abstract class ExistingDataBulkExportSource implements ExistingDataSource {
-
- private static final Log LOG = LogFactory.getLog(ExistingDataBulkExportSource.class);
-
- protected static final int IO_BUFFER_SIZE = 1024 * 64;
-
- protected static final String DELIMITER = "|";
-
- private EntityManager entityManager;
-
- private String selectNativeQuery;
-
- private File workDirectory;
-
- private String fileName;
-
- private File existingDataFile;
-
- private BufferedReader existingDataFileReader;
-
- private int currentIndex;
-
- public ExistingDataBulkExportSource(EntityManager entityManager, String selectNativeQuery, File workDirectory,
- String fileName) {
- this.entityManager = entityManager;
- this.selectNativeQuery = selectNativeQuery;
- this.workDirectory = workDirectory;
- this.fileName = fileName;
- existingDataFile = new File(workDirectory, fileName);
- }
-
- protected String getSelectNativeQuery() {
- return selectNativeQuery;
- }
-
- protected File getExistingDataFile() {
- return existingDataFile;
- }
-
- protected Connection getConnection() throws SQLException {
- Session session = (Session) entityManager.getDelegate();
- SessionFactoryImplementor sfi = (SessionFactoryImplementor) session.getSessionFactory();
- ConnectionProvider cp = sfi.getConnectionProvider();
- return cp.getConnection();
- }
-
- public abstract void exportExistingData() throws Exception;
-
- public void startReading() throws Exception {
-        if (!existingDataFile.exists() || !existingDataFile.isFile() || !existingDataFile.canRead()) {
-            throw new IllegalStateException("Existing data file is missing or not readable: " + existingDataFile);
- }
- existingDataFileReader = new BufferedReader(new FileReader(existingDataFile));
- currentIndex = 0;
- }
-
- public void stopReading() {
- StreamUtil.safeClose(existingDataFileReader);
- }
-
- @Override
- public List<Object[]> getExistingData(int fromIndex, int maxResults) throws Exception {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Reading lines " + fromIndex + "-" + (fromIndex + maxResults));
- }
- if (fromIndex != currentIndex) {
- throw new IllegalStateException();
- }
- List<Object[]> results = new LinkedList<Object[]>();
- for (int i = 0; i < maxResults; i++) {
- String nextLine = existingDataFileReader.readLine();
- if (nextLine == null) {
- break;
- }
- currentIndex++;
- StringTokenizer stringTokenizer = new StringTokenizer(nextLine, DELIMITER);
- Object[] row = new Object[stringTokenizer.countTokens()];
- for (int j = 0; j < row.length; j++) {
- row[j] = stringTokenizer.nextToken();
- }
- results.add(row);
- }
- return results;
- }
-
-}
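
Note: getExistingData() above tokenizes each exported line with StringTokenizer
and the "|" delimiter. StringTokenizer silently collapses empty tokens, so a row
with an empty column would shift the remaining values left. A sketch that
preserves empty columns, assuming the same single-character delimiter:

    // limit -1 keeps trailing empty strings; Pattern.quote escapes the '|'
    String[] row = nextLine.split(java.util.regex.Pattern.quote(DELIMITER), -1);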
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataJPABulkExportSource.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataJPABulkExportSource.java
deleted file mode 100644
index d61fdf1..0000000
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataJPABulkExportSource.java
+++ /dev/null
@@ -1,124 +0,0 @@
-package org.rhq.server.metrics;
-
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileWriter;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.util.List;
-import java.util.Properties;
-
-import javax.persistence.EntityManager;
-import javax.persistence.EntityManagerFactory;
-
-import org.apache.commons.lang.time.StopWatch;
-import org.apache.log4j.BasicConfigurator;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.hibernate.ejb.Ejb3Configuration;
-
-import org.rhq.core.util.jdbc.JDBCUtil;
-import org.rhq.core.util.stream.StreamUtil;
-
-/**
- * @author Thomas Segismont
- */
-public class ExistingDataJPABulkExportSource extends ExistingDataBulkExportSource {
-
- public ExistingDataJPABulkExportSource(EntityManager entityManager, String selectNativeQuery, File workDirectory,
- String fileName) {
- super(entityManager, selectNativeQuery, workDirectory, fileName);
- }
-
- public void exportExistingData() throws Exception {
- BufferedWriter fileWriter = null;
- Connection connection = null;
- PreparedStatement statement = null;
- ResultSet resultSet = null;
- try {
- fileWriter = new BufferedWriter(new FileWriter(getExistingDataFile()), IO_BUFFER_SIZE);
- connection = getConnection();
- statement = connection.prepareStatement(getSelectNativeQuery());
-            resultSet = statement.executeQuery();
-            int columnCount = resultSet.getMetaData().getColumnCount();
- while (resultSet.next()) {
- for (int i = 1; i < columnCount + 1; i++) {
- if (i > 1) {
- fileWriter.write(DELIMITER);
- }
- fileWriter.write(resultSet.getString(i));
- }
- fileWriter.write("\n");
- }
- } finally {
- StreamUtil.safeClose(fileWriter);
- JDBCUtil.safeClose(connection, statement, resultSet);
- }
- }
-
- public static void main(String[] args) throws Exception {
- BasicConfigurator.configure();
- Logger.getRootLogger().setLevel(Level.INFO);
- Logger.getLogger("org.rhq").setLevel(Level.DEBUG);
- EntityManagerFactory entityManagerFactory = null;
- EntityManager entityManager = null;
- ExistingDataBulkExportSource source = null;
- try {
- entityManagerFactory = createEntityManager();
- entityManager = entityManagerFactory.createEntityManager();
- source = new ExistingPostgresDataBulkExportSource(entityManager,
- "SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1D",
- // "SELECT count(*) FROM RHQ_MEASUREMENT_DATA_NUM_1D limit 1"
- new File(System.getProperty("java.io.tmpdir")), "poupoupidou.txt");
- StopWatch stopWatch = new StopWatch();
- stopWatch.start();
- source.exportExistingData();
- source.startReading();
- int rowIndex = 0;
- int maxResults = 30000;
- for (;;) {
- List<Object[]> existingData = source.getExistingData(rowIndex, maxResults);
- if (existingData.size() < maxResults) {
- break;
- } else {
- rowIndex += maxResults;
- }
- }
- stopWatch.stop();
- System.out.println("Execution: " + stopWatch);
- } finally {
- if (source != null) {
- source.stopReading();
- }
- if (entityManager != null) {
- entityManager.close();
- }
- if (entityManagerFactory != null) {
- entityManagerFactory.close();
- }
- }
- }
-
- private static EntityManagerFactory createEntityManager() throws Exception {
- Properties properties = new Properties();
- properties.put("javax.persistence.provider", "org.hibernate.ejb.HibernatePersistence");
- properties.put("hibernate.connection.username", "rhqadmin");
- properties.put("hibernate.connection.password", "rhqadmin");
- properties.put("hibernate.dialect", "org.hibernate.dialect.PostgreSQLDialect");
- String driverClassName = "org.postgresql.Driver";
- try {
- //Required to preload the driver manually.
- //Without this the driver load will fail due to the packaging.
- Class.forName(driverClassName);
- } catch (ClassNotFoundException e) {
- throw new Exception("Postgres SQL Driver class could not be loaded. Missing class: " + driverClassName);
- }
- properties.put("hibernate.driver_class", driverClassName);
- properties.put("hibernate.connection.url", "jdbc:postgresql://localhost:5432/rhqdev");
- Ejb3Configuration configuration = new Ejb3Configuration();
- configuration.setProperties(properties);
- return configuration.buildEntityManagerFactory();
- }
-
-}
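
Note: when exporting over plain JDBC as above, the query has to be executed
before column metadata can be read from the result set. The canonical ordering,
for reference:

    statement = connection.prepareStatement(query);
    resultSet = statement.executeQuery();
    int columnCount = resultSet.getMetaData().getColumnCount();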
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataJPASource.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataJPASource.java
deleted file mode 100644
index b072f58..0000000
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataJPASource.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * RHQ Management Platform
- * Copyright (C) 2005-2013 Red Hat, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
- */
-
-package org.rhq.server.metrics;
-
-import java.util.List;
-import java.util.Properties;
-
-import javax.persistence.EntityManager;
-import javax.persistence.EntityManagerFactory;
-
-import org.apache.commons.lang.time.StopWatch;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.log4j.BasicConfigurator;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.hibernate.ejb.Ejb3Configuration;
-
-/**
- * @author Thomas Segismont
- */
-public class ExistingDataJPASource implements ExistingDataSource {
-
- private static final Log LOG = LogFactory.getLog(ExistingDataJPASource.class);
-
- private EntityManager entityManager;
-
- private String selectNativeQuery;
-
- public ExistingDataJPASource(EntityManager entityManager, String selectNativeQuery) {
- this.entityManager = entityManager;
- this.selectNativeQuery = selectNativeQuery;
- }
-
- @Override
- public List<Object[]> getExistingData(int fromIndex, int maxResults) throws Exception {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Reading lines " + fromIndex + "-" + (fromIndex + maxResults));
- }
- return entityManager.createNativeQuery(selectNativeQuery).setFirstResult(fromIndex).setMaxResults(maxResults)
- .getResultList();
- }
-
- public static void main(String[] args) throws Exception {
- BasicConfigurator.configure();
- Logger.getRootLogger().setLevel(Level.INFO);
- Logger.getLogger("org.rhq").setLevel(Level.DEBUG);
- EntityManagerFactory entityManagerFactory = null;
- EntityManager entityManager = null;
- ExistingDataJPASource source = null;
- try {
- entityManagerFactory = createEntityManager();
- entityManager = entityManagerFactory.createEntityManager();
- source = new ExistingDataJPASource(
- entityManager,
- "SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1D");
- StopWatch stopWatch = new StopWatch();
- stopWatch.start();
- int rowIndex = 0;
- int maxResults = 30000;
- for (; ; ) {
- List<Object[]> existingData = source.getExistingData(rowIndex, maxResults);
- if (existingData.size() < maxResults) {
- break;
- } else {
- rowIndex += maxResults;
- }
- }
- stopWatch.stop();
- System.out.println("Execution: " + stopWatch);
- } finally {
- if (entityManager != null) {
- entityManager.close();
- }
- if (entityManagerFactory != null) {
- entityManagerFactory.close();
- }
- }
- }
-
- private static EntityManagerFactory createEntityManager() throws Exception {
- Properties properties = new Properties();
- properties.put("javax.persistence.provider", "org.hibernate.ejb.HibernatePersistence");
- properties.put("hibernate.connection.username", "rhqadmin");
- properties.put("hibernate.connection.password", "rhqadmin");
- properties.put("hibernate.dialect", "org.hibernate.dialect.PostgreSQLDialect");
- String driverClassName = "org.postgresql.Driver";
- try {
- //Required to preload the driver manually.
- //Without this the driver load will fail due to the packaging.
- Class.forName(driverClassName);
- } catch (ClassNotFoundException e) {
- throw new Exception("Postgres SQL Driver class could not be loaded. Missing class: " + driverClassName);
- }
- properties.put("hibernate.driver_class", driverClassName);
- properties.put("hibernate.connection.url", "jdbc:postgresql://localhost:5432/rhq");
- Ejb3Configuration configuration = new Ejb3Configuration();
- configuration.setProperties(properties);
- return configuration.buildEntityManagerFactory();
- }
-
-}
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataSource.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataSource.java
deleted file mode 100644
index d6e0203..0000000
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataSource.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * RHQ Management Platform
- * Copyright (C) 2005-2013 Red Hat, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
- */
-
-package org.rhq.server.metrics;
-
-import java.util.List;
-
-/**
- * @author Thomas Segismont
- */
-public interface ExistingDataSource {
-
- List<Object[]> getExistingData(int fromIndex, int maxResults) throws Exception;
-
-}
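
Note: implementations of this interface page through the existing data. A
minimal consumption loop against the contract (a sketch; the 30000 page size
mirrors the callers in this commit):

    int fromIndex = 0;
    List<Object[]> page;
    do {
        page = source.getExistingData(fromIndex, 30000);
        fromIndex += page.size();
    } while (page.size() == 30000);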
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingPostgresDataBulkExportSource.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingPostgresDataBulkExportSource.java
deleted file mode 100644
index 8d049ed..0000000
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingPostgresDataBulkExportSource.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * RHQ Management Platform
- * Copyright (C) 2005-2013 Red Hat, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
- */
-
-package org.rhq.server.metrics;
-
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileWriter;
-import java.sql.Connection;
-import java.util.List;
-import java.util.Properties;
-
-import javax.persistence.EntityManager;
-import javax.persistence.EntityManagerFactory;
-
-import org.apache.commons.lang.time.StopWatch;
-import org.apache.log4j.BasicConfigurator;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.hibernate.ejb.Ejb3Configuration;
-import org.postgresql.copy.CopyManager;
-import org.postgresql.core.BaseConnection;
-
-import org.rhq.core.util.jdbc.JDBCUtil;
-import org.rhq.core.util.stream.StreamUtil;
-
-/**
- * @author Thomas Segismont
- */
-public class ExistingPostgresDataBulkExportSource extends ExistingDataBulkExportSource {
-
- public ExistingPostgresDataBulkExportSource(EntityManager entityManager, String selectNativeQuery,
- File workDirectory, String fileName) {
- super(entityManager, selectNativeQuery, workDirectory, fileName);
- }
-
- public void exportExistingData() throws Exception {
- BufferedWriter fileWriter = null;
- Connection connection = null;
- try {
- fileWriter = new BufferedWriter(new FileWriter(getExistingDataFile()), IO_BUFFER_SIZE);
- connection = getConnection();
- CopyManager copyManager = new CopyManager((BaseConnection) connection);
- copyManager.copyOut("COPY (" + getSelectNativeQuery() + ") TO STDOUT WITH DELIMITER '" + DELIMITER + "'",
- fileWriter);
- } finally {
- StreamUtil.safeClose(fileWriter);
- JDBCUtil.safeClose(connection);
- }
- }
-
- public static void main(String[] args) throws Exception {
- BasicConfigurator.configure();
- Logger.getRootLogger().setLevel(Level.INFO);
- Logger.getLogger("org.rhq").setLevel(Level.DEBUG);
- EntityManagerFactory entityManagerFactory = null;
- EntityManager entityManager = null;
- ExistingDataBulkExportSource source = null;
- try {
- entityManagerFactory = createEntityManager();
- entityManager = entityManagerFactory.createEntityManager();
- source = new ExistingPostgresDataBulkExportSource(
- entityManager,
- "SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1D",
- // "SELECT count(*) FROM RHQ_MEASUREMENT_DATA_NUM_1D limit 1"
- new File(System.getProperty("java.io.tmpdir")), "poupoupidou.txt");
- StopWatch stopWatch = new StopWatch();
- stopWatch.start();
- source.exportExistingData();
- source.startReading();
- int rowIndex = 0;
- int maxResults = 30000;
- for (;;) {
- List<Object[]> existingData = source.getExistingData(rowIndex, maxResults);
- if (existingData.size() < maxResults) {
- break;
- } else {
- rowIndex += maxResults;
- }
- }
- stopWatch.stop();
- System.out.println("Execution: " + stopWatch);
- } finally {
- if (source != null) {
- source.stopReading();
- }
- if (entityManager != null) {
- entityManager.close();
- }
- if (entityManagerFactory != null) {
- entityManagerFactory.close();
- }
- }
- }
-
- private static EntityManagerFactory createEntityManager() throws Exception {
- Properties properties = new Properties();
- properties.put("javax.persistence.provider", "org.hibernate.ejb.HibernatePersistence");
- properties.put("hibernate.connection.username", "rhqadmin");
- properties.put("hibernate.connection.password", "rhqadmin");
- properties.put("hibernate.dialect", "org.hibernate.dialect.PostgreSQLDialect");
- String driverClassName = "org.postgresql.Driver";
- try {
- //Required to preload the driver manually.
- //Without this the driver load will fail due to the packaging.
- Class.forName(driverClassName);
- } catch (ClassNotFoundException e) {
- throw new Exception("Postgres SQL Driver class could not be loaded. Missing class: " + driverClassName);
- }
- properties.put("hibernate.driver_class", driverClassName);
- properties.put("hibernate.connection.url", "jdbc:postgresql://localhost:5432/rhq");
- Ejb3Configuration configuration = new Ejb3Configuration();
- configuration.setProperties(properties);
- return configuration.buildEntityManagerFactory();
- }
-
-}
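
Note: the Postgres bulk source above streams the export through the driver's
CopyManager instead of iterating a JDBC ResultSet row by row. With the default
query from main(), the command sent to the server is effectively:

    // COPY (SELECT schedule_id, time_stamp, value, minvalue, maxvalue
    //       FROM RHQ_MEASUREMENT_DATA_NUM_1D) TO STDOUT WITH DELIMITER '|'

The '|' delimiter must not occur in the exported values, which holds for these
numeric columns.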
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigrator.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigrator.java
new file mode 100644
index 0000000..bcddbd8
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigrator.java
@@ -0,0 +1,730 @@
+/*
+ * RHQ Management Platform
+ * Copyright 2011, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+package org.rhq.server.metrics.migrator;
+
+import static com.datastax.driver.core.querybuilder.QueryBuilder.ttl;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Date;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Queue;
+
+import javax.persistence.EntityManager;
+import javax.persistence.Query;
+
+import com.datastax.driver.core.ResultSetFuture;
+import com.datastax.driver.core.Session;
+import com.datastax.driver.core.querybuilder.Batch;
+import com.datastax.driver.core.querybuilder.QueryBuilder;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.rhq.server.metrics.domain.AggregateType;
+import org.rhq.server.metrics.domain.MetricsTable;
+
+
+/**
+ * @author Stefan Negrea
+ *
+ */
+public class DataMigrator {
+
+ public enum DatabaseType {
+ Postgres, Oracle
+ }
+
+ private final Log log = LogFactory.getLog(DataMigrator.class);
+
+
+ private static final int MAX_RECORDS_TO_LOAD_FROM_SQL = 30000;
+ private static final int MAX_RAW_BATCH_TO_CASSANDRA = 100;
+ private static final int MAX_AGGREGATE_BATCH_TO_CASSANDRA = 50;
+ private static final int MAX_NUMBER_OF_FAILURES = 5;
+ private static final long NUMBER_OF_BATCHES_FOR_ESTIMATION = 4;
+ private static final double UNDER_ESTIMATION_FACTOR = .10;
+
+
+ private enum MigrationQuery {
+ SELECT_1H_DATA("SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1H"),
+ SELECT_6H_DATA("SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_6H"),
+ SELECT_1D_DATA("SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1D"),
+
+ DELETE_1H_DATA("DELETE FROM RHQ_MEASUREMENT_DATA_NUM_1H"),
+ DELETE_6H_DATA("DELETE FROM RHQ_MEASUREMENT_DATA_NUM_6H"),
+ DELETE_1D_DATA("DELETE FROM RHQ_MEASUREMENT_DATA_NUM_1D"),
+
+
+ COUNT_1H_DATA("SELECT COUNT(*) FROM RHQ_MEASUREMENT_DATA_NUM_1H"),
+ COUNT_6H_DATA("SELECT COUNT(*) FROM RHQ_MEASUREMENT_DATA_NUM_6H"),
+ COUNT_1D_DATA("SELECT COUNT(*) FROM RHQ_MEASUREMENT_DATA_NUM_1D"),
+
+ COUNT_RAW("SELECT COUNT(*) FROM %s"),
+ SELECT_RAW_DATA("SELECT schedule_id, time_stamp, value FROM %s"),
+ DELETE_RAW_ALL_DATA("DELETE FROM %s"),
+ DELETE_RAW_ENTRY("DELETE FROM %s WHERE schedule_id = ?");
+
+ public static final int SCHEDULE_INDEX = 0;
+ public static final int TIMESTAMP_INDEX = 1;
+ public static final int VALUE_INDEX = 2;
+ public static final int MIN_VALUE_INDEX = 3;
+ public static final int MAX_VALUE_INDEX = 4;
+
+ private String query;
+
+ private MigrationQuery(String query){
+ this.query = query;
+ }
+
+ /**
+ * @return the query
+ */
+ public String getQuery() {
+ return query;
+ }
+
+ @Override
+ public String toString() {
+ return query;
+ }
+ }
+
+ private final EntityManager entityManager;
+ private final Session session;
+ private final DatabaseType databaseType;
+ private final boolean experimentalDataSource;
+
+ private boolean deleteDataImmediatelyAfterMigration;
+ private boolean deleteAllDataAtEndOfMigration;
+
+ private boolean runRawDataMigration;
+ private boolean run1HAggregateDataMigration;
+ private boolean run6HAggregateDataMigration;
+ private boolean run1DAggregateDataMigration;
+
+ private long estimation;
+
+ public DataMigrator(EntityManager entityManager, Session session, DatabaseType databaseType) {
+ this(entityManager, session, databaseType, false);
+ }
+
+ public DataMigrator(EntityManager entityManager, Session session, DatabaseType databaseType,
+ boolean experimentalDataSource) {
+ this.entityManager = entityManager;
+ this.session = session;
+ this.databaseType = databaseType;
+
+ this.experimentalDataSource = experimentalDataSource;
+
+ this.deleteDataImmediatelyAfterMigration = false;
+ this.deleteAllDataAtEndOfMigration = false;
+ this.runRawDataMigration = true;
+ this.run1HAggregateDataMigration = true;
+ this.run6HAggregateDataMigration = true;
+ this.run1DAggregateDataMigration = true;
+ }
+
+ public void runRawDataMigration(boolean value) {
+ this.runRawDataMigration = value;
+ }
+
+ public void run1HAggregateDataMigration(boolean value) {
+ this.run1HAggregateDataMigration = value;
+ }
+
+ public void run6HAggregateDataMigration(boolean value) {
+ this.run6HAggregateDataMigration = value;
+ }
+
+ public void run1DAggregateDataMigration(boolean value) {
+ this.run1DAggregateDataMigration = value;
+ }
+
+
+ public void deleteDataImmediatelyAfterMigration() {
+ this.deleteDataImmediatelyAfterMigration = true;
+ this.deleteAllDataAtEndOfMigration = false;
+ }
+
+ public void deleteAllDataAtEndOfMigration() {
+ this.deleteAllDataAtEndOfMigration = true;
+ this.deleteDataImmediatelyAfterMigration = false;
+ }
+
+ public void preserveData() {
+ this.deleteAllDataAtEndOfMigration = false;
+ this.deleteDataImmediatelyAfterMigration = false;
+ }
+
+ public long estimate() throws Exception {
+ this.estimation = 0;
+ if (runRawDataMigration) {
+ retryOnFailure(new RawDataMigrator(), Task.Estimate);
+ }
+
+ if (run1HAggregateDataMigration) {
+ retryOnFailure(new AggregateDataMigrator(MetricsTable.ONE_HOUR), Task.Estimate);
+ }
+
+ if (run6HAggregateDataMigration) {
+ retryOnFailure(new AggregateDataMigrator(MetricsTable.SIX_HOUR), Task.Estimate);
+ }
+
+ if (run1DAggregateDataMigration) {
+ retryOnFailure(new AggregateDataMigrator(MetricsTable.TWENTY_FOUR_HOUR), Task.Estimate);
+ }
+
+ if (deleteAllDataAtEndOfMigration) {
+ retryOnFailure(new DeleteAllData(), Task.Estimate);
+ }
+
+ estimation = (long) (estimation + estimation * UNDER_ESTIMATION_FACTOR);
+
+ return estimation;
+ }
+
+ public void migrateData() throws Exception {
+ if (runRawDataMigration) {
+ retryOnFailure(new RawDataMigrator(), Task.Migrate);
+ }
+
+ if (run1HAggregateDataMigration) {
+ retryOnFailure(new AggregateDataMigrator(MetricsTable.ONE_HOUR), Task.Migrate);
+ }
+
+ if (run6HAggregateDataMigration) {
+ retryOnFailure(new AggregateDataMigrator(MetricsTable.SIX_HOUR), Task.Migrate);
+ }
+
+ if (run1DAggregateDataMigration) {
+ retryOnFailure(new AggregateDataMigrator(MetricsTable.TWENTY_FOUR_HOUR), Task.Migrate);
+ }
+
+ if (deleteAllDataAtEndOfMigration) {
+ retryOnFailure(new DeleteAllData(), Task.Migrate);
+ }
+ }
+
+ public void deleteOldData() throws Exception {
+ if (deleteAllDataAtEndOfMigration) {
+ retryOnFailure(new DeleteAllData(), Task.Migrate);
+ }
+ }
+
+ /**
+ * Retries the migration {@link #MAX_NUMBER_OF_FAILURES} times before
+ * failing the migration operation.
+ *
+     * @param migrator the migration worker to run
+     * @param task whether to estimate or actually migrate
+     * @throws Exception the last failure, if all retries are exhausted
+ */
+ private Thread retryOnFailure(final CallableMigrationWorker migrator, final Task task)
+ throws Exception {
+
+ RunnableWithException runnable = new RunnableWithException() {
+ private Exception exception;
+
+ @Override
+ public void run() {
+ int numberOfFailures = 0;
+ Exception caughtException = null;
+
+ log.info(migrator.getClass());
+
+ while (numberOfFailures < MAX_NUMBER_OF_FAILURES) {
+ try {
+ if (task == Task.Estimate) {
+ estimation += migrator.estimate();
+ } else {
+ migrator.migrate();
+ }
+ return;
+ } catch (Exception e) {
+ log.error("Migrator " + migrator.getClass() + " failed. Retrying!", e);
+
+ caughtException = e;
+ numberOfFailures++;
+ }
+ }
+
+ this.exception = caughtException;
+ }
+
+ @Override
+ public Exception getException() {
+ return this.exception;
+ }
+ };
+
+ Thread localThread = new Thread(runnable);
+ localThread.start();
+ localThread.join();
+
+ if (runnable.getException() != null) {
+ throw runnable.getException();
+ }
+
+ return localThread;
+ }
+
+ /**
+ * Returns a list of all the raw SQL metric tables.
+     * There is no equivalent in Cassandra; all raw data is stored in a single column family.
+ *
+ * @return SQL raw metric tables
+ */
+ private String[] getRawDataTables() {
+ int tableCount = 15;
+ String tablePrefix = "RHQ_MEAS_DATA_NUM_R";
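+        // builds RHQ_MEAS_DATA_NUM_R00 through RHQ_MEAS_DATA_NUM_R14, zero-padding single digits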
+
+ String[] tables = new String[tableCount];
+ for (int i = 0; i < tableCount; i++) {
+ if (i < 10) {
+ tables[i] = tablePrefix + "0" + i;
+ } else {
+ tables[i] = tablePrefix + i;
+ }
+ }
+
+ return tables;
+ }
+
+ private ExistingDataSource getExistingDataSource(EntityManager entityManager, String query) {
+ if (this.databaseType == DatabaseType.Oracle) {
+ return new ExistingDataJPASource(entityManager, query);
+ } else {
+ if (!experimentalDataSource) {
+ return new ExistingDataJPASource(entityManager, query);
+ } else {
+ return new ExistingPostgresDataBulkExportSource(entityManager, query);
+ }
+ }
+
+ }
+
+ private enum Task {
+ Migrate, Estimate
+ }
+
+ private interface CallableMigrationWorker {
+
+
+ long estimate() throws Exception;
+
+ void migrate() throws Exception;
+ }
+
+ private interface RunnableWithException extends Runnable {
+ Exception getException();
+ }
+
+ private class AggregateDataMigrator implements CallableMigrationWorker {
+
+ private final String selectQuery;
+ private final String deleteQuery;
+ private final String countQuery;
+ private final MetricsTable metricsTable;
+
+ /**
+         * @param metricsTable the aggregate metrics table to migrate
+         * @throws Exception if the table is not an aggregate metrics table
+ */
+ public AggregateDataMigrator(MetricsTable metricsTable) throws Exception {
+ this.metricsTable = metricsTable;
+
+ if (MetricsTable.ONE_HOUR.equals(this.metricsTable)) {
+ this.selectQuery = MigrationQuery.SELECT_1H_DATA.toString();
+ this.deleteQuery = MigrationQuery.DELETE_1H_DATA.toString();
+ this.countQuery = MigrationQuery.COUNT_1H_DATA.toString();
+ } else if (MetricsTable.SIX_HOUR.equals(this.metricsTable)) {
+ this.selectQuery = MigrationQuery.SELECT_6H_DATA.toString();
+ this.deleteQuery = MigrationQuery.DELETE_6H_DATA.toString();
+ this.countQuery = MigrationQuery.COUNT_6H_DATA.toString();
+ } else if (MetricsTable.TWENTY_FOUR_HOUR.equals(this.metricsTable)) {
+ this.selectQuery = MigrationQuery.SELECT_1D_DATA.toString();
+ this.deleteQuery = MigrationQuery.DELETE_1D_DATA.toString();
+ this.countQuery = MigrationQuery.COUNT_1D_DATA.toString();
+ } else {
+ throw new Exception("MetricsTable " + metricsTable.toString() + " not supported by this migrator.");
+ }
+ }
+
+ @Override
+ public long estimate() throws Exception {
+ Query nativeQuery = entityManager.createNativeQuery(this.countQuery);
+ long recordCount = Long.parseLong(nativeQuery.getSingleResult().toString());
+ long estimatedTimeToMigrate = this.performMigration(Task.Estimate);
+
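+            // estimatedTimeToMigrate covers NUMBER_OF_BATCHES_FOR_ESTIMATION sample batches;
+            // scale it by the total batch count (recordCount / MAX_RECORDS_TO_LOAD_FROM_SQL):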
+ long estimation = (recordCount / (long) MAX_RECORDS_TO_LOAD_FROM_SQL / NUMBER_OF_BATCHES_FOR_ESTIMATION)
+ * estimatedTimeToMigrate;
+ return estimation;
+ }
+
+ public void migrate() throws Exception {
+ performMigration(Task.Migrate);
+ if (deleteDataImmediatelyAfterMigration) {
+ deleteTableData();
+ }
+ }
+
+ private void deleteTableData() throws Exception {
+ int failureCount = 0;
+ while (failureCount < MAX_NUMBER_OF_FAILURES) {
+ try {
+ entityManager.getTransaction().begin();
+ Query nativeQuery = entityManager.createNativeQuery(this.deleteQuery);
+ nativeQuery.executeUpdate();
+ entityManager.getTransaction().commit();
+ log.info("- " + metricsTable.toString() + " - Cleaned -");
+ } catch (Exception e) {
+ log.error("Failed to delete " + metricsTable.toString()
+ + " data. Attempting to delete data one more time...");
+
+ failureCount++;
+ if (failureCount == MAX_NUMBER_OF_FAILURES) {
+ throw e;
+ }
+ }
+ }
+ }
+
+ private long performMigration(Task task) throws Exception {
+ long migrationStartTime = System.currentTimeMillis();
+ long numberOfBatchesMigrated = 0;
+
+ List<Object[]> existingData;
+ int failureCount;
+
+ int lastMigratedRecord = 0;
+ ExistingDataSource dataSource = getExistingDataSource(entityManager, selectQuery);
+ dataSource.initialize();
+
+ while (true) {
+ existingData = dataSource.getData(lastMigratedRecord, MAX_RECORDS_TO_LOAD_FROM_SQL);
+
+ if (existingData.size() == 0) {
+ break;
+ }
+
+ lastMigratedRecord += existingData.size();
+
+ failureCount = 0;
+ while (failureCount < MAX_NUMBER_OF_FAILURES) {
+ try {
+ insertDataToCassandra(existingData);
+ break;
+ } catch (Exception e) {
+ log.error("Failed to insert " + metricsTable.toString()
+ + " data. Attempting to insert the current batch of data one more time");
+ log.error(e);
+
+ failureCount++;
+ if (failureCount == MAX_NUMBER_OF_FAILURES) {
+ throw e;
+ }
+ }
+ }
+
+ log.info("- " + metricsTable + " - " + lastMigratedRecord + " -");
+
+ numberOfBatchesMigrated++;
+ if (Task.Estimate.equals(task) && numberOfBatchesMigrated >= NUMBER_OF_BATCHES_FOR_ESTIMATION) {
+ break;
+ }
+ }
+
+ dataSource.close();
+
+ return System.currentTimeMillis() - migrationStartTime;
+ }
+
+ private void insertDataToCassandra(List<Object[]> existingData)
+ throws Exception {
+ List<ResultSetFuture> resultSetFutures = new ArrayList<ResultSetFuture>();
+ Batch batch = QueryBuilder.batch();
+ int batchSize = 0;
+
+ //only need approximate TTL to speed up processing
+ //given that each batch is processed within seconds, getting the
+ //system time once per batch has minimal impact on the record retention
+ long creationTimeMillis;
+ long itemTTLSeconds;
+ long currentTimeMillis = System.currentTimeMillis();
+ long expectedTTLMillis = metricsTable.getTTLinMilliseconds() * 10;
+
+
+ for (Object[] rawMeasurement : existingData) {
+ creationTimeMillis = Long.parseLong(rawMeasurement[MigrationQuery.TIMESTAMP_INDEX].toString());
+ itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + creationTimeMillis) / 1000l;
+
+ if(itemTTLSeconds > 0 ){
+ batch.add(QueryBuilder.insertInto(metricsTable.toString())
+ .value("schedule_id",Integer.parseInt(rawMeasurement[MigrationQuery.SCHEDULE_INDEX].toString()))
+ .value("time", new Date(creationTimeMillis))
+ .value("type", AggregateType.AVG.ordinal())
+ .value("value", Double.parseDouble(rawMeasurement[MigrationQuery.VALUE_INDEX].toString()))
+ .using(ttl((int) itemTTLSeconds)));
+
+ batch.add(QueryBuilder.insertInto(metricsTable.toString())
+ .value("schedule_id", Integer.parseInt(rawMeasurement[MigrationQuery.SCHEDULE_INDEX].toString()))
+ .value("time", new Date(creationTimeMillis))
+ .value("type", AggregateType.MIN.ordinal())
+ .value("value", Double.parseDouble(rawMeasurement[MigrationQuery.MIN_VALUE_INDEX].toString()))
+ .using(ttl((int) itemTTLSeconds)));
+
+ batch.add(QueryBuilder.insertInto(metricsTable.toString())
+ .value("schedule_id", Integer.parseInt(rawMeasurement[MigrationQuery.SCHEDULE_INDEX].toString()))
+ .value("time", new Date(creationTimeMillis))
+ .value("type", AggregateType.MAX.ordinal())
+ .value("value", Double.parseDouble(rawMeasurement[MigrationQuery.MAX_VALUE_INDEX].toString()))
+ .using(ttl((int) itemTTLSeconds)));
+
+ batchSize += 3;
+ }
+
+ if (batchSize >= MAX_AGGREGATE_BATCH_TO_CASSANDRA) {
+ resultSetFutures.add(session.executeAsync(batch));
+ batch = QueryBuilder.batch();
+ batchSize = 0;
+ }
+ }
+
+ if (batchSize != 0) {
+ resultSetFutures.add(session.executeAsync(batch));
+ }
+
+ for (ResultSetFuture future : resultSetFutures) {
+ future.get();
+ }
+ }
+ }
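+
+    /* Worked example of the approximate TTL arithmetic used by both migrators
+     * (hypothetical numbers): with a table TTL of 14 days, expectedTTLMillis is
+     * 140 days. For a record created 100 days ago,
+     *   itemTTLSeconds = (140 days - now + (now - 100 days)) / 1000 = 40 days in seconds,
+     * so the record is written with a 40-day TTL; records older than 140 days
+     * produce a non-positive TTL and are skipped entirely.
+     */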
+
+
+ private class RawDataMigrator implements CallableMigrationWorker {
+
+ Queue<String> tablesNotProcessed = new LinkedList<String>(Arrays.asList(getRawDataTables()));
+
+ public long estimate() throws Exception {
+ long recordCount = 0;
+ for (String table : getRawDataTables()) {
+ String countQuery = String.format(MigrationQuery.COUNT_RAW.toString(), table);
+ Query nativeQuery = entityManager.createNativeQuery(countQuery);
+ recordCount += Long.parseLong(nativeQuery.getSingleResult().toString());
+ }
+
+ long estimatedTimeToMigrate = this.performMigration(Task.Estimate);
+ long estimation = (recordCount / (long) MAX_RECORDS_TO_LOAD_FROM_SQL / NUMBER_OF_BATCHES_FOR_ESTIMATION)
+ * estimatedTimeToMigrate;
+ return estimation;
+ }
+
+ public void migrate() throws Exception {
+ performMigration(Task.Migrate);
+ }
+
+ private long performMigration(Task task) throws Exception {
+ long migrationStartTime = System.currentTimeMillis();
+ long numberOfBatchesMigrated = 0;
+
+ List<Object[]> existingData;
+ int failureCount;
+
+ while (!tablesNotProcessed.isEmpty()) {
+ String table = tablesNotProcessed.peek();
+
+ String selectQuery = String.format(MigrationQuery.SELECT_RAW_DATA.toString(), table);
+
+ ExistingDataSource dataSource = getExistingDataSource(entityManager, selectQuery);
+ dataSource.initialize();
+
+ log.info("Start migrating raw table: " + table);
+
+ int lastMigratedRecord = 0;
+ while (true) {
+ existingData = dataSource.getData(lastMigratedRecord, MAX_RECORDS_TO_LOAD_FROM_SQL);
+
+ if (existingData.size() == 0) {
+ break;
+ }
+
+ lastMigratedRecord += existingData.size();
+
+ failureCount = 0;
+ while (failureCount < MAX_NUMBER_OF_FAILURES) {
+ try {
+ insertDataToCassandra(existingData);
+ break;
+ } catch (Exception e) {
+ log.error("Failed to insert " + MetricsTable.RAW.toString()
+ + " data. Attempting to insert the current batch of data one more time");
+ log.error(e);
+
+
+ failureCount++;
+                            if (failureCount == MAX_NUMBER_OF_FAILURES) {
+ throw e;
+ }
+ }
+ }
+
+ log.info("- " + table + " - " + lastMigratedRecord + " -");
+
+ numberOfBatchesMigrated++;
+ if (Task.Estimate.equals(task) && numberOfBatchesMigrated >= NUMBER_OF_BATCHES_FOR_ESTIMATION) {
+ break;
+ }
+ }
+
+ if (Task.Migrate.equals(task)) {
+ log.info("Done migrating raw table" + table + "---------------------");
+
+ if (deleteDataImmediatelyAfterMigration) {
+ deleteTableData(table);
+ }
+                } else if (numberOfBatchesMigrated >= NUMBER_OF_BATCHES_FOR_ESTIMATION) {
+                    dataSource.close();
+                    break;
+                }
+
+ dataSource.close();
+ tablesNotProcessed.poll();
+ }
+
+ return System.currentTimeMillis() - migrationStartTime;
+ }
+
+ private void deleteTableData(String table) throws Exception {
+            String deleteQuery = String.format(MigrationQuery.DELETE_RAW_ALL_DATA.toString(), table);
+ int failureCount = 0;
+ while (failureCount < MAX_NUMBER_OF_FAILURES) {
+ try {
+ entityManager.getTransaction().begin();
+ Query nativeQuery = entityManager.createNativeQuery(deleteQuery);
+ nativeQuery.executeUpdate();
+ entityManager.getTransaction().commit();
+ log.info("- " + table + " - Cleaned -");
+ } catch (Exception e) {
+ log.error("Failed to delete " + table + " data. Attempting to delete data one more time...");
+
+ failureCount++;
+ if (failureCount == MAX_NUMBER_OF_FAILURES) {
+ throw e;
+ }
+ }
+ }
+ }
+
+ private void insertDataToCassandra(List<Object[]> existingData) throws Exception {
+ List<ResultSetFuture> resultSetFutures = new ArrayList<ResultSetFuture>();
+ Batch batch = QueryBuilder.batch();
+ int batchSize = 0;
+
+ //only need approximate TTL to speed up processing
+ //given that each batch is processed within seconds, getting the
+ //system time once per batch has minimal impact on the record retention
+ long creationTimeMillis;
+ long itemTTLSeconds;
+ long currentTimeMillis = System.currentTimeMillis();
+ long expectedTTLMillis = MetricsTable.RAW.getTTLinMilliseconds() * 10;
+
+
+ for (Object[] rawDataPoint : existingData) {
+ creationTimeMillis = Long.parseLong(rawDataPoint[MigrationQuery.TIMESTAMP_INDEX].toString());
+ itemTTLSeconds = (expectedTTLMillis - currentTimeMillis + creationTimeMillis) / 1000l;
+
+ if (itemTTLSeconds > 0) {
+ batch.add(QueryBuilder.insertInto(MetricsTable.RAW.toString())
+ .value("schedule_id", Integer.parseInt(rawDataPoint[MigrationQuery.SCHEDULE_INDEX].toString()))
+ .value("time", new Date(creationTimeMillis))
+ .value("value", Double.parseDouble(rawDataPoint[MigrationQuery.VALUE_INDEX].toString()))
+ .using(ttl((int) itemTTLSeconds)));
+ batchSize++;
+ }
+
+ if (batchSize >= MAX_RAW_BATCH_TO_CASSANDRA) {
+ resultSetFutures.add(session.executeAsync(batch));
+ batch = QueryBuilder.batch();
+ batchSize = 0;
+ }
+ }
+
+ if (batchSize != 0) {
+ resultSetFutures.add(session.executeAsync(batch));
+ }
+
+ for (ResultSetFuture future : resultSetFutures) {
+ future.get();
+ }
+ }
+ }
+
+
+ private class DeleteAllData implements CallableMigrationWorker {
+
+ public void migrate() {
+ Query nativeQuery;
+
+ if (run1HAggregateDataMigration) {
+ entityManager.getTransaction().begin();
+ nativeQuery = entityManager.createNativeQuery(MigrationQuery.DELETE_1H_DATA.toString());
+ nativeQuery.executeUpdate();
+ entityManager.getTransaction().commit();
+ log.info("- RHQ_MEASUREMENT_DATA_NUM_1H - Cleaned -");
+ }
+
+ if (run6HAggregateDataMigration) {
+ entityManager.getTransaction().begin();
+ nativeQuery = entityManager.createNativeQuery(MigrationQuery.DELETE_6H_DATA.toString());
+ nativeQuery.executeUpdate();
+ entityManager.getTransaction().commit();
+ log.info("- RHQ_MEASUREMENT_DATA_NUM_6H - Cleaned -");
+ }
+
+ if (run1DAggregateDataMigration) {
+ entityManager.getTransaction().begin();
+ nativeQuery = entityManager.createNativeQuery(MigrationQuery.DELETE_1D_DATA.toString());
+ nativeQuery.executeUpdate();
+ entityManager.getTransaction().commit();
+ log.info("- RHQ_MEASUREMENT_DATA_NUM_1D - Cleaned -");
+ }
+
+ if (runRawDataMigration) {
+ for (String table : getRawDataTables()) {
+ entityManager.getTransaction().begin();
+ String deleteAllData = String.format(MigrationQuery.DELETE_RAW_ALL_DATA.toString(), table);
+ nativeQuery = entityManager.createNativeQuery(deleteAllData);
+ nativeQuery.executeUpdate();
+ entityManager.getTransaction().commit();
+ log.info("- " + table + " - Cleaned -");
+ }
+ }
+ }
+
+ @Override
+ public long estimate() throws Exception {
+            return 300000; // estimate 5 minutes for now, without any database-side checks
+ }
+ }
+}
+
+
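
A minimal usage sketch of the DataMigrator API above (the EntityManager and
Cassandra Session setup is assumed to exist elsewhere):

    DataMigrator migrator = new DataMigrator(entityManager, session, DatabaseType.Postgres);
    migrator.preserveData();                    // keep the SQL data after migration
    long estimatedMillis = migrator.estimate(); // sampled dry run
    migrator.migrateData();                     // full migration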
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigratorRunner.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigratorRunner.java
new file mode 100644
index 0000000..adcd1c0
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/DataMigratorRunner.java
@@ -0,0 +1,668 @@
+/*
+ * RHQ Management Platform
+ * Copyright 2011, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+package org.rhq.server.metrics.migrator;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+
+import javax.persistence.EntityManager;
+import javax.persistence.EntityManagerFactory;
+
+import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.ProtocolOptions.Compression;
+import com.datastax.driver.core.Session;
+import com.datastax.driver.core.SimpleAuthInfoProvider;
+import com.datastax.driver.core.exceptions.NoHostAvailableException;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.log4j.ConsoleAppender;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.PatternLayout;
+import org.hibernate.ejb.Ejb3Configuration;
+
+import org.rhq.server.metrics.migrator.DataMigrator.DatabaseType;
+
+
+/**
+ * @author Stefan Negrea
+ *
+ * Only Postgres is supported by the runner; the data migrator itself, however,
+ * can run against any supported database.
+ *
+ * Maven command to run this from the command line:
+ *
+ * mvn install -DskipTests exec:java -Dexec.mainClass="org.rhq.server.metrics.migrator.DataMigratorRunner"
+ *
+ *
+ */
+@SuppressWarnings({ "static-access", "deprecation" })
+public class DataMigratorRunner {
+
+ private final Log log = LogFactory.getLog(DataMigratorRunner.class);
+
+ //Cassandra
+ private Option cassandraUserOption = OptionBuilder.withLongOpt("cassandra-user").hasArg().withType(String.class)
+ .withDescription("Cassandra user (default: rhqadmin)").create();
+ private Option cassandraPasswordOption = OptionBuilder.withLongOpt("cassandra-password").hasArg()
+ .withDescription("Cassandra password (default: rhqadmin)").withType(String.class).create();
+ private Option cassandraHostsOption = OptionBuilder.withLongOpt("cassandra-hosts").hasArg().withType(String.class)
+ .withDescription("Cassandra hosts, format host_ip_1,host_ip_2,... (default: 127.0.0.1")
+ .create();
+ private Option cassandraPortOption = OptionBuilder.withLongOpt("cassandra-port").hasArg().withType(Integer.class)
+ .withDescription("Cassandra native binary protocol port (default: 9142)").create();
+ private Option cassandraCompressionOption = OptionBuilder.withLongOpt("cassandra-compression").hasOptionalArg()
+ .withType(String.class).withDescription("Enable compression for communication with Cassandra (default: true)")
+ .create();
+
+ //SQL
+ private Option sqlUserOption = OptionBuilder.withLongOpt("sql-user").hasArg().withType(String.class)
+ .withDescription("SQL server user (default: rhqadmin)").create();
+ private Option sqlPasswordOption = OptionBuilder.withLongOpt("sql-password").hasArg().withType(String.class)
+ .withDescription("SQL server password (default: rhqadmin)").create();
+ private Option sqlHostOption = OptionBuilder.withLongOpt("sql-host").hasArg().withType(String.class)
+ .withDescription("SQL server host address (default: localhost)").create();
+ private Option sqlPortOption = OptionBuilder.withLongOpt("sql-port").hasArg().withType(String.class)
+ .withDescription("SQL server port (default: 5432)").create();
+ private Option sqlDBOption = OptionBuilder.withLongOpt("sql-db").hasArg().withType(String.class)
+ .withDescription("SQL database (default: rhq)").create();
+
+ private Option sqlServerType = OptionBuilder.withLongOpt("sql-server-type").hasArg().withType(String.class)
+ .withDescription("SQL server type, only postgres and oracle are supported (default: postgres)").create();
+ private Option sqlPostgresServer = OptionBuilder.withLongOpt("sql-server-postgres").hasOptionalArg()
+ .withType(Boolean.class).withDescription("Postgres SQL server.").create();
+ private Option sqlOracleServer = OptionBuilder.withLongOpt("sql-server-oracle").hasOptionalArg()
+ .withType(Boolean.class)
+ .withDescription("Oracle SQL server.").create();
+
+ //Migration
+ private Option disableRawOption = OptionBuilder.withLongOpt("disable-raw-migration").hasOptionalArg().withType(Boolean.class)
+ .withDescription("Disable raw table migration (default: false)").create();
+ private Option disable1HOption = OptionBuilder.withLongOpt("disable-1h-migration").hasOptionalArg().withType(Boolean.class)
+ .withDescription("Disable 1 hour aggregates table migration (default: false)").create();
+ private Option disable6HOption = OptionBuilder.withLongOpt("disable-6h-migration").hasOptionalArg().withType(Boolean.class)
+ .withDescription("Disable 6 hours aggregates table migration (default: false)").create();
+ private Option disable1DOption = OptionBuilder.withLongOpt("disable-1d-migration").hasOptionalArg().withType(Boolean.class)
+ .withDescription("Disable 24 hours aggregates table migration (default: false)").create();
+ private Option preserveDataOption = OptionBuilder.withLongOpt("preserve-data").hasOptionalArg().withType(Boolean.class)
+ .withDescription("Preserve SQL data post migration (default: true)").create();
+ private Option deleteDataOption = OptionBuilder.withLongOpt("delete-data").hasOptionalArg().withType(Boolean.class)
+ .withDescription("Delete SQL data at the end of migration (default: false)").create();
+ private Option estimateOnlyOption = OptionBuilder.withLongOpt("estimate-only").hasOptionalArg().withType(Boolean.class)
+ .withDescription("Only estimate how long the migration will take (default: false)").create();
+ private Option deleteOnlyOption = OptionBuilder.withLongOpt("delete-only").hasOptionalArg().withType(Boolean.class)
+ .withDescription("Only delete data from the old SQL server, no migration will be performed (default: false)")
+ .create();
+ private Option experimentalExportOption = OptionBuilder.withLongOpt("experimental-export").hasOptionalArg().withType(Boolean.class)
+ .withDescription("Enable experimental bulk export for Postgres, option ignored for Oracle migration (default: false)")
+ .create();
+
+ //Runner
+ private Option helpOption = OptionBuilder.withLongOpt("help").create("h");
+ private Option debugLogOption = OptionBuilder.withLongOpt("debugLog")
+ .withDescription("Enable debug level logs for the communication with Cassandra and SQL Server (default: false)")
+ .create("X");
+ private Option configFileOption = OptionBuilder.withLongOpt("config-file").hasArg()
+ .withDescription("Configuration file. All the command line options can be set in a typical properties file. " +
+ "Command line arguments take precedence over default and configuration file options.")
+ .create();
+
+ private Map<Object, Object> configuration = new HashMap<Object, Object>();
+ private Options options;
+
+ /**
+     * @param args command line arguments
+     * @throws Exception on configuration, parsing, or migration failure
+ */
+ public static void main(String[] args) throws Exception {
+ initLogging();
+ try{
+ DataMigratorRunner runner = new DataMigratorRunner();
+ runner.configure(args);
+ runner.run();
+ } catch (HelpRequestedException h) {
+ //do nothing
+ } catch (Exception e) {
+ e.printStackTrace();
+ System.exit(1);
+ }
+
+ System.exit(0);
+ }
+
+ private static void initLogging() {
+ Logger root = Logger.getRootLogger();
+ if (!root.getAllAppenders().hasMoreElements()) {
+ root.addAppender(new ConsoleAppender(new PatternLayout(PatternLayout.TTCC_CONVERSION_PATTERN)));
+ setLogLevel(Level.ERROR);
+ }
+ }
+
+ private static void setLogLevel(Level level) {
+ Logger root = Logger.getRootLogger();
+ root.setLevel(level);
+
+        Logger cassandraLogging = root.getLoggerRepository().getLogger("org.apache.cassandra.cql.jdbc");
+ cassandraLogging.setLevel(level);
+
+ Logger cassandraDriverLogging = root.getLoggerRepository().getLogger("com.datastax.driver");
+ cassandraDriverLogging.setLevel(level);
+
+ Logger hibernateLogging = root.getLoggerRepository().getLogger("org.hibernate");
+ hibernateLogging.setLevel(Level.ERROR);
+
+ Logger migratorLogging = root.getLoggerRepository().getLogger("org.rhq");
+ if (Level.DEBUG.equals(level)) {
+ migratorLogging.setLevel(Level.ALL);
+ } else {
+ migratorLogging.setLevel(level);
+ }
+ }
+
+ private void configure(String args[]) throws Exception {
+ options = new Options();
+
+ options.addOption(cassandraUserOption);
+ options.addOption(cassandraPasswordOption);
+ options.addOption(cassandraHostsOption);
+ options.addOption(cassandraPortOption);
+ options.addOption(cassandraCompressionOption);
+
+ options.addOption(sqlUserOption);
+ options.addOption(sqlPasswordOption);
+ options.addOption(sqlHostOption);
+ options.addOption(sqlPortOption);
+ options.addOption(sqlDBOption);
+ options.addOption(sqlServerType);
+ options.addOption(sqlPostgresServer);
+ options.addOption(sqlOracleServer);
+
+ options.addOption(disableRawOption);
+ options.addOption(disable1HOption);
+ options.addOption(disable6HOption);
+ options.addOption(disable1DOption);
+ options.addOption(preserveDataOption);
+ options.addOption(deleteDataOption);
+ options.addOption(estimateOnlyOption);
+ options.addOption(deleteOnlyOption);
+ options.addOption(experimentalExportOption);
+
+ options.addOption(helpOption);
+ options.addOption(debugLogOption);
+ options.addOption(configFileOption);
+
+ CommandLine commandLine;
+ try {
+ CommandLineParser parser = new PosixParser();
+ commandLine = parser.parse(options, args);
+ } catch (Exception e) {
+ HelpFormatter formatter = new HelpFormatter();
+ formatter.setWidth(120);
+ formatter.printHelp("DataMigrationRunner", options);
+ throw new Exception("Error parsing command line arguments");
+ }
+
+ if (commandLine.hasOption(helpOption.getLongOpt()) || commandLine.hasOption(helpOption.getOpt())) {
+ HelpFormatter formatter = new HelpFormatter();
+ formatter.printHelp("DataMigrationRunner", options);
+ throw new HelpRequestedException();
+ }
+
+ if (commandLine.hasOption(debugLogOption.getLongOpt()) || commandLine.hasOption(debugLogOption.getOpt())) {
+ DataMigratorRunner.setLogLevel(Level.DEBUG);
+ }
+
+ loadDefaultConfiguration();
+ if (commandLine.hasOption(configFileOption.getLongOpt())) {
+ loadConfigFile(commandLine.getOptionValue(configFileOption.getLongOpt()));
+ }
+
+ parseCassandraOptions(commandLine);
+ parseSQLOptions(commandLine);
+ parseMigrationOptions(commandLine);
+ }
+
+ /**
+ * Add default configuration options to the configuration store.
+ */
+ private void loadDefaultConfiguration() {
+ //default Cassandra configuration
+ configuration.put(cassandraUserOption, "rhqadmin");
+ configuration.put(cassandraPasswordOption, "rhqadmin");
+ configuration.put(cassandraHostsOption, new String[] { "127.0.0.1" });
+ configuration.put(cassandraPortOption, 9042);
+ configuration.put(cassandraCompressionOption, true);
+
+ //default SQL configuration
+ configuration.put(sqlUserOption, "rhqadmin");
+ configuration.put(sqlPasswordOption, "rhqadmin");
+ configuration.put(sqlHostOption, "localhost");
+ configuration.put(sqlPortOption, "5432");
+ configuration.put(sqlDBOption, "rhq");
+ configuration.put(sqlServerType, "postgres");
+
+ //default runner options
+ configuration.put(disableRawOption, false);
+ configuration.put(disable1HOption, false);
+ configuration.put(disable6HOption, false);
+ configuration.put(disable1DOption, false);
+ configuration.put(preserveDataOption, true);
+ configuration.put(estimateOnlyOption, false);
+ configuration.put(deleteOnlyOption, false);
+ configuration.put(experimentalExportOption, false);
+ }
+
+ /**
+ * Load the configuration options from file and overlay them on top of the default
+ * options.
+ *
+ * @param file config file
+ */
+ private void loadConfigFile(String file) {
+ try {
+ File configFile = new File(file);
+ if (!configFile.exists()) {
+ throw new FileNotFoundException("Configuration file not found!");
+ }
+
+ Properties configProperties = new Properties();
+ FileInputStream stream = new FileInputStream(configFile);
+ configProperties.load(stream);
+ stream.close();
+
+ for (Object optionObject : options.getOptions()) {
+ Option option = (Option) optionObject;
+ Object optionValue;
+
+ if ((optionValue = configProperties.get(option.getLongOpt())) != null) {
+ log.debug("Configuration option loaded: " + option.getLongOpt() + " (" + option.getType() + ") -> "
+ + optionValue);
+
+ if (option.equals(cassandraHostsOption)) {
+ String[] cassandraHosts = parseCassandraHosts(optionValue.toString());
+ configuration.put(option, cassandraHosts);
+ } else if (option.equals(sqlServerType)) {
+ if ("oracle".equals(optionValue)) {
+ configuration.put(option, "oracle");
+ } else {
+ configuration.put(option, "postgres");
+ }
+ } else if (option.equals(sqlPostgresServer)) {
+                        if (tryParseBoolean(optionValue.toString(), true)) {
+                            configuration.put(sqlServerType, "postgres");
+ }
+ } else if (option.equals(sqlOracleServer)) {
+                        if (tryParseBoolean(optionValue.toString(), true)) {
+                            configuration.put(sqlServerType, "oracle");
+ }
+ } else if (option.getType().equals(Boolean.class)) {
+ configuration.put(option, tryParseBoolean(optionValue.toString(), true));
+ } else if (option.getType().equals(Integer.class)) {
+ configuration.put(option, tryParseInteger(optionValue.toString(), 0));
+ } else {
+ configuration.put(option, optionValue.toString());
+ }
+ }
+ }
+ } catch (Exception e) {
+ log.error("Unable to load or process the configuration file.", e);
+ System.exit(1);
+ }
+
+ log.debug(configuration.toString());
+ }
+
+    /**
+     * Parse command line options for Cassandra.
+     *
+     * @param commandLine command line
+     * @throws Exception if an option cannot be parsed
+     */
+    private void parseCassandraOptions(CommandLine commandLine) throws Exception {
+ if (commandLine.hasOption(cassandraUserOption.getLongOpt())) {
+ configuration.put(cassandraUserOption, commandLine.getOptionValue(cassandraUserOption.getLongOpt()));
+ }
+
+ if (commandLine.hasOption(cassandraPasswordOption.getLongOpt())) {
+ configuration
+ .put(cassandraPasswordOption, commandLine.getOptionValue(cassandraPasswordOption.getLongOpt()));
+ }
+
+ if (commandLine.hasOption(cassandraHostsOption.getLongOpt())) {
+ String[] cassandraHosts = parseCassandraHosts(commandLine.getOptionValue(cassandraHostsOption.getLongOpt()));
+ configuration.put(cassandraHostsOption, cassandraHosts);
+ }
+
+ if (commandLine.hasOption(cassandraPortOption.getLongOpt())) {
+            Integer cassandraPort = tryParseInteger(commandLine.getOptionValue(cassandraPortOption.getLongOpt()), 9042);
+ configuration.put(cassandraPortOption, cassandraPort);
+ }
+
+ if (commandLine.hasOption(cassandraCompressionOption.getLongOpt())) {
+            boolean value = tryParseBoolean(commandLine.getOptionValue(cassandraCompressionOption.getLongOpt()), true);
+ configuration.put(cassandraCompressionOption, value);
+ }
+ }
+
+    /**
+     * Parse command line options for SQL.
+     *
+     * @param commandLine command line
+     */
+    private void parseSQLOptions(CommandLine commandLine) {
+ if (commandLine.hasOption(sqlUserOption.getLongOpt())) {
+ configuration.put(sqlUserOption, commandLine.getOptionValue(sqlUserOption.getLongOpt()));
+ }
+
+ if (commandLine.hasOption(sqlPasswordOption.getLongOpt())) {
+ configuration.put(sqlPasswordOption, commandLine.getOptionValue(sqlPasswordOption.getLongOpt()));
+ }
+
+ if (commandLine.hasOption(sqlHostOption.getLongOpt())) {
+ configuration.put(sqlHostOption, commandLine.getOptionValue(sqlHostOption.getLongOpt()));
+ }
+
+ if (commandLine.hasOption(sqlPortOption.getLongOpt())) {
+ configuration.put(sqlPortOption, commandLine.getOptionValue(sqlPortOption.getLongOpt()));
+ }
+
+ if (commandLine.hasOption(sqlDBOption.getLongOpt())) {
+ configuration.put(sqlDBOption, commandLine.getOptionValue(sqlDBOption.getLongOpt()));
+ }
+
+ if (commandLine.hasOption(sqlServerType.getLongOpt())) {
+ if ("oracle".equals(commandLine.getOptionValue(sqlServerType.getLongOpt()))) {
+ configuration.put(sqlServerType, "oracle");
+ } else {
+ configuration.put(sqlServerType, "postgres");
+ }
+ } else if (commandLine.hasOption(sqlPostgresServer.getLongOpt())) {
+ configuration.put(sqlServerType, "postgres");
+ } else if (commandLine.hasOption(sqlOracleServer.getLongOpt())) {
+ configuration.put(sqlServerType, "oracle");
+ }
+ }
+
+    /**
+     * Parse command line options for the actual migration process.
+     *
+     * @param commandLine command line
+     */
+    private void parseMigrationOptions(CommandLine commandLine) {
+ boolean value;
+
+ if (commandLine.hasOption(disableRawOption.getLongOpt())) {
+ value = tryParseBoolean(commandLine.getOptionValue(disableRawOption.getLongOpt()), true);
+ configuration.put(disableRawOption, value);
+ }
+
+ if (commandLine.hasOption(disable1HOption.getLongOpt())) {
+ value = tryParseBoolean(commandLine.getOptionValue(disable1HOption.getLongOpt()), true);
+ configuration.put(disable1HOption, value);
+ }
+
+ if (commandLine.hasOption(disable6HOption.getLongOpt())) {
+ value = tryParseBoolean(commandLine.getOptionValue(disable6HOption.getLongOpt()), true);
+ configuration.put(disable6HOption, value);
+ }
+
+ if (commandLine.hasOption(disable1DOption.getLongOpt())) {
+ value = tryParseBoolean(commandLine.getOptionValue(disable1DOption.getLongOpt()), true);
+ configuration.put(disable1DOption, value);
+ }
+
+ if (commandLine.hasOption(preserveDataOption.getLongOpt())) {
+ value = tryParseBoolean(commandLine.getOptionValue(preserveDataOption.getLongOpt()), true);
+ configuration.put(preserveDataOption, value);
+        } else if (commandLine.hasOption(deleteDataOption.getLongOpt())) {
+            value = tryParseBoolean(commandLine.getOptionValue(deleteDataOption.getLongOpt()), true);
+            //deleting the data is the inverse of preserving it
+            configuration.put(preserveDataOption, !value);
+        }
+
+ if (commandLine.hasOption(estimateOnlyOption.getLongOpt())) {
+ value = tryParseBoolean(commandLine.getOptionValue(estimateOnlyOption.getLongOpt()), true);
+ configuration.put(estimateOnlyOption, value);
+ }
+
+ if (commandLine.hasOption(experimentalExportOption.getLongOpt())) {
+ value = tryParseBoolean(commandLine.getOptionValue(experimentalExportOption.getLongOpt()), true);
+ configuration.put(experimentalExportOption, value);
+ }
+ }
+
+ private void run() throws Exception {
+ log.debug("Creating Entity Manager");
+ EntityManager entityManager = this.createEntityManager();
+ log.debug("Done creating Entity Manager");
+
+ log.debug("Creating Cassandra session");
+ Session session = this.createCassandraSession();
+ log.debug("Done creating Cassandra session");
+
+ DatabaseType databaseType = DatabaseType.Postgres;
+ if ("oracle".equals(configuration.get(sqlServerType))) {
+            databaseType = DatabaseType.Oracle;
+ }
+ DataMigrator migrator = new DataMigrator(entityManager, session, databaseType, tryParseBoolean(
+ configuration.get(experimentalExportOption), false));
+
+ if (!(Boolean) configuration.get(deleteOnlyOption)) {
+ if ((Boolean) configuration.get(preserveDataOption)) {
+ migrator.preserveData();
+ } else {
+ migrator.deleteAllDataAtEndOfMigration();
+ }
+
+ migrator.runRawDataMigration(!(Boolean) configuration.get(disableRawOption));
+ migrator.run1HAggregateDataMigration(!(Boolean) configuration.get(disable1HOption));
+ migrator.run6HAggregateDataMigration(!(Boolean) configuration.get(disable6HOption));
+ migrator.run1DAggregateDataMigration(!(Boolean) configuration.get(disable1DOption));
+
+ System.out.println("Estimation process - starting\n");
+ long estimate = migrator.estimate();
+ System.out.println("The migration process will take approximately: "
+ + TimeUnit.MILLISECONDS.toMinutes(estimate) + " minutes (or " + estimate + " milliseconds)\n");
+ System.out.println("Estimation process - ended\n\n");
+
+ if (!(Boolean) configuration.get(estimateOnlyOption)) {
+ System.out.println("Migration process - starting\n");
+ long startTime = System.currentTimeMillis();
+ migrator.migrateData();
+ long duration = System.currentTimeMillis() - startTime;
+ System.out.println("The migration process took: " + TimeUnit.MILLISECONDS.toMinutes(duration)
+ + " minutes (or " + duration + " milliseconds)\n");
+ System.out.println("Migration process - ended\n");
+ }
+ } else {
+ migrator.deleteAllDataAtEndOfMigration();
+ migrator.runRawDataMigration(true);
+ migrator.run1HAggregateDataMigration(true);
+ migrator.run6HAggregateDataMigration(true);
+ migrator.run1DAggregateDataMigration(true);
+
+ System.out.println("Estimation process - starting\n");
+ long estimate = migrator.estimate();
+ System.out.println("The deletion of old data will take approximately: "
+ + TimeUnit.MILLISECONDS.toMinutes(estimate) + " minutes (or " + estimate + " milliseconds)\n");
+ System.out.println("Estimation process - ended\n\n");
+
+ if (!(Boolean) configuration.get(estimateOnlyOption)) {
+ System.out.println("Old data deletion process - starting\n");
+ long startTime = System.currentTimeMillis();
+ migrator.deleteOldData();
+ long duration = System.currentTimeMillis() - startTime;
+ System.out.println("The deletion process took: " + TimeUnit.MILLISECONDS.toMinutes(duration)
+ + " minutes (or " + duration + " milliseconds)\n");
+ System.out.println("Old data deletion process - ended\n");
+ }
+ }
+
+ /*if (entityManager != null) {
+ entityManager.close();
+ }
+ if (entityManagerFactory != null) {
+ entityManagerFactory.close();
+ }*/
+ }
+
+ /**
+ * Create a Cassandra session based on configuration options.
+ *
+ * @return Cassandra session
+ * @throws Exception
+ */
+ private Session createCassandraSession() throws Exception {
+ Compression selectedCompression = Compression.NONE;
+ if ((Boolean) configuration.get(cassandraCompressionOption)) {
+ selectedCompression = Compression.SNAPPY;
+ }
+
+ Cluster cluster = Cluster
+ .builder()
+ .addContactPoints((String[]) configuration.get(cassandraHostsOption))
+ .withPort((Integer) configuration.get(cassandraPortOption))
+ .withCompression(selectedCompression)
+ .withoutMetrics()
+ .withAuthInfoProvider(
+ new SimpleAuthInfoProvider().add("username", (String) configuration.get(cassandraUserOption)).add(
+ "password", (String) configuration.get(cassandraPasswordOption))).build();
+
+ return cluster.connect("rhq");
+ }
+
+    /**
+     * Create a Hibernate entity manager connected to the SQL server.
+     *
+     * @return entity manager
+     * @throws Exception if the JDBC driver cannot be loaded
+     */
+ private EntityManager createEntityManager() throws Exception {
+ Properties properties = new Properties();
+ properties.put("javax.persistence.provider", "org.hibernate.ejb.HibernatePersistence");
+ properties.put("hibernate.connection.username", (String) configuration.get(sqlUserOption));
+ properties.put("hibernate.connection.password", (String) configuration.get(sqlPasswordOption));
+
+ if ("oracle".equals(configuration.get(sqlServerType))) {
+ String driverClassName = "oracle.jdbc.driver.OracleDriver";
+
+ try {
+ //Required to preload the driver manually.
+ //Without this the driver load will fail due to the packaging.
+ Class.forName(driverClassName);
+ } catch (ClassNotFoundException e) {
+ log.debug(e);
+ throw new Exception("Oracle SQL Driver class could not be loaded. Missing class: " + driverClassName);
+ }
+
+ properties.put("hibernate.dialect", "org.hibernate.dialect.Oracle10gDialect");
+ properties.put("hibernate.driver_class", driverClassName);
+ properties.put("hibernate.connection.url", "jdbc:oracle:thin:@" + (String) configuration.get(sqlHostOption)
+ + ":" + (String) configuration.get(sqlPortOption) + ":" + (String) configuration.get(sqlDBOption));
+ properties.put("hibernate.default_schema", (String) configuration.get(sqlDBOption));
+ } else {
+ String driverClassName = "org.postgresql.Driver";
+
+ try {
+ //Required to preload the driver manually.
+ //Without this the driver load will fail due to the packaging.
+ Class.forName(driverClassName);
+ } catch (ClassNotFoundException e) {
+ log.debug(e);
+ throw new Exception("Postgres SQL Driver class could not be loaded. Missing class: " + driverClassName);
+ }
+
+ properties.put("hibernate.dialect", "org.hibernate.dialect.PostgreSQLDialect");
+ properties.put("hibernate.driver_class", driverClassName);
+ properties.put("hibernate.connection.url", "jdbc:postgresql://" + (String) configuration.get(sqlHostOption)
+ + ":" + (String) configuration.get(sqlPortOption) + "/" + (String) configuration.get(sqlDBOption));
+ }
+
+ log.debug("Creating entity manager with the following configuration:");
+ log.debug(properties);
+
+        Ejb3Configuration hibernateConfiguration = new Ejb3Configuration();
+        hibernateConfiguration.setProperties(properties);
+        EntityManagerFactory factory = hibernateConfiguration.buildEntityManagerFactory();
+ return factory.createEntityManager();
+ }
+
+    /**
+     * Parse Cassandra host information submitted as a comma-separated list of
+     * host addresses: host_address,host_address_2
+     *
+     * @param stringValue comma-separated list of hosts
+     * @return array of host addresses
+     */
+ private String[] parseCassandraHosts(String stringValue) {
+ String[] seeds = stringValue.split(",");
+ return seeds;
+ }
+
+    /**
+     * @param value object value to parse
+     * @param defaultValue default value returned if parsing fails
+     * @return the parsed boolean value or the default
+     */
+ private boolean tryParseBoolean(Object value, boolean defaultValue) {
+ try {
+ return Boolean.parseBoolean(value.toString());
+ } catch (Exception e) {
+ return defaultValue;
+ }
+ }
+
+    /**
+     * @param value object value to parse
+     * @param defaultValue default value returned if parsing fails
+     * @return the parsed integer value or the default
+     */
+ private Integer tryParseInteger(Object value, int defaultValue) {
+ try {
+ return Integer.parseInt(value.toString());
+ } catch (Exception e) {
+ return defaultValue;
+ }
+ }
+
+ @SuppressWarnings("serial")
+ private class HelpRequestedException extends Exception {
+ public HelpRequestedException() {
+ super("Help Requested");
+ }
+ }
+}
\ No newline at end of file
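A minimal usage sketch of the DataMigrator API exercised by run() above, assuming an EntityManager and Session built as in createEntityManager() and createCassandraSession(); DatabaseType is the enum run() references and is assumed to live in the migrator package:

    package org.rhq.server.metrics.migrator;

    import java.util.concurrent.TimeUnit;

    import javax.persistence.EntityManager;

    import com.datastax.driver.core.Session;

    public class MigrationSketch {

        //DatabaseType is assumed to be the enum referenced by run() above
        public static void migrate(EntityManager entityManager, Session session) throws Exception {
            DataMigrator migrator = new DataMigrator(entityManager, session, DatabaseType.Postgres, false);

            migrator.preserveData(); //keep the SQL data after migration
            migrator.runRawDataMigration(true); //migrate raw measurements
            migrator.run1HAggregateDataMigration(true); //and the 1h/6h/1d aggregates
            migrator.run6HAggregateDataMigration(true);
            migrator.run1DAggregateDataMigration(true);

            long estimate = migrator.estimate(); //cost estimate in milliseconds
            System.out.println("Estimated time: " + TimeUnit.MILLISECONDS.toMinutes(estimate) + " minutes");

            migrator.migrateData();
        }
    }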
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingDataBulkExportSource.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingDataBulkExportSource.java
new file mode 100644
index 0000000..756d1b1
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingDataBulkExportSource.java
@@ -0,0 +1,124 @@
+/*
+ * RHQ Management Platform
+ * Copyright (C) 2005-2013 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+package org.rhq.server.metrics.migrator;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Random;
+import java.util.StringTokenizer;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.rhq.core.util.stream.StreamUtil;
+
+/**
+ * @author Thomas Segismont
+ */
+public abstract class ExistingDataBulkExportSource implements ExistingDataSource {
+
+ private static final Log log = LogFactory.getLog(ExistingDataBulkExportSource.class);
+
+ protected static final int IO_BUFFER_SIZE = 1024 * 64;
+
+ protected static final String DELIMITER = "|";
+
+ private File existingDataFile;
+
+ private BufferedReader existingDataFileReader;
+
+ private int currentIndex;
+
+ public ExistingDataBulkExportSource() {
+ Random random = new Random();
+ String exportFileName = System.currentTimeMillis() + "." + random.nextInt() + ".export";
+ this.existingDataFile = new File(System.getProperty("java.io.tmpdir"), exportFileName);
+ }
+
+ @Override
+ public void initialize() throws Exception {
+ this.exportExistingData();
+ this.startReading();
+ }
+
+ @Override
+ public void close() throws Exception {
+ this.stopReading();
+ this.removeTempFile();
+ }
+
+ protected abstract void exportExistingData() throws Exception;
+
+ protected File getExistingDataFile() {
+ return existingDataFile;
+ }
+
+ protected void startReading() throws Exception {
+        if (!existingDataFile.exists() || !existingDataFile.isFile() || !existingDataFile.canRead()) {
+            throw new IllegalStateException("Export file is missing or not readable: " + existingDataFile);
+        }
+ existingDataFileReader = new BufferedReader(new FileReader(existingDataFile));
+ currentIndex = 0;
+ }
+
+ protected void stopReading() {
+ StreamUtil.safeClose(existingDataFileReader);
+ }
+
+    protected void removeTempFile() {
+        if (this.existingDataFile.exists()) {
+            try {
+                this.existingDataFile.delete();
+            } catch (Exception e) {
+                log.debug("Unable to clean temporary file " + this.existingDataFile, e);
+            }
+        }
+    }
+
+ @Override
+ public List<Object[]> getData(int fromIndex, int maxResults) throws Exception {
+ if (log.isDebugEnabled()) {
+ log.debug("Reading lines " + fromIndex + " to " + (fromIndex + maxResults));
+ }
+
+        if (fromIndex != currentIndex) {
+            throw new IllegalStateException("Rows must be read sequentially; expected index " + currentIndex
+                + " but got " + fromIndex);
+        }
+
+ List<Object[]> results = new LinkedList<Object[]>();
+ for (int i = 0; i < maxResults; i++) {
+ String nextLine = existingDataFileReader.readLine();
+ if (nextLine == null) {
+ break;
+ }
+ currentIndex++;
+ StringTokenizer stringTokenizer = new StringTokenizer(nextLine, DELIMITER);
+ Object[] row = new Object[stringTokenizer.countTokens()];
+ for (int j = 0; j < row.length; j++) {
+ row[j] = stringTokenizer.nextToken();
+ }
+ results.add(row);
+ }
+ return results;
+ }
+}
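A hedged lifecycle sketch for the bulk export sources (illustrative, not part of the commit): initialize() exports the data and opens the temp file, getData() must be called with strictly sequential indexes, and close() releases the reader and removes the file:

    import java.util.List;

    import org.rhq.server.metrics.migrator.ExistingDataSource;

    public class BulkExportLifecycleSketch {

        public static void drain(ExistingDataSource source) throws Exception {
            source.initialize(); //exports the data and opens the temp file
            try {
                int rowIndex = 0;
                int batchSize = 30000;
                for (;;) {
                    List<Object[]> batch = source.getData(rowIndex, batchSize);
                    //process the batch here
                    if (batch.size() < batchSize) {
                        break; //a short page means the export is exhausted
                    }
                    rowIndex += batchSize;
                }
            } finally {
                source.close(); //releases the reader and deletes the temp file
            }
        }
    }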
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingDataJPABulkExportSource.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingDataJPABulkExportSource.java
new file mode 100644
index 0000000..fde9096
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingDataJPABulkExportSource.java
@@ -0,0 +1,58 @@
+package org.rhq.server.metrics.migrator;
+
+import java.io.BufferedWriter;
+import java.io.FileWriter;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+
+import javax.persistence.EntityManager;
+
+import org.hibernate.Session;
+import org.hibernate.engine.spi.SessionFactoryImplementor;
+import org.hibernate.service.jdbc.connections.spi.ConnectionProvider;
+
+import org.rhq.core.util.jdbc.JDBCUtil;
+import org.rhq.core.util.stream.StreamUtil;
+
+/**
+ * @author Thomas Segismont
+ */
+public class ExistingDataJPABulkExportSource extends ExistingDataBulkExportSource {
+
+ private EntityManager entityManager;
+ private String selectNativeQuery;
+
+    public ExistingDataJPABulkExportSource(EntityManager entityManager, String selectNativeQuery) {
+        super();
+        this.entityManager = entityManager;
+        this.selectNativeQuery = selectNativeQuery;
+    }
+
+ protected void exportExistingData() throws Exception {
+ BufferedWriter fileWriter = null;
+ Connection connection = null;
+ PreparedStatement statement = null;
+ ResultSet resultSet = null;
+ try {
+ fileWriter = new BufferedWriter(new FileWriter(getExistingDataFile()), IO_BUFFER_SIZE);
+ Session session = (Session) this.entityManager.getDelegate();
+ SessionFactoryImplementor sfi = (SessionFactoryImplementor) session.getSessionFactory();
+ ConnectionProvider cp = sfi.getConnectionProvider();
+ connection = cp.getConnection();
+ statement = connection.prepareStatement(this.selectNativeQuery);
+ resultSet = statement.executeQuery();
+ int columnCount = resultSet.getMetaData().getColumnCount();
+ while (resultSet.next()) {
+ for (int i = 1; i < columnCount + 1; i++) {
+ if (i > 1) {
+ fileWriter.write(DELIMITER);
+ }
+ fileWriter.write(resultSet.getString(i));
+ }
+ fileWriter.write("\n");
+ }
+ } finally {
+ StreamUtil.safeClose(fileWriter);
+ JDBCUtil.safeClose(connection, statement, resultSet);
+ }
+ }
+}
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingDataJPASource.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingDataJPASource.java
new file mode 100644
index 0000000..9aca36a
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingDataJPASource.java
@@ -0,0 +1,64 @@
+/*
+ * RHQ Management Platform
+ * Copyright (C) 2005-2013 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+package org.rhq.server.metrics.migrator;
+
+import java.util.List;
+
+import javax.persistence.EntityManager;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * @author Thomas Segismont
+ */
+public class ExistingDataJPASource implements ExistingDataSource {
+
+ private static final Log log = LogFactory.getLog(ExistingDataJPASource.class);
+
+ private EntityManager entityManager;
+ private String selectNativeQuery;
+
+ public ExistingDataJPASource(EntityManager entityManager, String selectNativeQuery) {
+ this.entityManager = entityManager;
+ this.selectNativeQuery = selectNativeQuery;
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public List<Object[]> getData(int fromIndex, int maxResults) throws Exception {
+ if (log.isDebugEnabled()) {
+ log.debug("Reading lines " + fromIndex + " to " + (fromIndex + maxResults));
+ }
+
+ return (List<Object[]>) entityManager.createNativeQuery(selectNativeQuery).setFirstResult(fromIndex)
+ .setMaxResults(maxResults).getResultList();
+ }
+
+ @Override
+ public void initialize() {
+ //nothing to do since it just implements a simple query with limits
+ }
+
+ @Override
+ public void close() {
+ //nothing to do since it just implements a simple query with limits
+ }
+}
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingDataSource.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingDataSource.java
new file mode 100644
index 0000000..0096538
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingDataSource.java
@@ -0,0 +1,34 @@
+/*
+ * RHQ Management Platform
+ * Copyright (C) 2005-2013 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+package org.rhq.server.metrics.migrator;
+
+import java.util.List;
+
+/**
+ * @author Thomas Segismont
+ */
+public interface ExistingDataSource {
+
+ List<Object[]> getData(int fromIndex, int maxResults) throws Exception;
+
+ void initialize() throws Exception;
+
+ void close() throws Exception;
+}
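To make the contract concrete, a trivial in-memory implementation (illustrative only, not part of the commit); callers page forward with getData() until a page shorter than maxResults signals the end of the data:

    package org.rhq.server.metrics.migrator;

    import java.util.ArrayList;
    import java.util.List;

    public class InMemoryExistingDataSource implements ExistingDataSource {

        private final List<Object[]> rows;

        public InMemoryExistingDataSource(List<Object[]> rows) {
            this.rows = new ArrayList<Object[]>(rows);
        }

        @Override
        public List<Object[]> getData(int fromIndex, int maxResults) {
            if (fromIndex >= rows.size()) {
                return new ArrayList<Object[]>();
            }
            int toIndex = Math.min(fromIndex + maxResults, rows.size());
            //a page shorter than maxResults tells the caller there is no more data
            return new ArrayList<Object[]>(rows.subList(fromIndex, toIndex));
        }

        @Override
        public void initialize() {
            //nothing to prepare for an in-memory source
        }

        @Override
        public void close() {
            //nothing to release
        }
    }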
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingPostgresDataBulkExportSource.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingPostgresDataBulkExportSource.java
new file mode 100644
index 0000000..563e332
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/migrator/ExistingPostgresDataBulkExportSource.java
@@ -0,0 +1,68 @@
+/*
+ * RHQ Management Platform
+ * Copyright (C) 2005-2013 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+package org.rhq.server.metrics.migrator;
+
+import java.io.BufferedWriter;
+import java.io.FileWriter;
+import java.sql.Connection;
+
+import javax.persistence.EntityManager;
+
+import org.hibernate.Session;
+import org.hibernate.engine.spi.SessionFactoryImplementor;
+import org.hibernate.service.jdbc.connections.spi.ConnectionProvider;
+import org.postgresql.copy.CopyManager;
+import org.postgresql.core.BaseConnection;
+
+import org.rhq.core.util.jdbc.JDBCUtil;
+import org.rhq.core.util.stream.StreamUtil;
+
+/**
+ * @author Thomas Segismont
+ */
+public class ExistingPostgresDataBulkExportSource extends ExistingDataBulkExportSource {
+
+ private EntityManager entityManager;
+ private String selectNativeQuery;
+
+ public ExistingPostgresDataBulkExportSource(EntityManager entityManager, String selectNativeQuery) {
+ super();
+ this.entityManager = entityManager;
+ this.selectNativeQuery = selectNativeQuery;
+ }
+
+ protected void exportExistingData() throws Exception {
+ BufferedWriter fileWriter = null;
+ Connection connection = null;
+ try {
+ fileWriter = new BufferedWriter(new FileWriter(getExistingDataFile()), IO_BUFFER_SIZE);
+ Session session = (Session) entityManager.getDelegate();
+ SessionFactoryImplementor sfi = (SessionFactoryImplementor) session.getSessionFactory();
+ ConnectionProvider cp = sfi.getConnectionProvider();
+ connection = cp.getConnection();
+ CopyManager copyManager = new CopyManager((BaseConnection) connection);
+ copyManager.copyOut("COPY (" + this.selectNativeQuery + ") TO STDOUT WITH DELIMITER '" + DELIMITER + "'",
+ fileWriter);
+ } finally {
+ StreamUtil.safeClose(fileWriter);
+ JDBCUtil.safeClose(connection);
+ }
+ }
+}
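A note on the design: the Postgres source pays for a temp file but needs only a single COPY ... TO STDOUT round trip, while ExistingDataJPASource re-executes a paged query for every batch. A sketch of how a caller might pick between them (the actual selection logic is not shown in this diff; the helper and its parameters are illustrative):

    import javax.persistence.EntityManager;

    import org.rhq.server.metrics.migrator.ExistingDataJPASource;
    import org.rhq.server.metrics.migrator.ExistingDataSource;
    import org.rhq.server.metrics.migrator.ExistingPostgresDataBulkExportSource;

    public class ExportSourceChooser {

        public static ExistingDataSource choose(EntityManager entityManager, String query, boolean isPostgres,
            boolean experimentalExport) {
            if (isPostgres && experimentalExport) {
                //one COPY ... TO STDOUT pass into a temp file, then sequential reads
                return new ExistingPostgresDataBulkExportSource(entityManager, query);
            }
            //paged native query: simpler, but each page runs the query again
            return new ExistingDataJPASource(entityManager, query);
        }
    }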
diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/DataSourceTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/DataSourceTest.java
new file mode 100644
index 0000000..cda04ca
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/DataSourceTest.java
@@ -0,0 +1,169 @@
+/*
+ * RHQ Management Platform
+ * Copyright (C) 2005-2013 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+package org.rhq.server.metrics;
+
+import java.util.List;
+import java.util.Properties;
+
+import javax.persistence.EntityManager;
+import javax.persistence.EntityManagerFactory;
+
+import org.apache.commons.lang.time.StopWatch;
+import org.apache.log4j.BasicConfigurator;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.hibernate.ejb.Ejb3Configuration;
+
+import org.rhq.server.metrics.migrator.ExistingDataBulkExportSource;
+import org.rhq.server.metrics.migrator.ExistingDataJPABulkExportSource;
+import org.rhq.server.metrics.migrator.ExistingPostgresDataBulkExportSource;
+
+/**
+ * @author Thomas Segismont
+ */
+public class DataSourceTest {
+
+    //ExistingPostgresDataBulkExportSource
+ public static void main(String[] args) throws Exception {
+ BasicConfigurator.configure();
+ Logger.getRootLogger().setLevel(Level.INFO);
+ Logger.getLogger("org.rhq").setLevel(Level.DEBUG);
+ EntityManagerFactory entityManagerFactory = null;
+ EntityManager entityManager = null;
+ ExistingDataBulkExportSource source = null;
+ try {
+ entityManagerFactory = createEntityManager();
+ entityManager = entityManagerFactory.createEntityManager();
+ source = new ExistingPostgresDataBulkExportSource(entityManager,
+ "SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1D");
+ StopWatch stopWatch = new StopWatch();
+ stopWatch.start();
+ source.initialize();
+ int rowIndex = 0;
+ int maxResults = 30000;
+ for (;;) {
+ List<Object[]> existingData = source.getData(rowIndex, maxResults);
+ if (existingData.size() < maxResults) {
+ break;
+ } else {
+ rowIndex += maxResults;
+ }
+ }
+ stopWatch.stop();
+ System.out.println("Execution: " + stopWatch);
+ } finally {
+ if (source != null) {
+ source.close();
+ }
+ if (entityManager != null) {
+ entityManager.close();
+ }
+ if (entityManagerFactory != null) {
+ entityManagerFactory.close();
+ }
+ }
+ }
+
+ private static EntityManagerFactory createEntityManager() throws Exception {
+ Properties properties = new Properties();
+ properties.put("javax.persistence.provider", "org.hibernate.ejb.HibernatePersistence");
+ properties.put("hibernate.connection.username", "rhqadmin");
+ properties.put("hibernate.connection.password", "rhqadmin");
+ properties.put("hibernate.dialect", "org.hibernate.dialect.PostgreSQLDialect");
+ String driverClassName = "org.postgresql.Driver";
+ try {
+ //Required to preload the driver manually.
+ //Without this the driver load will fail due to the packaging.
+ Class.forName(driverClassName);
+ } catch (ClassNotFoundException e) {
+ throw new Exception("Postgres SQL Driver class could not be loaded. Missing class: " + driverClassName);
+ }
+ properties.put("hibernate.driver_class", driverClassName);
+ properties.put("hibernate.connection.url", "jdbc:postgresql://localhost:5432/rhq");
+ Ejb3Configuration configuration = new Ejb3Configuration();
+ configuration.setProperties(properties);
+ return configuration.buildEntityManagerFactory();
+ }
+
+ //ExistingDataJPABulkExportSource
+
+ public static void main2(String[] args) throws Exception {
+ BasicConfigurator.configure();
+ Logger.getRootLogger().setLevel(Level.INFO);
+ Logger.getLogger("org.rhq").setLevel(Level.DEBUG);
+ EntityManagerFactory entityManagerFactory = null;
+ EntityManager entityManager = null;
+ ExistingDataBulkExportSource source = null;
+ try {
+            entityManagerFactory = createEntityManager2();
+ entityManager = entityManagerFactory.createEntityManager();
+            source = new ExistingDataJPABulkExportSource(entityManager,
+ "SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1D");
+ StopWatch stopWatch = new StopWatch();
+ stopWatch.start();
+ source.initialize();
+ int rowIndex = 0;
+ int maxResults = 30000;
+ for (;;) {
+ List<Object[]> existingData = source.getData(rowIndex, maxResults);
+ if (existingData.size() < maxResults) {
+ break;
+ } else {
+ rowIndex += maxResults;
+ }
+ }
+ stopWatch.stop();
+ System.out.println("Execution: " + stopWatch);
+ } finally {
+ if (source != null) {
+ source.close();
+ }
+ if (entityManager != null) {
+ entityManager.close();
+ }
+ if (entityManagerFactory != null) {
+ entityManagerFactory.close();
+ }
+ }
+ }
+
+ private static EntityManagerFactory createEntityManager2() throws Exception {
+ Properties properties = new Properties();
+ properties.put("javax.persistence.provider", "org.hibernate.ejb.HibernatePersistence");
+ properties.put("hibernate.connection.username", "rhqadmin");
+ properties.put("hibernate.connection.password", "rhqadmin");
+ properties.put("hibernate.dialect", "org.hibernate.dialect.PostgreSQLDialect");
+ String driverClassName = "org.postgresql.Driver";
+ try {
+ //Required to preload the driver manually.
+ //Without this the driver load will fail due to the packaging.
+ Class.forName(driverClassName);
+ } catch (ClassNotFoundException e) {
+ throw new Exception("Postgres SQL Driver class could not be loaded. Missing class: " + driverClassName);
+ }
+ properties.put("hibernate.driver_class", driverClassName);
+ properties.put("hibernate.connection.url", "jdbc:postgresql://localhost:5432/rhqdev");
+ Ejb3Configuration configuration = new Ejb3Configuration();
+ configuration.setProperties(properties);
+ return configuration.buildEntityManagerFactory();
+ }
+
+}
\ No newline at end of file
commit d3a4a1539804a69de22bfbf909ab58156d6e4204
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Tue Apr 30 08:17:10 2013 -0500
Revamp the way information about the current node is retrieved. The address, native port, and cluster name are now read from the node's cassandra.yaml file during discovery.
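For reference, a minimal sketch of reading those keys with SnakeYAML (illustrative, not part of the commit; the cassandra.yaml path is hypothetical, since discovery derives it from the process classpath as in the diff below). Numeric values such as native_transport_port are parsed as Integer, so the map is best typed with Object values:

    import java.io.FileInputStream;
    import java.io.InputStream;
    import java.util.Map;

    import org.yaml.snakeyaml.Yaml;

    public class CassandraYamlSketch {

        public static void main(String[] args) throws Exception {
            //hypothetical location; discovery derives it from the -cp entries instead
            InputStream inputStream = new FileInputStream("/opt/cassandra/conf/cassandra.yaml");
            try {
                @SuppressWarnings("unchecked")
                Map<String, Object> parsed = (Map<String, Object>) new Yaml().load(inputStream);
                System.out.println("cluster_name = " + parsed.get("cluster_name"));
                System.out.println("listen_address = " + parsed.get("listen_address"));
                System.out.println("native_transport_port = " + parsed.get("native_transport_port"));
            } finally {
                inputStream.close();
            }
        }
    }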
diff --git a/.classpath b/.classpath
index e44a17b..62c7e54 100644
--- a/.classpath
+++ b/.classpath
@@ -365,6 +365,7 @@
<classpathentry exported="true" kind="var" path="M2_REPO/org/picketbox/picketbox/4.0.7.Final/picketbox-4.0.7.Final.jar" sourcepath="/M2_REPO/org/picketbox/picketbox/4.0.7.Final/picketbox-4.0.7.Final-sources.jar"/>
<classpathentry exported="true" kind="var" path="M2_REPO/org/python/jython-standalone/2.5.2/jython-standalone-2.5.2.jar"/>
<classpathentry exported="true" kind="var" path="M2_REPO/com/wordnik/swagger-annotations_2.9.1/1.1.1-SNAPSHOT/swagger-annotations_2.9.1-1.1.1-20121031.024335-6.jar"/>
+ <classpathentry exported="true" kind="var" path="M2_REPO/org/yaml/snakeyaml/1.12/snakeyaml-1.12.jar"/>
<classpathentry exported="true" kind="var" path="M2_REPO/joda-time/joda-time/2.1/joda-time-2.1.jar"/>
<classpathentry exported="true" kind="var" path="M2_REPO/org/jboss/byteman/byteman-install/1.5.2/byteman-install-1.5.2.jar" sourcepath="M2_REPO/org/jboss/byteman/byteman-install/1.5.2/byteman-install-1.5.2-sources.jar"/>
<classpathentry exported="true" kind="var" path="M2_REPO/org/jboss/byteman/byteman-submit/1.5.2/byteman-submit-1.5.2.jar" sourcepath="M2_REPO/org/jboss/byteman/byteman-submit/1.5.2/byteman-submit-1.5.2-sources.jar"/>
diff --git a/modules/plugins/cassandra/pom.xml b/modules/plugins/cassandra/pom.xml
index d13cbf4..a88d6e3 100644
--- a/modules/plugins/cassandra/pom.xml
+++ b/modules/plugins/cassandra/pom.xml
@@ -20,9 +20,14 @@
<version>${cassandra.driver.version}</version>
</dependency>
<dependency>
- <groupId>commons-logging</groupId>
- <artifactId>commons-logging</artifactId>
- <scope>provided</scope>
+ <groupId>org.yaml</groupId>
+ <artifactId>snakeyaml</artifactId>
+ <version>1.12</version>
+ </dependency>
+ <dependency>
+ <groupId>org.xerial.snappy</groupId>
+ <artifactId>snappy-java</artifactId>
+ <version>1.0.5-M3-p1</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
@@ -83,39 +88,8 @@
<artifactId>cassandra-driver-core</artifactId>
</artifactItem>
<artifactItem>
- <groupId>io.netty</groupId>
- <artifactId>netty</artifactId>
- <version>${cassandra.driver.netty.version}</version>
- </artifactItem>
- <artifactItem>
- <groupId>org.apache.cassandra</groupId>
- <artifactId>cassandra-all</artifactId>
- <version>${cassandra.version}</version>
- </artifactItem>
- <artifactItem>
- <groupId>org.apache.cassandra</groupId>
- <artifactId>cassandra-thrift</artifactId>
- <version>${cassandra.version}</version>
- </artifactItem>
- <artifactItem>
- <groupId>org.apache.thrift</groupId>
- <artifactId>libthrift</artifactId>
- <version>${cassandra.thrift.version}</version>
- </artifactItem>
- <artifactItem>
- <groupId>com.google.guava</groupId>
- <artifactId>guava</artifactId>
- <version>14.0.1</version>
- </artifactItem>
- <artifactItem>
- <groupId>com.yammer.metrics</groupId>
- <artifactId>metrics-core</artifactId>
- <version>2.2.0</version>
- </artifactItem>
- <artifactItem>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-api</artifactId>
- <version>1.7.2</version>
+ <groupId>org.yaml</groupId>
+ <artifactId>snakeyaml</artifactId>
</artifactItem>
<artifactItem>
<groupId>org.slf4j</groupId>
@@ -127,16 +101,6 @@
<artifactId>snappy-java</artifactId>
<version>1.0.5-M3-p1</version>
</artifactItem>
- <artifactItem>
- <groupId>org.codehaus.jackson</groupId>
- <artifactId>jackson-core-asl</artifactId>
- <version>1.9.2</version>
- </artifactItem>
- <artifactItem>
- <groupId>org.codehaus.jackson</groupId>
- <artifactId>jackson-mapper-asl</artifactId>
- <version>1.9.2</version>
- </artifactItem>
</artifactItems>
</configuration>
</execution>
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
index db28117..e16fd88 100644
--- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
+++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
@@ -28,14 +28,11 @@ import static org.rhq.core.domain.measurement.AvailabilityType.UP;
import static org.rhq.core.system.OperatingSystemType.WINDOWS;
import java.io.File;
-import java.util.Set;
import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.Query;
-import com.datastax.driver.core.ResultSet;
+import com.datastax.driver.core.ProtocolOptions.Compression;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleAuthInfoProvider;
-import com.datastax.driver.core.querybuilder.QueryBuilder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -45,14 +42,9 @@ import org.mc4j.ems.connection.bean.EmsBean;
import org.mc4j.ems.connection.bean.operation.EmsOperation;
import org.rhq.core.domain.configuration.Configuration;
-import org.rhq.core.domain.configuration.PropertySimple;
import org.rhq.core.domain.measurement.AvailabilityType;
-import org.rhq.core.domain.measurement.MeasurementDataTrait;
-import org.rhq.core.domain.measurement.MeasurementReport;
-import org.rhq.core.domain.measurement.MeasurementScheduleRequest;
import org.rhq.core.pluginapi.inventory.ResourceComponent;
import org.rhq.core.pluginapi.inventory.ResourceContext;
-import org.rhq.core.pluginapi.measurement.MeasurementFacet;
import org.rhq.core.pluginapi.operation.OperationFacet;
import org.rhq.core.pluginapi.operation.OperationResult;
import org.rhq.core.pluginapi.util.ProcessExecutionUtility;
@@ -67,8 +59,7 @@ import org.rhq.plugins.jmx.JMXServerComponent;
/**
* @author John Sanda
*/
-public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent<?>> implements MeasurementFacet,
- OperationFacet {
+public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent<?>> implements OperationFacet {
private Log log = LogFactory.getLog(CassandraNodeComponent.class);
@@ -78,20 +69,35 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
@Override
public void start(ResourceContext context) throws Exception {
super.start(context);
- Cluster cluster = Cluster.builder().addContactPoints(new String[] { "localhost" }).withoutMetrics()
- .withPort(getCQLPort(context.getPluginConfiguration()))
- .withAuthInfoProvider(
- new SimpleAuthInfoProvider().add("username",
- this.getResourceContext().getPluginConfiguration().getSimpleValue("username")).add("password",
- this.getResourceContext().getPluginConfiguration().getSimpleValue("password")))
- .build();
- this.cassandraSession = cluster.connect("rhq");
- };
- private int getCQLPort(Configuration pluginConfig) {
- String property = pluginConfig.getSimpleValue("nativeTransportPort", "9042");
- return Integer.parseInt(property);
- }
+ String host = context.getPluginConfiguration().getSimpleValue("host", "localhost");
+ String clusterName = context.getPluginConfiguration().getSimpleValue("clusterName", "unknown");
+ String username = context.getPluginConfiguration().getSimpleValue("username", "cassandra");
+ String password = context.getPluginConfiguration().getSimpleValue("password", "password");
+
+ Integer nativePort = 9042;
+ try {
+ nativePort = Integer.parseInt(context.getPluginConfiguration()
+ .getSimpleValue("nativeTransportPort", "9042"));
+ } catch (Exception e) {
+ log.debug("Native transport port parsing failed...", e);
+ }
+
+ try {
+ Cluster cluster = Cluster
+ .builder()
+ .addContactPoints(new String[] { host })
+ .withoutMetrics()
+ .withPort(nativePort)
+ .withCompression(Compression.NONE)
+ .withAuthInfoProvider(new SimpleAuthInfoProvider().add("username", username).add("password", password))
+ .build();
+ this.cassandraSession = cluster.connect(clusterName);
+ } catch (Exception e) {
+ log.error("Connect to Cassandra " + host + ":" + nativePort, e);
+ throw e;
+ }
+ };
@Override
public AvailabilityType getAvailability() {
@@ -209,20 +215,6 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
}
}
- //@Override
- public void getValues(MeasurementReport report, Set<MeasurementScheduleRequest> metrics) throws Exception {
- for (MeasurementScheduleRequest scheduleRequest : metrics) {
- if (scheduleRequest.getName().equals("cluster")) {
- Query q = QueryBuilder.select().from("system", "local");
- ResultSet resultSet = this.getCassandraSession().execute(q);
- if (!resultSet.isExhausted()) {
- report
- .addData(new MeasurementDataTrait(scheduleRequest, resultSet.one().getString("cluster_name")));
- }
- }
- }
- }
-
public Session getCassandraSession() {
return this.cassandraSession;
}
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java
index 0b4eab2..febddf1 100644
--- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java
+++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java
@@ -23,11 +23,17 @@
package org.rhq.plugins.cassandra;
import java.io.File;
+import java.io.FileInputStream;
+import java.io.InputStream;
import java.util.HashSet;
import java.util.List;
+import java.util.Map;
import java.util.Set;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.mc4j.ems.connection.support.metadata.J2SE5ConnectionTypeDescriptor;
+import org.yaml.snakeyaml.Yaml;
import org.rhq.core.domain.configuration.Configuration;
import org.rhq.core.domain.configuration.PropertySimple;
@@ -42,6 +48,8 @@ import org.rhq.plugins.jmx.JMXDiscoveryComponent;
*/
public class CassandraNodeDiscoveryComponent extends JMXDiscoveryComponent {
+ private static final Log log = LogFactory.getLog(CassandraNodeDiscoveryComponent.class);
+
@SuppressWarnings({ "rawtypes", "unchecked" })
@Override
public Set<DiscoveredResourceDetails> discoverResources(ResourceDiscoveryContext context) {
@@ -58,31 +66,83 @@ public class CassandraNodeDiscoveryComponent extends JMXDiscoveryComponent {
return details;
}
+ @SuppressWarnings({ "unchecked", "deprecation" })
private DiscoveredResourceDetails getDetails(ResourceDiscoveryContext<?> context,
ProcessScanResult processScanResult) {
ProcessInfo processInfo = processScanResult.getProcessInfo();
+
+ Configuration pluginConfig = new Configuration();
+
String jmxPort = null;
- for (String arg : processInfo.getCommandLine()) {
+
+ String[] arguments = processInfo.getCommandLine();
+ int classpathIndex = -1;
+ for (int i = 0; i < arguments.length; i++) {
+ String arg = arguments[i];
+
if (arg.startsWith("-Dcom.sun.management.jmxremote.port")) {
String[] jmxPortArg = arg.split("=");
jmxPort = jmxPortArg[1];
- break;
+ }
+ if (arg.startsWith("-cp")) {
+ classpathIndex = i;
}
}
- if (jmxPort == null) {
- return null;
+ if (classpathIndex != -1 && classpathIndex + 1 < arguments.length) {
+ String[] classpathEntries = arguments[classpathIndex + 1].split(":");
+
+ String yamlConfigurationPath = null;
+ for (String classpathEntry : classpathEntries) {
+ if (classpathEntry.endsWith("conf")) {
+ yamlConfigurationPath = processInfo.getExecutable().getCwd() + "/" + classpathEntry;
+ }
+ }
+
+ if (yamlConfigurationPath != null) {
+
+ InputStream inputStream = null;
+ try {
+ inputStream = new FileInputStream(new File(yamlConfigurationPath + "/cassandra.yaml"));
+ Yaml yaml = new Yaml();
+ Map<String, String> parsedProperties = (Map<String, String>) yaml.load(inputStream);
+
+ if (parsedProperties.get("cluster_name") != null) {
+ pluginConfig.put(new PropertySimple("clusterName", parsedProperties.get("cluster_name")));
+ }
+
+ if (parsedProperties.get("listen_address") != null) {
+ pluginConfig.put(new PropertySimple("host", parsedProperties.get("listen_address")));
+ }
+
+ if (parsedProperties.get("native_transport_port") != null) {
+ pluginConfig.put(new PropertySimple("nativeTransportPort", parsedProperties
+ .get("native_transport_port")));
+ }
+                } catch (Exception e) {
+                    log.error("Unable to load the YAML configuration file.", e);
+                } finally {
+                    try {
+                        if (inputStream != null) {
+                            inputStream.close();
+                        }
+                    } catch (Exception e) {
+                        log.error("Unable to close stream for the YAML configuration file.", e);
+                    }
+                }
+ }
}
- String resourceKey = "CassandraDaemon:" + jmxPort;
- String resourceName = "CassandraDaemon";
+ if (jmxPort != null) {
+ pluginConfig.put(new PropertySimple(JMXDiscoveryComponent.CONNECTION_TYPE,
+ J2SE5ConnectionTypeDescriptor.class.getName()));
+ pluginConfig.put(new PropertySimple(JMXDiscoveryComponent.CONNECTOR_ADDRESS_CONFIG_PROPERTY,
+ "service:jmx:rmi:///jndi/rmi://" + pluginConfig.getSimpleValue("host") + ":" + jmxPort + "/jmxrmi"));
+ }
- Configuration pluginConfig = new Configuration();
- pluginConfig.put(new PropertySimple(JMXDiscoveryComponent.CONNECTION_TYPE,
- J2SE5ConnectionTypeDescriptor.class.getName()));
- pluginConfig.put(new PropertySimple(JMXDiscoveryComponent.CONNECTOR_ADDRESS_CONFIG_PROPERTY,
- "service:jmx:rmi:///jndi/rmi://127.0.0.1:" + jmxPort + "/jmxrmi"));
+ String resourceKey = "Cassandra (" + pluginConfig.getSimpleValue("host") + ") " + jmxPort;
+ String resourceName = "Cassandra (" + pluginConfig.getSimpleValue("host") + ")";
String path = processInfo.getExecutable().getCwd();
pluginConfig.put(new PropertySimple("baseDir", new File(path).getParentFile().getAbsolutePath()));
diff --git a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
index cf28a8e..905d4b8 100644
--- a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
+++ b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
@@ -45,6 +45,8 @@
<c:simple-property name="commandLine" required="false" type="string" description="the command line of the JVM at the time it was discovered - only used by JVMs with type Local; if the command line of the JVM changes, this property's value will need to be updated accordingly in order for RHQ to connect to the JVM"/>
<c:simple-property name="baseDir" displayName="Base Directory" description="The base directory from which the Cassandra Daemon was launched." required="false"/>
<c:simple-property name="nativeTransportPort" description="The port on which Cassandra listens for CQL client connections." default="9042" type="integer"/>
+ <c:simple-property name="host" description="The host on which cassandra listens to CQL client connections" default="localhost"/>
+ <c:simple-property name="clusterName" description="Cluster name" default="localhost"/>
</plugin-configuration>
<process-scan name="CassandraDaemon" query="process|basename|match=^java.*,arg|org.apache.cassandra.service.CassandraDaemon|match=.*"/>
@@ -65,9 +67,6 @@
</results>
</operation>
- <metric property="cluster" dataType="trait" description="The name of the cluster. This is used to prevent machines in one logical cluster from joining another." displayType="summary"/>
-
-
<server name="Cassandra Server JVM"
sourcePlugin="JMX" sourceType="JMX Server"
discovery="org.rhq.plugins.jmx.EmbeddedJMXServerDiscoveryComponent"
commit 3a21520a2a9f2805b14f0120f7876b30f3f3901d
Author: Thomas Segismont <tsegismo(a)redhat.com>
Date: Mon Apr 29 15:23:46 2013 +0200
Add JPA bulk export source
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataJPABulkExportSource.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataJPABulkExportSource.java
new file mode 100644
index 0000000..d61fdf1
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataJPABulkExportSource.java
@@ -0,0 +1,124 @@
+package org.rhq.server.metrics;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.util.List;
+import java.util.Properties;
+
+import javax.persistence.EntityManager;
+import javax.persistence.EntityManagerFactory;
+
+import org.apache.commons.lang.time.StopWatch;
+import org.apache.log4j.BasicConfigurator;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.hibernate.ejb.Ejb3Configuration;
+
+import org.rhq.core.util.jdbc.JDBCUtil;
+import org.rhq.core.util.stream.StreamUtil;
+
+/**
+ * @author Thomas Segismont
+ */
+public class ExistingDataJPABulkExportSource extends ExistingDataBulkExportSource {
+
+ public ExistingDataJPABulkExportSource(EntityManager entityManager, String selectNativeQuery, File workDirectory,
+ String fileName) {
+ super(entityManager, selectNativeQuery, workDirectory, fileName);
+ }
+
+ public void exportExistingData() throws Exception {
+ BufferedWriter fileWriter = null;
+ Connection connection = null;
+ PreparedStatement statement = null;
+ ResultSet resultSet = null;
+ try {
+ fileWriter = new BufferedWriter(new FileWriter(getExistingDataFile()), IO_BUFFER_SIZE);
+ connection = getConnection();
+ statement = connection.prepareStatement(getSelectNativeQuery());
+            resultSet = statement.executeQuery();
+            int columnCount = resultSet.getMetaData().getColumnCount();
+ while (resultSet.next()) {
+ for (int i = 1; i < columnCount + 1; i++) {
+ if (i > 1) {
+ fileWriter.write(DELIMITER);
+ }
+ fileWriter.write(resultSet.getString(i));
+ }
+ fileWriter.write("\n");
+ }
+ } finally {
+ StreamUtil.safeClose(fileWriter);
+ JDBCUtil.safeClose(connection, statement, resultSet);
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ BasicConfigurator.configure();
+ Logger.getRootLogger().setLevel(Level.INFO);
+ Logger.getLogger("org.rhq").setLevel(Level.DEBUG);
+ EntityManagerFactory entityManagerFactory = null;
+ EntityManager entityManager = null;
+ ExistingDataBulkExportSource source = null;
+ try {
+ entityManagerFactory = createEntityManager();
+ entityManager = entityManagerFactory.createEntityManager();
+ source = new ExistingPostgresDataBulkExportSource(entityManager,
+ "SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1D",
+ // "SELECT count(*) FROM RHQ_MEASUREMENT_DATA_NUM_1D limit 1"
+ new File(System.getProperty("java.io.tmpdir")), "poupoupidou.txt");
+ StopWatch stopWatch = new StopWatch();
+ stopWatch.start();
+ source.exportExistingData();
+ source.startReading();
+ int rowIndex = 0;
+ int maxResults = 30000;
+ for (;;) {
+ List<Object[]> existingData = source.getExistingData(rowIndex, maxResults);
+ if (existingData.size() < maxResults) {
+ break;
+ } else {
+ rowIndex += maxResults;
+ }
+ }
+ stopWatch.stop();
+ System.out.println("Execution: " + stopWatch);
+ } finally {
+ if (source != null) {
+ source.stopReading();
+ }
+ if (entityManager != null) {
+ entityManager.close();
+ }
+ if (entityManagerFactory != null) {
+ entityManagerFactory.close();
+ }
+ }
+ }
+
+ private static EntityManagerFactory createEntityManager() throws Exception {
+ Properties properties = new Properties();
+ properties.put("javax.persistence.provider", "org.hibernate.ejb.HibernatePersistence");
+ properties.put("hibernate.connection.username", "rhqadmin");
+ properties.put("hibernate.connection.password", "rhqadmin");
+ properties.put("hibernate.dialect", "org.hibernate.dialect.PostgreSQLDialect");
+ String driverClassName = "org.postgresql.Driver";
+ try {
+ //Required to preload the driver manually.
+ //Without this the driver load will fail due to the packaging.
+ Class.forName(driverClassName);
+ } catch (ClassNotFoundException e) {
+ throw new Exception("Postgres SQL Driver class could not be loaded. Missing class: " + driverClassName);
+ }
+ properties.put("hibernate.driver_class", driverClassName);
+ properties.put("hibernate.connection.url", "jdbc:postgresql://localhost:5432/rhqdev");
+ Ejb3Configuration configuration = new Ejb3Configuration();
+ configuration.setProperties(properties);
+ return configuration.buildEntityManagerFactory();
+ }
+
+}
commit a6ae7997055dd5e508da3991c08876c3da2fc294
Author: Thomas Segismont <tsegismo(a)redhat.com>
Date: Fri Apr 26 09:46:48 2013 +0200
Introduce ExistingDataSource abstraction
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataBulkExportSource.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataBulkExportSource.java
new file mode 100644
index 0000000..d817002
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataBulkExportSource.java
@@ -0,0 +1,129 @@
+/*
+ * RHQ Management Platform
+ * Copyright (C) 2005-2013 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+package org.rhq.server.metrics;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.StringTokenizer;
+
+import javax.persistence.EntityManager;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.hibernate.Session;
+import org.hibernate.engine.spi.SessionFactoryImplementor;
+import org.hibernate.service.jdbc.connections.spi.ConnectionProvider;
+
+import org.rhq.core.util.stream.StreamUtil;
+
+/**
+ * @author Thomas Segismont
+ */
+public abstract class ExistingDataBulkExportSource implements ExistingDataSource {
+
+ private static final Log LOG = LogFactory.getLog(ExistingDataBulkExportSource.class);
+
+ protected static final int IO_BUFFER_SIZE = 1024 * 64;
+
+ protected static final String DELIMITER = "|";
+
+ private EntityManager entityManager;
+
+ private String selectNativeQuery;
+
+ private File workDirectory;
+
+ private String fileName;
+
+ private File existingDataFile;
+
+ private BufferedReader existingDataFileReader;
+
+ private int currentIndex;
+
+ public ExistingDataBulkExportSource(EntityManager entityManager, String selectNativeQuery, File workDirectory,
+ String fileName) {
+ this.entityManager = entityManager;
+ this.selectNativeQuery = selectNativeQuery;
+ this.workDirectory = workDirectory;
+ this.fileName = fileName;
+ existingDataFile = new File(workDirectory, fileName);
+ }
+
+ protected String getSelectNativeQuery() {
+ return selectNativeQuery;
+ }
+
+ protected File getExistingDataFile() {
+ return existingDataFile;
+ }
+
+ protected Connection getConnection() throws SQLException {
+ Session session = (Session) entityManager.getDelegate();
+ SessionFactoryImplementor sfi = (SessionFactoryImplementor) session.getSessionFactory();
+ ConnectionProvider cp = sfi.getConnectionProvider();
+ return cp.getConnection();
+ }
+
+ public abstract void exportExistingData() throws Exception;
+
+ public void startReading() throws Exception {
+ if (!existingDataFile.exists() && !existingDataFile.isFile() && !existingDataFile.canRead()) {
+ throw new IllegalStateException();
+ }
+ existingDataFileReader = new BufferedReader(new FileReader(existingDataFile));
+ currentIndex = 0;
+ }
+
+ public void stopReading() {
+ StreamUtil.safeClose(existingDataFileReader);
+ }
+
+ @Override
+ public List<Object[]> getExistingData(int fromIndex, int maxResults) throws Exception {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Reading lines " + fromIndex + "-" + (fromIndex + maxResults));
+ }
+ if (fromIndex != currentIndex) {
+ throw new IllegalStateException();
+ }
+ List<Object[]> results = new LinkedList<Object[]>();
+ for (int i = 0; i < maxResults; i++) {
+ String nextLine = existingDataFileReader.readLine();
+ if (nextLine == null) {
+ break;
+ }
+ currentIndex++;
+ StringTokenizer stringTokenizer = new StringTokenizer(nextLine, DELIMITER);
+ Object[] row = new Object[stringTokenizer.countTokens()];
+ for (int j = 0; j < row.length; j++) {
+ row[j] = stringTokenizer.nextToken();
+ }
+ results.add(row);
+ }
+ return results;
+ }
+
+}
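
The lifecycle of the bulk export source above is: exportExistingData() dumps the query result to a delimited work file, startReading() opens the file, getExistingData() then streams it in strictly sequential batches (fromIndex must match the internal cursor), and stopReading() closes the reader. A minimal sketch of a caller, assuming the same package and some concrete subclass; the driver class and batch size here are illustrative, not part of the commit:

    import java.util.List;

    public class BulkExportDriver {
        // Drains an ExistingDataBulkExportSource in fixed-size, sequential
        // batches; a short batch signals end of file.
        public static void drain(ExistingDataBulkExportSource source, int batchSize) throws Exception {
            source.exportExistingData(); // dump the SELECT to the work file
            source.startReading();       // open the file, cursor at 0
            try {
                int fromIndex = 0;
                while (true) {
                    List<Object[]> batch = source.getExistingData(fromIndex, batchSize);
                    // each row holds one String per '|'-separated column
                    if (batch.size() < batchSize) {
                        break;
                    }
                    fromIndex += batchSize;
                }
            } finally {
                source.stopReading();    // always release the reader
            }
        }
    }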
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataJPASource.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataJPASource.java
new file mode 100644
index 0000000..b072f58
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataJPASource.java
@@ -0,0 +1,119 @@
+/*
+ * RHQ Management Platform
+ * Copyright (C) 2005-2013 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+package org.rhq.server.metrics;
+
+import java.util.List;
+import java.util.Properties;
+
+import javax.persistence.EntityManager;
+import javax.persistence.EntityManagerFactory;
+
+import org.apache.commons.lang.time.StopWatch;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.log4j.BasicConfigurator;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.hibernate.ejb.Ejb3Configuration;
+
+/**
+ * @author Thomas Segismont
+ */
+public class ExistingDataJPASource implements ExistingDataSource {
+
+ private static final Log LOG = LogFactory.getLog(ExistingDataJPASource.class);
+
+ private EntityManager entityManager;
+
+ private String selectNativeQuery;
+
+ public ExistingDataJPASource(EntityManager entityManager, String selectNativeQuery) {
+ this.entityManager = entityManager;
+ this.selectNativeQuery = selectNativeQuery;
+ }
+
+ @Override
+ public List<Object[]> getExistingData(int fromIndex, int maxResults) throws Exception {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Reading lines " + fromIndex + "-" + (fromIndex + maxResults));
+ }
+ return entityManager.createNativeQuery(selectNativeQuery).setFirstResult(fromIndex).setMaxResults(maxResults)
+ .getResultList();
+ }
+
+ public static void main(String[] args) throws Exception {
+ BasicConfigurator.configure();
+ Logger.getRootLogger().setLevel(Level.INFO);
+ Logger.getLogger("org.rhq").setLevel(Level.DEBUG);
+ EntityManagerFactory entityManagerFactory = null;
+ EntityManager entityManager = null;
+ ExistingDataJPASource source = null;
+ try {
+ entityManagerFactory = createEntityManager();
+ entityManager = entityManagerFactory.createEntityManager();
+ source = new ExistingDataJPASource(
+ entityManager,
+ "SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1D");
+ StopWatch stopWatch = new StopWatch();
+ stopWatch.start();
+ int rowIndex = 0;
+ int maxResults = 30000;
+            for (;;) {
+ List<Object[]> existingData = source.getExistingData(rowIndex, maxResults);
+ if (existingData.size() < maxResults) {
+ break;
+ } else {
+ rowIndex += maxResults;
+ }
+ }
+ stopWatch.stop();
+ System.out.println("Execution: " + stopWatch);
+ } finally {
+ if (entityManager != null) {
+ entityManager.close();
+ }
+ if (entityManagerFactory != null) {
+ entityManagerFactory.close();
+ }
+ }
+ }
+
+ private static EntityManagerFactory createEntityManager() throws Exception {
+ Properties properties = new Properties();
+ properties.put("javax.persistence.provider", "org.hibernate.ejb.HibernatePersistence");
+ properties.put("hibernate.connection.username", "rhqadmin");
+ properties.put("hibernate.connection.password", "rhqadmin");
+ properties.put("hibernate.dialect", "org.hibernate.dialect.PostgreSQLDialect");
+ String driverClassName = "org.postgresql.Driver";
+ try {
+ //Required to preload the driver manually.
+ //Without this the driver load will fail due to the packaging.
+ Class.forName(driverClassName);
+ } catch (ClassNotFoundException e) {
+            throw new Exception("PostgreSQL JDBC driver class could not be loaded. Missing class: " + driverClassName);
+ }
+ properties.put("hibernate.driver_class", driverClassName);
+ properties.put("hibernate.connection.url", "jdbc:postgresql://localhost:5432/rhq");
+ Ejb3Configuration configuration = new Ejb3Configuration();
+ configuration.setProperties(properties);
+ return configuration.buildEntityManagerFactory();
+ }
+
+}
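
Worth noting: setFirstResult/setMaxResults on a native query becomes LIMIT/OFFSET under the PostgreSQL dialect, so each successive page rescans and discards every row before the offset; that growing cost is presumably what motivates the file-based bulk export sources in this same commit. If in-database paging were ever needed without the OFFSET penalty, a keyset-style variant would page by the last key seen instead. A sketch only, not part of the commit; it assumes (schedule_id, time_stamp) is a usable ordering key for RHQ_MEASUREMENT_DATA_NUM_1D:

    import java.util.List;
    import javax.persistence.EntityManager;

    public class KeysetMetricsSource {
        private final EntityManager entityManager;

        public KeysetMetricsSource(EntityManager entityManager) {
            this.entityManager = entityManager;
        }

        // Restart each page after the last (schedule_id, time_stamp) seen;
        // with an index on that pair, every page is a range scan with no
        // OFFSET skip-cost. PostgreSQL supports the row-value comparison.
        @SuppressWarnings("unchecked")
        public List<Object[]> nextPage(int lastScheduleId, long lastTimeStamp, int maxResults) {
            return entityManager
                .createNativeQuery("SELECT schedule_id, time_stamp, value, minvalue, maxvalue"
                    + " FROM RHQ_MEASUREMENT_DATA_NUM_1D"
                    + " WHERE (schedule_id, time_stamp) > (?, ?)"
                    + " ORDER BY schedule_id, time_stamp")
                .setParameter(1, lastScheduleId)
                .setParameter(2, lastTimeStamp)
                .setMaxResults(maxResults)
                .getResultList();
        }
    }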
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataSource.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataSource.java
new file mode 100644
index 0000000..d6e0203
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingDataSource.java
@@ -0,0 +1,31 @@
+/*
+ * RHQ Management Platform
+ * Copyright (C) 2005-2013 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+package org.rhq.server.metrics;
+
+import java.util.List;
+
+/**
+ * @author Thomas Segismont
+ */
+public interface ExistingDataSource {
+
+ List<Object[]> getExistingData(int fromIndex, int maxResults) throws Exception;
+
+}
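
Since the interface is a single method, its consumers are easy to exercise without a database. A hypothetical in-memory stub (not part of the commit) could look like:

    import java.util.ArrayList;
    import java.util.List;

    public class InMemoryExistingDataSource implements ExistingDataSource {
        private final List<Object[]> rows;

        public InMemoryExistingDataSource(List<Object[]> rows) {
            this.rows = rows;
        }

        @Override
        public List<Object[]> getExistingData(int fromIndex, int maxResults) {
            // Return the [fromIndex, fromIndex + maxResults) slice, clamped
            // to the available rows; an empty list past the end.
            int to = Math.min(fromIndex + maxResults, rows.size());
            if (fromIndex >= to) {
                return new ArrayList<Object[]>();
            }
            return new ArrayList<Object[]>(rows.subList(fromIndex, to));
        }
    }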
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingPostgresDataBulkExportSource.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingPostgresDataBulkExportSource.java
new file mode 100644
index 0000000..8d049ed
--- /dev/null
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/ExistingPostgresDataBulkExportSource.java
@@ -0,0 +1,133 @@
+/*
+ * RHQ Management Platform
+ * Copyright (C) 2005-2013 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+package org.rhq.server.metrics;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.sql.Connection;
+import java.util.List;
+import java.util.Properties;
+
+import javax.persistence.EntityManager;
+import javax.persistence.EntityManagerFactory;
+
+import org.apache.commons.lang.time.StopWatch;
+import org.apache.log4j.BasicConfigurator;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.hibernate.ejb.Ejb3Configuration;
+import org.postgresql.copy.CopyManager;
+import org.postgresql.core.BaseConnection;
+
+import org.rhq.core.util.jdbc.JDBCUtil;
+import org.rhq.core.util.stream.StreamUtil;
+
+/**
+ * @author Thomas Segismont
+ */
+public class ExistingPostgresDataBulkExportSource extends ExistingDataBulkExportSource {
+
+ public ExistingPostgresDataBulkExportSource(EntityManager entityManager, String selectNativeQuery,
+ File workDirectory, String fileName) {
+ super(entityManager, selectNativeQuery, workDirectory, fileName);
+ }
+
+ public void exportExistingData() throws Exception {
+ BufferedWriter fileWriter = null;
+ Connection connection = null;
+ try {
+ fileWriter = new BufferedWriter(new FileWriter(getExistingDataFile()), IO_BUFFER_SIZE);
+ connection = getConnection();
+ CopyManager copyManager = new CopyManager((BaseConnection) connection);
+ copyManager.copyOut("COPY (" + getSelectNativeQuery() + ") TO STDOUT WITH DELIMITER '" + DELIMITER + "'",
+ fileWriter);
+ } finally {
+ StreamUtil.safeClose(fileWriter);
+ JDBCUtil.safeClose(connection);
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ BasicConfigurator.configure();
+ Logger.getRootLogger().setLevel(Level.INFO);
+ Logger.getLogger("org.rhq").setLevel(Level.DEBUG);
+ EntityManagerFactory entityManagerFactory = null;
+ EntityManager entityManager = null;
+ ExistingDataBulkExportSource source = null;
+ try {
+ entityManagerFactory = createEntityManager();
+ entityManager = entityManagerFactory.createEntityManager();
+ source = new ExistingPostgresDataBulkExportSource(
+ entityManager,
+ "SELECT schedule_id, time_stamp, value, minvalue, maxvalue FROM RHQ_MEASUREMENT_DATA_NUM_1D",
+ // "SELECT count(*) FROM RHQ_MEASUREMENT_DATA_NUM_1D limit 1"
+ new File(System.getProperty("java.io.tmpdir")), "poupoupidou.txt");
+ StopWatch stopWatch = new StopWatch();
+ stopWatch.start();
+ source.exportExistingData();
+ source.startReading();
+ int rowIndex = 0;
+ int maxResults = 30000;
+ for (;;) {
+ List<Object[]> existingData = source.getExistingData(rowIndex, maxResults);
+ if (existingData.size() < maxResults) {
+ break;
+ } else {
+ rowIndex += maxResults;
+ }
+ }
+ stopWatch.stop();
+ System.out.println("Execution: " + stopWatch);
+ } finally {
+ if (source != null) {
+ source.stopReading();
+ }
+ if (entityManager != null) {
+ entityManager.close();
+ }
+ if (entityManagerFactory != null) {
+ entityManagerFactory.close();
+ }
+ }
+ }
+
+ private static EntityManagerFactory createEntityManager() throws Exception {
+ Properties properties = new Properties();
+ properties.put("javax.persistence.provider", "org.hibernate.ejb.HibernatePersistence");
+ properties.put("hibernate.connection.username", "rhqadmin");
+ properties.put("hibernate.connection.password", "rhqadmin");
+ properties.put("hibernate.dialect", "org.hibernate.dialect.PostgreSQLDialect");
+ String driverClassName = "org.postgresql.Driver";
+ try {
+ //Required to preload the driver manually.
+ //Without this the driver load will fail due to the packaging.
+ Class.forName(driverClassName);
+ } catch (ClassNotFoundException e) {
+            throw new Exception("PostgreSQL JDBC driver class could not be loaded. Missing class: " + driverClassName);
+ }
+ properties.put("hibernate.driver_class", driverClassName);
+ properties.put("hibernate.connection.url", "jdbc:postgresql://localhost:5432/rhq");
+ Ejb3Configuration configuration = new Ejb3Configuration();
+ configuration.setProperties(properties);
+ return configuration.buildEntityManagerFactory();
+ }
+
+}
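
CopyManager drives the export above via COPY ... TO STDOUT; the same driver API supports the reverse direction with copyIn, should the delimited dump ever need to be reloaded. A sketch under the same assumptions as getConnection() (the table name is a placeholder):

    import java.io.BufferedReader;
    import java.io.FileReader;
    import java.sql.Connection;

    import org.postgresql.copy.CopyManager;
    import org.postgresql.core.BaseConnection;

    public class CopyInSketch {
        // Streams a '|'-delimited file back into a table with COPY FROM
        // STDIN and returns the number of rows copied. 'connection' must be
        // a raw PostgreSQL connection, as unwrapped in getConnection() above.
        public static long importFile(Connection connection, String file, String table) throws Exception {
            CopyManager copyManager = new CopyManager((BaseConnection) connection);
            BufferedReader reader = new BufferedReader(new FileReader(file));
            try {
                return copyManager.copyIn("COPY " + table + " FROM STDIN WITH DELIMITER '|'", reader);
            } finally {
                reader.close();
            }
        }
    }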
[rhq] modules/enterprise
by mike thompson
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/charttype/StackedBarMetricGraphImpl.java | 7 -------
modules/enterprise/gui/coregui/src/main/webapp/css/charts.css | 7 +++----
2 files changed, 3 insertions(+), 11 deletions(-)
New commits:
commit c430d28ff07adb87cccc8823a3b24d598867c462
Author: Mike Thompson <mithomps(a)redhat.com>
Date: Tue Apr 30 14:47:27 2013 -0700
[BZ 950701] Fix SVG text fonts in d3 graphs so that they don't look fuzzy.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/charttype/StackedBarMetricGraphImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/charttype/StackedBarMetricGraphImpl.java
index 2693772..d9a4205 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/charttype/StackedBarMetricGraphImpl.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/charttype/StackedBarMetricGraphImpl.java
@@ -31,13 +31,6 @@ public class StackedBarMetricGraphImpl extends AbstractGraph {
public StackedBarMetricGraphImpl() {
super();
}
- /**
- * General constructor for stacked bar graph when you have all the data needed to produce the graph. (This is true
- * for all cases but the dashboard portlet).
- */
-// public StackedBarMetricGraphImpl(MetricGraphData metricGraphData) {
-// setMetricGraphData(metricGraphData);
-// }
/**
diff --git a/modules/enterprise/gui/coregui/src/main/webapp/css/charts.css b/modules/enterprise/gui/coregui/src/main/webapp/css/charts.css
index 099f23a..8728026 100644
--- a/modules/enterprise/gui/coregui/src/main/webapp/css/charts.css
+++ b/modules/enterprise/gui/coregui/src/main/webapp/css/charts.css
@@ -27,11 +27,11 @@
}
.y.axis text, .x.axis text {
- font-family: 'Liberation Sans', Arial, Helvetica, sans-serif;
+ font-family: Arial, Helvetica, sans-serif;
font-size: 10px;
font-weight: normal;
font-style: normal;
- stroke: #50505a;
+ fill: #50505a;
text-rendering: optimize-legibility;
}
@@ -47,8 +47,8 @@
.minLabel, .avgLabel, .highLabel {
font-family: Arial, Verdana, sans-serif;
font-size: 12px;
+ font-weight: bold;
text-anchor: start;
- stroke: #003168;
fill: #003168;
text-rendering: optimize-legibility;
}
@@ -57,7 +57,6 @@
font-family: Arial, Verdana, sans-serif;
font-size: 12px;
text-anchor: start;
- stroke: #003168;
fill: #003168;
text-rendering: optimize-legibility;
}
[rhq] 2 commits - modules/enterprise
by mike thompson
modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/CoreGUI.gwt.xml | 8 --------
modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.html | 4 +++-
modules/enterprise/server/appserver/src/main/bin-resources/bin/rhq-server.sh | 2 +-
3 files changed, 4 insertions(+), 10 deletions(-)
New commits:
commit 813c49855969171406f638a9d32112b722b99d19
Author: Mike Thompson <mithomps(a)redhat.com>
Date: Tue Apr 30 13:37:35 2013 -0700
[BZ 955760] Syntax fix for gzip compression enablement.
diff --git a/modules/enterprise/server/appserver/src/main/bin-resources/bin/rhq-server.sh b/modules/enterprise/server/appserver/src/main/bin-resources/bin/rhq-server.sh
index 29a6d76..4e90c62 100755
--- a/modules/enterprise/server/appserver/src/main/bin-resources/bin/rhq-server.sh
+++ b/modules/enterprise/server/appserver/src/main/bin-resources/bin/rhq-server.sh
@@ -348,7 +348,7 @@ fi
# Add the JVM opts that we always want to specify, whether or not the user set RHQ_SERVER_JAVA_OPTS.
# Note that the double equals for the policy file specification IS INTENTIONAL
-_HTTP_COMPRESSION="-Dorg.apache.coyote.http11.Http11Protocol.COMPRESSION=on org.apache.coyote.http11.Http11Protocol.COMPRESSION_MIME_TYPES=text/javascript,text/css,text/html"
+_HTTP_COMPRESSION="-Dorg.apache.coyote.http11.Http11Protocol.COMPRESSION=on -Dorg.apache.coyote.http11.Http11Protocol.COMPRESSION_MIME_TYPES=text/javascript,text/css,text/html"
RHQ_SERVER_JAVA_OPTS="-Dapp.name=rhq-server ${RHQ_SERVER_JAVA_OPTS} -Drhq.server.home=${RHQ_SERVER_HOME} -Djboss.server.log.dir=${_LOG_DIR_PATH} -Djava.awt.headless=true -Dsun.lang.ClassLoader.allowArraySyntax=true -Djboss.server.default.config=standalone-full.xml -Djboss.modules.system.pkgs=org.jboss.byteman -Djava.security.manager -Djava.security.policy==${RHQ_SERVER_HOME}/bin/rhq-server.security-policy ${_HTTP_COMPRESSION} ${_JBOSS_DEBUG_LOGGING}"
debug_msg "RHQ_SERVER_JAVA_OPTS: $RHQ_SERVER_JAVA_OPTS"
commit 8c54ef01abaa6790f49f96c492fe6649a0245a6f
Author: Mike Thompson <mithomps(a)redhat.com>
Date: Tue Apr 30 09:06:23 2013 -0700
Move the jQuery (1.7.2) library out of CoreGUI.gwt.xml now that we no longer have the GFlot dependency (GFlot was removed with the addition of the d3 charts).
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/CoreGUI.gwt.xml b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/CoreGUI.gwt.xml
index 3d5ae84..7d50e87 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/CoreGUI.gwt.xml
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/CoreGUI.gwt.xml
@@ -46,14 +46,6 @@
<when-type-assignable class="com.google.gwt.user.client.rpc.RemoteService"/>
</generate-with>
- <!-- External javascript libraries -->
- <!-- jquery.sparkline requires jquery. We don't explicitly provide jquery here because it is already
- embedded in the GFlot JAR (the charting lib used for GraphPortlet). Furthermore, GFlot 2.4.2 requires
- the version of jquery (1.7.2).
- -->
- <script src="/coregui/js/jquery-1.7.2.min.js"/>
-
-
<!--
Limit compilation to your preferred browser(s) to speed up compile time.
diff --git a/modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.html b/modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.html
index 8722614..1b69b53 100644
--- a/modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.html
+++ b/modules/enterprise/gui/coregui/src/main/webapp/CoreGUI.html
@@ -15,10 +15,12 @@
<link rel="stylesheet" href="css/tipsy.css">
<link rel="stylesheet" href="css/charts.css">
- <script defer="defer" type="text/javascript" src="/coregui/js/jquery.sparkline-2.1.min.js"></script>
<script defer="defer" type="text/javascript" src="/coregui/js/rhq.js"></script>
<script defer="defer" type="text/javascript" src="/coregui/js/d3.v3.js"></script>
<script defer="defer" type="text/javascript" src="/coregui/js/nv.d3.js"></script>
+ <!-- jquery dependent js libs -->
+ <script defer="defer" type="text/javascript" src="/coregui/js/jquery-1.7.2.min.js"></script>
+ <script defer="defer" type="text/javascript" src="/coregui/js/jquery.sparkline-2.1.min.js"></script>
<script defer="defer" type="text/javascript" src="/coregui/js/jquery.tipsy.js"></script>
<script type="text/javascript">
[rhq] modules/enterprise
by mazz
modules/enterprise/server/appserver/src/main/bin-resources/bin/rhq-server.sh | 3 ++-
modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-server-wrapper.conf | 9 ++++++---
modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-server-wrapper.inc | 6 +++---
3 files changed, 11 insertions(+), 7 deletions(-)
New commits:
commit bcda4cd317f159f335aa544604ed9a0b0b3842a8
Author: John Mazzitelli <mazz(a)redhat.com>
Date: Tue Apr 30 15:41:46 2013 -0400
BZ 955760 - pass in the http compression sysprops to the RHQ Server at startup
diff --git a/modules/enterprise/server/appserver/src/main/bin-resources/bin/rhq-server.sh b/modules/enterprise/server/appserver/src/main/bin-resources/bin/rhq-server.sh
index 42c8e45..29a6d76 100755
--- a/modules/enterprise/server/appserver/src/main/bin-resources/bin/rhq-server.sh
+++ b/modules/enterprise/server/appserver/src/main/bin-resources/bin/rhq-server.sh
@@ -348,7 +348,8 @@ fi
# Add the JVM opts that we always want to specify, whether or not the user set RHQ_SERVER_JAVA_OPTS.
# Note that the double equals for the policy file specification IS INTENTIONAL
-RHQ_SERVER_JAVA_OPTS="-Dapp.name=rhq-server ${RHQ_SERVER_JAVA_OPTS} -Drhq.server.home=${RHQ_SERVER_HOME} -Djboss.server.log.dir=${_LOG_DIR_PATH} -Djava.awt.headless=true -Dsun.lang.ClassLoader.allowArraySyntax=true -Djboss.server.default.config=standalone-full.xml -Djboss.modules.system.pkgs=org.jboss.byteman -Djava.security.manager -Djava.security.policy==${RHQ_SERVER_HOME}/bin/rhq-server.security-policy ${_JBOSS_DEBUG_LOGGING}"
+_HTTP_COMPRESSION="-Dorg.apache.coyote.http11.Http11Protocol.COMPRESSION=on org.apache.coyote.http11.Http11Protocol.COMPRESSION_MIME_TYPES=text/javascript,text/css,text/html"
+RHQ_SERVER_JAVA_OPTS="-Dapp.name=rhq-server ${RHQ_SERVER_JAVA_OPTS} -Drhq.server.home=${RHQ_SERVER_HOME} -Djboss.server.log.dir=${_LOG_DIR_PATH} -Djava.awt.headless=true -Dsun.lang.ClassLoader.allowArraySyntax=true -Djboss.server.default.config=standalone-full.xml -Djboss.modules.system.pkgs=org.jboss.byteman -Djava.security.manager -Djava.security.policy==${RHQ_SERVER_HOME}/bin/rhq-server.security-policy ${_HTTP_COMPRESSION} ${_JBOSS_DEBUG_LOGGING}"
debug_msg "RHQ_SERVER_JAVA_OPTS: $RHQ_SERVER_JAVA_OPTS"
debug_msg "RHQ_SERVER_ADDITIONAL_JAVA_OPTS: $RHQ_SERVER_ADDITIONAL_JAVA_OPTS"
diff --git a/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-server-wrapper.conf b/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-server-wrapper.conf
index 3780f90..c5afd1b 100644
--- a/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-server-wrapper.conf
+++ b/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-server-wrapper.conf
@@ -82,11 +82,14 @@ wrapper.java.additional.18=-Dsun.lang.ClassLoader.allowArraySyntax=true
wrapper.java.additional.19=-Djava.security.manager
# the double equals for the policy file specification IS INTENTIONAL
wrapper.java.additional.20="-Djava.security.policy==%RHQ_SERVER_HOME%/bin/rhq-server.security-policy"
+wrapper.java.additional.21="-Dorg.apache.coyote.http11.Http11Protocol.COMPRESSION=on"
+wrapper.java.additional.22="-Dorg.apache.coyote.http11.Http11Protocol.COMPRESSION_MIME_TYPES=text/javascript,text/css,text/html"
+
# use these if your JVM supports it
-#wrapper.java.additional.21=-XX:+TieredCompilation
-#wrapper.java.additional.22=-XX:+UseCompressedOops
+#wrapper.java.additional.23=-XX:+TieredCompilation
+#wrapper.java.additional.24=-XX:+UseCompressedOops
# Don't need these now, but this is commented out in case we need to add an endorsed dir in the future
-#wrapper.java.additional.23="-Djava.endorsed.dirs=%RHQ_SERVER_HOME%/jbossas/lib/endorsed"
+#wrapper.java.additional.25="-Djava.endorsed.dirs=%RHQ_SERVER_HOME%/jbossas/lib/endorsed"
# We want to make sure the Server starts in the JBossAS bin directory
wrapper.working.dir=%RHQ_SERVER_HOME%/jbossas/bin
diff --git a/modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-server-wrapper.inc b/modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-server-wrapper.inc
index b76cd2f..8094241 100644
--- a/modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-server-wrapper.inc
+++ b/modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-server-wrapper.inc
@@ -4,12 +4,12 @@
#
# enable remote debugging
-wrapper.java.additional.9=-agentlib:jdwp=transport=dt_socket,address=8787,server=y,suspend=n
+wrapper.java.additional.23=-agentlib:jdwp=transport=dt_socket,address=8787,server=y,suspend=n
# enable jprofiler
#set.PATH=%PATH%;<jprofiler-install-dir>\bin\windows
-#wrapper.java.additional.10=-agentlib:jprofilerti=port=8849
-#wrapper.java.additional.11=-Xbootclasspath/a:<jprofiler-install-dir>\bin\agent.jar
+#wrapper.java.additional.23=-agentlib:jprofilerti=port=8849
+#wrapper.java.additional.24=-Xbootclasspath/a:<jprofiler-install-dir>\bin\agent.jar
# disable JVM startup timeout
wrapper.startup.timeout=0
[rhq] modules/plugins
by mazz
modules/plugins/ant-bundle/src/test/java/org/rhq/plugins/ant/AntBundlePluginComponentTest.java | 17 ++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
New commits:
commit 9a4c406425741f4dc3ecc546c27aec5d558fa37b
Author: John Mazzitelli <mazz(a)redhat.com>
Date: Tue Apr 30 14:25:36 2013 -0400
BZ 917085 fix test to hopefully run on Windows
diff --git a/modules/plugins/ant-bundle/src/test/java/org/rhq/plugins/ant/AntBundlePluginComponentTest.java b/modules/plugins/ant-bundle/src/test/java/org/rhq/plugins/ant/AntBundlePluginComponentTest.java
index f0605a2..76742e8 100644
--- a/modules/plugins/ant-bundle/src/test/java/org/rhq/plugins/ant/AntBundlePluginComponentTest.java
+++ b/modules/plugins/ant-bundle/src/test/java/org/rhq/plugins/ant/AntBundlePluginComponentTest.java
@@ -530,8 +530,21 @@ public class AntBundlePluginComponentTest {
assert winDirs.size() == 1 : "should only have 1 ext backup dir on windows: " + winDirs;
backupDir = winDirs.values().iterator().next().getAbsoluteFile();
}
- File file3Backup = new File(backupDir, file3Dest.getAbsolutePath());
- File file4Backup = new File(backupDir, file4Dest.getAbsolutePath());
+
+ File file3Backup;
+ File file4Backup;
+ boolean isWindows = (File.separatorChar == '\\');
+ if (isWindows) {
+ StringBuilder file3AbsPath = new StringBuilder(file3Dest.getAbsolutePath());
+ StringBuilder file4AbsPath = new StringBuilder(file4Dest.getAbsolutePath());
+ FileUtil.stripDriveLetter(file3AbsPath);
+ FileUtil.stripDriveLetter(file4AbsPath);
+ file3Backup = new File(backupDir, file3AbsPath.toString());
+ file4Backup = new File(backupDir, file4AbsPath.toString());
+ } else {
+ file3Backup = new File(backupDir, file3Dest.getAbsolutePath());
+ file4Backup = new File(backupDir, file4Dest.getAbsolutePath());
+ }
assert file3Backup.isFile() : "should have been backed up: " + file3Backup;
assert file4Backup.isFile() : "should have been backed up: " + file4Backup;
assert (TEST3 + "modified").equals(readFile(file3Backup)) : "bad backup file: " + file3Backup;
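
The Windows branch exists because the backup tree mirrors absolute destination paths under the backup directory, and on Windows the drive letter has to come off first; naively joining backupDir with C:\data\file3.txt would not produce the nested path the bundle backup actually writes. A sketch of the intent (FileUtil is assumed to be org.rhq.core.util.file.FileUtil, matching the stripDriveLetter(StringBuilder) call in the diff):

    import java.io.File;

    import org.rhq.core.util.file.FileUtil;

    public class BackupPathSketch {
        // Computes where a destination file should appear under the backup
        // dir: on Windows, strip the drive letter from the absolute path
        // before joining, e.g. "C:\data\f.txt" -> "\data\f.txt".
        public static File backupLocation(File backupDir, File dest) {
            if (File.separatorChar == '\\') {
                StringBuilder absPath = new StringBuilder(dest.getAbsolutePath());
                FileUtil.stripDriveLetter(absPath);
                return new File(backupDir, absPath.toString());
            }
            return new File(backupDir, dest.getAbsolutePath());
        }
    }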
[rhq] Branch 'feature/cassandra-backend' - modules/common
by John Sanda
modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
New commits:
commit ca71bf11807aabf766409cc9d2a9712da0395301
Author: John Sanda <jsanda(a)redhat.com>
Date: Tue Apr 30 13:44:11 2013 -0400
make sure rhq-storage.log is written to RHQ_SERVER_HOME/logs
diff --git a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java
index 39a4a64..9ea1d65 100644
--- a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java
+++ b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java
@@ -213,8 +213,7 @@ public class StorageInstaller {
deploymentOptions.setCommitLogDir(commitLogDir);
deploymentOptions.setDataDir(dataDir);
deploymentOptions.setSavedCachesDir(savedCachesDir);
- // deploymentOptions.setLogFileName(logFile.getPath());
- deploymentOptions.setLogFileName(logFile.getName());
+ deploymentOptions.setLogFileName(logFile.getPath());
deploymentOptions.setLoggingLevel("INFO");
deploymentOptions.setRpcPort(rpcPort);
deploymentOptions.setJmxPort(getPort(cmdLine, "jmx-port", jmxPort));
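
The one-liner hinges on the java.io.File accessors: getName() keeps only the last path element, so Cassandra had been resolving a bare rhq-storage.log against its own working directory rather than RHQ_SERVER_HOME/logs; getPath() preserves the directory. A minimal illustration (the path is made up):

    import java.io.File;

    public class LogPathSketch {
        public static void main(String[] args) {
            File logFile = new File("/opt/rhq/logs/rhq-storage.log");
            System.out.println(logFile.getName()); // rhq-storage.log  -- directory lost
            System.out.println(logFile.getPath()); // /opt/rhq/logs/rhq-storage.log
        }
    }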
[rhq] Branch 'feature/cassandra-backend' - 2 commits - modules/enterprise
by John Sanda
modules/enterprise/server/appserver/pom.xml | 5 +----
modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml | 3 +--
modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Start.java | 6 +-----
3 files changed, 3 insertions(+), 11 deletions(-)
New commits:
commit 42ca26e04cba93aa4e57e11cb43ea74eb7d7f35b
Author: John Sanda <jsanda(a)redhat.com>
Date: Tue Apr 30 12:59:24 2013 -0400
removing duplicate calls to chmod task in ant scripts
diff --git a/modules/enterprise/server/appserver/pom.xml b/modules/enterprise/server/appserver/pom.xml
index be595d5..42aa4a5 100644
--- a/modules/enterprise/server/appserver/pom.xml
+++ b/modules/enterprise/server/appserver/pom.xml
@@ -271,12 +271,9 @@
<!-- Make sure shell scripts are readable and executable. -->
<chmod perm="ug+x" verbose="true">
- <fileset dir="${deployment.dir}/bin" includes="*.sh" />
+ <fileset dir="${deployment.dir}/bin" includes="*.sh,rhqctl" />
<fileset dir="${deployment.dir}/jbossas/bin" includes="*.sh" />
</chmod>
-
- <!-- Adding explicitly the rhqctl script. -->
- <chmod perm="ug+x" verbose="true" file="${deployment.dir}/bin/rhqctl" />
</target>
</configuration>
<goals>
diff --git a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
index 72a61c0..945e2c9 100644
--- a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
+++ b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
@@ -834,7 +834,6 @@ rhq.cassandra.logging.level=${rhq.cassandra.logging.level}
<echo>Make sure shell scripts are readable and executable.</echo>
<chmod dir="${project.build.outputDirectory}/bin" includes="*.sh,rhqctl" perm="ug+x" verbose="true" />
<chmod dir="${jboss.home}/bin" includes="*.sh" perm="ug+x" verbose="true" />
- <chmod file="${project.build.outputDirectory}/bin/rhqctl" perm="ug+x" verbose="true" />
</target>
<target name="developer-release-message" if="predeploy">
commit 9763289df11a6813c6ff8594642d063723ec6060
Author: John Sanda <jsanda(a)redhat.com>
Date: Tue Apr 30 12:53:15 2013 -0400
fix start command for agent and make rhqctl script executable
diff --git a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
index c84e597..72a61c0 100644
--- a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
+++ b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
@@ -832,7 +832,7 @@ rhq.cassandra.logging.level=${rhq.cassandra.logging.level}
<target name="fix-perms">
<echo>Make sure shell scripts are readable and executable.</echo>
- <chmod dir="${project.build.outputDirectory}/bin" includes="*.sh" perm="ug+x" verbose="true" />
+ <chmod dir="${project.build.outputDirectory}/bin" includes="*.sh,rhqctl" perm="ug+x" verbose="true" />
<chmod dir="${jboss.home}/bin" includes="*.sh" perm="ug+x" verbose="true" />
<chmod file="${project.build.outputDirectory}/bin/rhqctl" perm="ug+x" verbose="true" />
</target>
diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Start.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Start.java
index cfc104d..ef25370 100644
--- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Start.java
+++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Start.java
@@ -188,11 +188,7 @@ public class Start extends ControlCommand {
log.debug("Failed to start agent service", e);
}
} else {
- String pid = getAgentPid();
-
- if (pid != null) {
- executor.execute(commandLine);
- }
+ executor.execute(commandLine);
}
}
[rhq] Branch 'feature/cassandra-backend' - modules/enterprise
by Jiri Kremser
modules/enterprise/server/appserver/pom.xml | 3 +++
modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml | 1 +
2 files changed, 4 insertions(+)
New commits:
commit cdce8b171e886a221ee3e2fa2cd024e0944498ec
Author: Jirka Kremser <jkremser(a)redhat.com>
Date: Tue Apr 30 18:51:26 2013 +0200
Adding the executable bit for rhqctl script.
diff --git a/modules/enterprise/server/appserver/pom.xml b/modules/enterprise/server/appserver/pom.xml
index 7129c4e..be595d5 100644
--- a/modules/enterprise/server/appserver/pom.xml
+++ b/modules/enterprise/server/appserver/pom.xml
@@ -274,6 +274,9 @@
<fileset dir="${deployment.dir}/bin" includes="*.sh" />
<fileset dir="${deployment.dir}/jbossas/bin" includes="*.sh" />
</chmod>
+
+ <!-- Adding explicitly the rhqctl script. -->
+ <chmod perm="ug+x" verbose="true" file="${deployment.dir}/bin/rhqctl" />
</target>
</configuration>
<goals>
diff --git a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
index 8a649bf..c84e597 100644
--- a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
+++ b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
@@ -834,6 +834,7 @@ rhq.cassandra.logging.level=${rhq.cassandra.logging.level}
<echo>Make sure shell scripts are readable and executable.</echo>
<chmod dir="${project.build.outputDirectory}/bin" includes="*.sh" perm="ug+x" verbose="true" />
<chmod dir="${jboss.home}/bin" includes="*.sh" perm="ug+x" verbose="true" />
+ <chmod file="${project.build.outputDirectory}/bin/rhqctl" perm="ug+x" verbose="true" />
</target>
<target name="developer-release-message" if="predeploy">
[rhq] Branch 'feature/cassandra-backend' - modules/enterprise
by Jay Shaughnessy
modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Status.java | 12 +++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
New commits:
commit dc69e64b584f0534f5af8e1efac9b29db95cffca
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Tue Apr 30 10:17:51 2013 -0400
Fix 'rhqctl status' for Windows; avoid throwing an exception on the agent check
diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Status.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Status.java
index 478b142..b11d23f 100644
--- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Status.java
+++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Status.java
@@ -31,6 +31,7 @@ import java.io.FileReader;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Options;
import org.apache.commons.exec.DefaultExecutor;
+import org.apache.commons.exec.ExecuteException;
import org.apache.commons.exec.Executor;
import org.apache.commons.exec.PumpStreamHandler;
@@ -159,6 +160,15 @@ public class Status extends ControlCommand {
Executor executor = new DefaultExecutor();
executor.setWorkingDirectory(agentBinDir);
executor.setStreamHandler(new PumpStreamHandler());
- executor.execute(commandLine);
+ try {
+ executor.execute(commandLine);
+ } catch (ExecuteException e) {
+ // For windows the JSW exit code for a status check is expected to be a mask value and the agent wrapper
+ // .bat will return it explicitly. We can ignore it and assume that the logged output is sufficient.
+ // See http://wrapper.tanukisoftware.com/doc/english/launch-win.html#standalone-...
+ if (!isWindows()) {
+ throw e;
+ }
+ }
}
}
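
commons-exec raises ExecuteException for any exit value it was not told to expect, which is why the mask-style exit codes from the Java Service Wrapper trip it on Windows. Catching the exception, as above, keeps strict checking elsewhere; an alternative sketch is to widen the accepted exit values up front with setExitValues (null means accept them all), at the cost of no longer failing on genuinely bad codes:

    import org.apache.commons.exec.CommandLine;
    import org.apache.commons.exec.DefaultExecutor;
    import org.apache.commons.exec.PumpStreamHandler;

    public class StatusCheckSketch {
        // Runs a status command whose exit code is informational (a bit
        // mask) rather than pass/fail, and returns it for inspection.
        public static int run(String command) throws Exception {
            DefaultExecutor executor = new DefaultExecutor();
            executor.setStreamHandler(new PumpStreamHandler());
            executor.setExitValues(null); // treat every exit value as success
            return executor.execute(CommandLine.parse(command));
        }
    }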
[rhq] modules/enterprise
by mazz
modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBean.java | 5 +----
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java | 5 +----
2 files changed, 2 insertions(+), 8 deletions(-)
New commits:
commit 03caa3d61d59d2e041750b0581a31098acf61f10
Author: John Mazzitelli <mazz(a)redhat.com>
Date: Tue Apr 30 09:47:25 2013 -0400
these methods' @TransactionAttribute annotations are unused. Also, spoke to Lukas; he said these methods should be made private
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBean.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBean.java
index bfe9f32..6689de5 100644
--- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBean.java
+++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/StrippedDownStartupBean.java
@@ -21,8 +21,6 @@
package org.rhq.enterprise.server.test;
import javax.ejb.Singleton;
-import javax.ejb.TransactionAttribute;
-import javax.ejb.TransactionAttributeType;
import org.rhq.enterprise.server.core.StartupBean;
import org.rhq.enterprise.server.naming.NamingHack;
@@ -34,8 +32,7 @@ import org.rhq.enterprise.server.naming.NamingHack;
@Singleton
public class StrippedDownStartupBean {
- @TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED)
- public void secureNaming() {
+ private void secureNaming() {
NamingHack.bruteForceInitialContextFactoryBuilder();
}
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java
index 5b5aa8c..90ffbaf 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/core/StartupBean.java
@@ -146,11 +146,8 @@ public class StartupBean implements StartupLocal {
/**
* Modifies the naming subsystem to be able to check for Java security permissions on JNDI lookup.
- * <p>
- * Made public so that this can be reused in tests.
*/
- @TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED)
- public void secureNaming() {
+ private void secureNaming() {
NamingHack.bruteForceInitialContextFactoryBuilder();
}
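
The removed annotations were dead code because container-managed transaction attributes only apply when a call passes through the EJB proxy; a bean method calling another method of the same instance directly bypasses the interceptor chain entirely. If NOT_SUPPORTED semantics were ever genuinely needed on such an internal step, the standard workaround is to re-enter through the business object; a sketch, not part of the commit:

    import javax.annotation.Resource;
    import javax.ejb.SessionContext;
    import javax.ejb.Singleton;
    import javax.ejb.TransactionAttribute;
    import javax.ejb.TransactionAttributeType;

    @Singleton
    public class SelfInvocationSketch {

        @Resource
        private SessionContext sessionContext;

        public void entryPoint() {
            // Direct self-call: the attribute on suspendedWork() is ignored.
            // suspendedWork();

            // Proxied call: the container sees it and suspends any caller
            // transaction for the duration.
            sessionContext.getBusinessObject(SelfInvocationSketch.class).suspendedWork();
        }

        @TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED)
        public void suspendedWork() {
            // runs outside any transaction when invoked via the proxy
        }
    }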