 dev/null |binary
 modules/common/cassandra-common-itests/src/main/java/org/rhq/cassandra/CassandraClusterManager.java | 33
 modules/common/cassandra-common-itests/src/main/java/org/rhq/cassandra/DeployCluster.java | 18
 modules/common/cassandra-common/pom.xml | 10
 modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/conf/cassandra-env.sh | 233 ---
 modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/conf/cassandra.yaml | 627 ---
 modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/conf/log4j-server.properties | 52
 modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/conf/cassandra-env.sh | 233 +++
 modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/conf/cassandra.yaml | 645 ++++++++++
 modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/conf/log4j-server.properties | 52
 modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/lib/jna-3.4.1.jar |binary
 modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/lib/platform-3.4.1.jar |binary
 modules/enterprise/server/server-metrics/pom.xml | 4
 modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsDAO.java | 5
 modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java | 16
 modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java | 12
 modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java | 167 --
 modules/enterprise/server/server-metrics/src/test/resources/log4j.properties | 6
 18 files changed, 1069 insertions(+), 1044 deletions(-)
New commits:

commit c72cb9eb28cd2c862ffd1f27b9b8763ea3a6d4e2
Author: John Sanda <jsanda@redhat.com>
Date:   Tue Nov 20 15:58:01 2012 -0500
move cluster init logic out of test and into CassandraClusterManager
Putting the cluster init logic in CassandraClusterManager will minimize duplication across tests.
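For context, the pattern this enables looks like the following minimal sketch. The class name and body are illustrative; the annotation, listener, and attributes are the ones this commit introduces, and the diff below shows CassandraIntegrationTest adopting exactly this shape.

    import org.testng.annotations.BeforeClass;
    import org.testng.annotations.Listeners;

    import org.rhq.cassandra.CassandraClusterManager;
    import org.rhq.cassandra.DeployCluster;

    // Illustrative test class: registering CassandraClusterManager as a TestNG
    // listener lets it intercept the @BeforeClass method, read the @DeployCluster
    // attributes, and run the deploy / wait-for-start / wait-for-schema-agreement
    // steps itself, so individual tests carry no cluster plumbing.
    @Listeners({CassandraClusterManager.class})
    public class ExampleIntegrationTest {

        @BeforeClass
        @DeployCluster(numNodes = 2)
        public void deployCluster() {
            // nothing to do here; the listener performs the cluster init
        }
    }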
diff --git a/modules/common/cassandra-common-itests/src/main/java/org/rhq/cassandra/CassandraClusterManager.java b/modules/common/cassandra-common-itests/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
index f156dbd..9f35d9c 100644
--- a/modules/common/cassandra-common-itests/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
+++ b/modules/common/cassandra-common-itests/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
@@ -30,6 +30,8 @@ import java.io.FileReader;
 import java.io.IOException;
 import java.io.StringWriter;
 import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.List;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -39,6 +41,8 @@ import org.testng.ITestResult;

 import org.rhq.core.util.stream.StreamUtil;

+import me.prettyprint.cassandra.service.CassandraHost;
+
 /**
  * @author John Sanda
  */
@@ -92,6 +96,24 @@ public class CassandraClusterManager implements IInvokedMethodListener {
         deployer.setDeploymentOptions(deploymentOptions);
         deployer.deploy();
+
+        ClusterInitService clusterInitService = new ClusterInitService();
+        List<CassandraHost> cassandraHosts = getCassandraHosts(deployer.getCassandraHosts());
+
+        if (annotation.waitForClusterToStart()) {
+            clusterInitService.waitForClusterToStart(cassandraHosts);
+        }
+
+        if (annotation.waitForSchemaAgreement()) {
+            // TODO do not hard code cluster name
+            // I am ok with hard coding the cluster name for now as it is only required
+            // by the Hector API, and it is to be determined whether or not we will continue
+            // using Hector. If we wind up directly using the underlying Thrift API, there
+            // is no cluster name argument.
+            //
+            // jsanda
+            clusterInitService.waitForSchemaAgreement("rhq", cassandraHosts);
+        }
     }

     private void shutdownCluster() throws Exception {
@@ -113,4 +135,15 @@ public class CassandraClusterManager implements IInvokedMethodListener {
         return Long.parseLong(writer.getBuffer().toString());
     }
+
+    private List<CassandraHost> getCassandraHosts(String hosts) {
+        List<CassandraHost> cassandraHosts = new ArrayList<CassandraHost>();
+
+        for (String s : hosts.split(",")) {
+            String[] params = s.split(":");
+            cassandraHosts.add(new CassandraHost(params[0], Integer.parseInt(params[1])));
+
+        }
+        return cassandraHosts;
+    }
 }
diff --git a/modules/common/cassandra-common-itests/src/main/java/org/rhq/cassandra/DeployCluster.java b/modules/common/cassandra-common-itests/src/main/java/org/rhq/cassandra/DeployCluster.java
index ec9d80a..3b4bee4 100644
--- a/modules/common/cassandra-common-itests/src/main/java/org/rhq/cassandra/DeployCluster.java
+++ b/modules/common/cassandra-common-itests/src/main/java/org/rhq/cassandra/DeployCluster.java
@@ -31,12 +31,30 @@ import java.lang.annotation.RetentionPolicy;
 import java.lang.annotation.Target;

 /**
+ * boo!
+ *
  * @author John Sanda
  */
 @Retention(RetentionPolicy.RUNTIME)
 @Target({ElementType.TYPE, ElementType.METHOD })
 public @interface DeployCluster {

+    /**
+     * @return The number of nodes in the cluster. Defaults to two.
+     */
     int numNodes() default 2;

+    /**
+     * @return A flag that specifies whether or not to wait for all cluster nodes to start.
+     * The approach that is currently used to determine whether or not a node is started is to
+     * open a Thrift connection to that node. This attribute defaults to true.
+     */
+    boolean waitForClusterToStart() default true;
+
+    /**
+     * @return A flag that specifies whether or not to wait for schema agreement across the
+     * cluster. Defaults to true.
+     */
+    boolean waitForSchemaAgreement() default true;
+
 }
diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java
index 4511760..55b148e 100644
--- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java
+++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java
@@ -53,17 +53,9 @@ public class CassandraIntegrationTest {
     private DateTimeService dateTimeService;

     @BeforeClass
-    @DeployCluster(numNodes = 2)
+    @DeployCluster
     public void deployCluster() throws CassandraException {
         dateTimeService = new DateTimeService();
-
-//        List<CassandraHost> hosts = asList(new CassandraHost("127.0.0.1", 9160), new CassandraHost("127.0.0.2", 9160));
-//        List<CassandraHost> hosts = asList(new CassandraHost("127.0.0.1", 9160));
-//        ClusterInitService initService = new ClusterInitService();
-//
-//        initService.waitForClusterToStart(hosts);
-//        initService.waitForSchemaAgreement("rhq", hosts);
-
         dataSource = new CassandraDataSource("127.0.0.1", 9160, "rhq", null, null, "3.0.0");
         try {
             connection = dataSource.getConnection();
diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java
index 18724bf..f14fe1b 100644
--- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java
+++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java
@@ -86,7 +86,6 @@ public class MetricsDAOTest extends CassandraIntegrationTest {
         data.add(new MeasurementDataNumeric(oneMinuteAgo.getMillis(), scheduleId, 2.6));

         MetricsDAO dao = new MetricsDAO(dataSource);
-//        int ttl = Days.days(10).toStandardSeconds().getSeconds();
         int ttl = Hours.ONE.toStandardSeconds().getSeconds();
         long timestamp = System.currentTimeMillis();
         Set<MeasurementDataNumeric> actualUpdates = dao.insertRawMetrics(data, ttl, timestamp);
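The waitForClusterToStart Javadoc above describes the detection approach: a node counts as started once a Thrift connection can be opened to it. ClusterInitService's implementation is not shown in this digest; a minimal sketch of that kind of probe, with illustrative timeout and retry values, would be:

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;

    // Sketch of a "node started" probe: succeed once the node's Thrift port
    // (9160 by default) accepts a TCP connection. The 1s connect timeout and
    // retry sleep are illustrative, not values taken from ClusterInitService.
    public class ThriftPortProbe {

        public static void waitForNode(String host, int port) throws InterruptedException {
            while (true) {
                try {
                    Socket socket = new Socket();
                    socket.connect(new InetSocketAddress(host, port), 1000);
                    socket.close();
                    return; // the node is listening
                } catch (IOException e) {
                    Thread.sleep(1000); // not up yet; back off and retry
                }
            }
        }
    }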
commit 46aac51b8a097f91bd0fc7302e4dd27e0b8bf5e2
Author: John Sanda <jsanda@redhat.com>
Date:   Tue Nov 20 14:51:17 2012 -0500
fixing whitespace error in query and debugging tests
I am no longer using a TRUNCATE statement to purge tables before a test run. I am not sure why, but TRUNCATE takes a very long time, so long that it causes tests to fail.
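The replacement strategy appears in the MetricsDAOTest hunk below: delete only the partitions the tests write to, rather than truncating whole tables. Pulled out as a standalone sketch (table names and schedule ids are the ones the test uses; the helper class itself is illustrative):

    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;

    // Illustrative helper: instead of TRUNCATE, delete just the rows the tests
    // create (schedule ids 123 and 456, plus the index entries for each bucket).
    // This keeps cleanup fast enough that the tests no longer time out.
    public class TestDataCleaner {

        public static void reset(Connection connection) throws SQLException {
            Statement statement = connection.createStatement();
            statement.executeUpdate("DELETE FROM raw_metrics WHERE schedule_id IN (123, 456)");
            statement.executeUpdate("DELETE FROM one_hour_metrics WHERE schedule_id IN (123, 456)");
            statement.executeUpdate("DELETE FROM six_hour_metrics WHERE schedule_id IN (123, 456)");
            statement.executeUpdate("DELETE FROM metrics_index WHERE bucket IN " +
                "('raw_metrics', 'one_hour_metrics', 'six_hour_metrics')");
            statement.close();
        }
    }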
diff --git a/modules/enterprise/server/server-metrics/pom.xml b/modules/enterprise/server/server-metrics/pom.xml
index 0494f81..04c86e3 100644
--- a/modules/enterprise/server/server-metrics/pom.xml
+++ b/modules/enterprise/server/server-metrics/pom.xml
@@ -39,7 +39,7 @@
   <name>RHQ Server Metrics</name>

   <properties>
-    <cassandra.version>1.1.5</cassandra.version>
+    <cassandra.version>1.2.0-beta2</cassandra.version>
     <skipClusterShutdown>false</skipClusterShutdown>
   </properties>

@@ -85,7 +85,7 @@
       <groupId>org.apache-extras.cassandra-jdbc</groupId>
       <artifactId>cassandra-jdbc</artifactId>
       <version>1.2.0-SNAPSHOT</version>
-      <scope>provided</scope>
+      <!--<scope>provided</scope>-->
     </dependency>

     <dependency>
diff --git a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsDAO.java b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsDAO.java
index fb7b1db..e083dea 100644
--- a/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsDAO.java
+++ b/modules/enterprise/server/server-metrics/src/main/java/org/rhq/server/metrics/MetricsDAO.java
@@ -93,7 +93,8 @@ public class MetricsDAO {
         PreparedStatement statement = null;
         try {
             String sql = "INSERT INTO raw_metrics (schedule_id, time, value) VALUES (?, ?, ?) " +
-                "USING TTL " + ttl + " AND TIMESTAMP " + timestamp;
+                "USING TTL " + ttl;
+//            sql = "INSERT INTO raw_metrics (schedule_id, time, value) VALUES (?, ?, ?) ";
             connection = dataSource.getConnection();
             statement = connection.prepareStatement(sql);
@@ -221,7 +222,7 @@ public class MetricsDAO {
         ResultSet resultSet = null;

         String sql =
-            "SELECT schedule_id, time, type, value, ttl(value), writetime(value)" +
+            "SELECT schedule_id, time, type, value, ttl(value), writetime(value) " +
             "FROM " + bucket + " " +
             "WHERE schedule_id = " + scheduleId + " " +
             "ORDER BY time, type";
diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java
index ac4db0f..4511760 100644
--- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java
+++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/CassandraIntegrationTest.java
@@ -25,11 +25,9 @@

 package org.rhq.server.metrics;

-import static java.util.Arrays.asList;
-
 import java.sql.Connection;
 import java.sql.SQLException;
-import java.util.List;
+import java.sql.Statement;

 import org.apache.cassandra.cql.jdbc.CassandraDataSource;
 import org.joda.time.DateTime;
@@ -39,12 +37,9 @@ import org.testng.annotations.Listeners;

 import org.rhq.cassandra.CassandraClusterManager;
 import org.rhq.cassandra.CassandraException;
-import org.rhq.cassandra.ClusterInitService;
 import org.rhq.cassandra.DeployCluster;
 import org.rhq.cassandra.ShutdownCluster;

-import me.prettyprint.cassandra.service.CassandraHost;
-
 /**
  * @author John Sanda
  */
@@ -58,19 +53,22 @@ public class CassandraIntegrationTest {
     private DateTimeService dateTimeService;

     @BeforeClass
-    @DeployCluster
+    @DeployCluster(numNodes = 2)
     public void deployCluster() throws CassandraException {
         dateTimeService = new DateTimeService();

-        List<CassandraHost> hosts = asList(new CassandraHost("127.0.0.1", 9160), new CassandraHost("127.0.0.2", 9160));
-        ClusterInitService initService = new ClusterInitService();
-
-        initService.waitForClusterToStart(hosts);
-        initService.waitForSchemaAgreement("rhq", hosts);
+//        List<CassandraHost> hosts = asList(new CassandraHost("127.0.0.1", 9160), new CassandraHost("127.0.0.2", 9160));
+//        List<CassandraHost> hosts = asList(new CassandraHost("127.0.0.1", 9160));
+//        ClusterInitService initService = new ClusterInitService();
+//
+//        initService.waitForClusterToStart(hosts);
+//        initService.waitForSchemaAgreement("rhq", hosts);

         dataSource = new CassandraDataSource("127.0.0.1", 9160, "rhq", null, null, "3.0.0");
         try {
             connection = dataSource.getConnection();
+            Statement statement = connection.createStatement();
+            statement.execute("use rhq;");
         } catch (SQLException e) {
             throw new CassandraException("Unable to get JDBC connection.", e);
         }
diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java
index 2e22d0e..18724bf 100644
--- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java
+++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsDAOTest.java
@@ -59,10 +59,14 @@ public class MetricsDAOTest extends CassandraIntegrationTest {
     @BeforeMethod
     public void resetDB() throws Exception {
         Statement statement = connection.createStatement();
-        statement.executeUpdate("TRUNCATE " + RAW_METRICS_TABLE);
-        statement.executeUpdate("TRUNCATE " + ONE_HOUR_METRICS_TABLE);
-        statement.executeUpdate("TRUNCATE " + METRICS_INDEX_TABLE);
-        statement.executeUpdate("TRUNCATE " + SIX_HOUR_METRICS_TABLE);
+//        statement.executeUpdate("TRUNCATE " + RAW_METRICS_TABLE);
+//        statement.executeUpdate("TRUNCATE " + ONE_HOUR_METRICS_TABLE);
+//        statement.executeUpdate("TRUNCATE " + METRICS_INDEX_TABLE);
+//        statement.executeUpdate("TRUNCATE " + SIX_HOUR_METRICS_TABLE);
+        statement.executeUpdate("DELETE FROM " + RAW_METRICS_TABLE + " WHERE schedule_id IN (123, 456)");
+        statement.executeUpdate("DELETE FROM " + ONE_HOUR_METRICS_TABLE + " WHERE schedule_id IN (123, 456)");
+        statement.executeUpdate("DELETE FROM " + SIX_HOUR_METRICS_TABLE + " WHERE schedule_id IN (123, 456)");
+        statement.executeUpdate("DELETE FROM " + METRICS_INDEX_TABLE + " WHERE bucket IN ('raw_metrics', 'one_hour_metrics', 'six_hour_metrics')");
     }

     @Test
@@ -82,6 +86,7 @@ public class MetricsDAOTest extends CassandraIntegrationTest {
         data.add(new MeasurementDataNumeric(oneMinuteAgo.getMillis(), scheduleId, 2.6));

         MetricsDAO dao = new MetricsDAO(dataSource);
+//        int ttl = Days.days(10).toStandardSeconds().getSeconds();
         int ttl = Hours.ONE.toStandardSeconds().getSeconds();
         long timestamp = System.currentTimeMillis();
         Set<MeasurementDataNumeric> actualUpdates = dao.insertRawMetrics(data, ttl, timestamp);
diff --git a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java
index 149a16d..826b8cf 100644
--- a/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java
+++ b/modules/enterprise/server/server-metrics/src/test/java/org/rhq/server/metrics/MetricsServerTest.java
@@ -47,8 +47,10 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;

 import org.apache.commons.logging.Log;
@@ -56,10 +58,8 @@ import org.apache.commons.logging.LogFactory;
 import org.joda.time.DateTime;
 import org.joda.time.Minutes;
 import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Listeners;
 import org.testng.annotations.Test;

-import org.rhq.cassandra.CassandraClusterManager;
 import org.rhq.core.domain.measurement.DataType;
 import org.rhq.core.domain.measurement.MeasurementDataNumeric;
 import org.rhq.core.domain.measurement.MeasurementSchedule;
@@ -72,7 +72,6 @@ import me.prettyprint.cassandra.serializers.IntegerSerializer;
 import me.prettyprint.cassandra.serializers.LongSerializer;
 import me.prettyprint.cassandra.serializers.StringSerializer;
 import me.prettyprint.cassandra.service.ColumnSliceIterator;
-import me.prettyprint.hector.api.Cluster;
 import me.prettyprint.hector.api.Keyspace;
 import me.prettyprint.hector.api.beans.Composite;
 import me.prettyprint.hector.api.beans.HColumn;
@@ -83,7 +82,7 @@ import me.prettyprint.hector.api.query.SliceQuery;
 /**
  * @author John Sanda
  */
-@Listeners({CassandraClusterManager.class})
+//@Listeners({CassandraClusterManager.class})
 public class MetricsServerTest extends CassandraIntegrationTest {

     private static final boolean ENABLED = false;
@@ -132,24 +131,24 @@ public class MetricsServerTest extends CassandraIntegrationTest {

     @BeforeMethod
     public void initServer() throws Exception {
-        Cluster cluster = HFactory.getOrCreateCluster("rhq", "127.0.0.1:9160");
-        keyspace = HFactory.createKeyspace("rhq", cluster);
-
+//        Cluster cluster = HFactory.getOrCreateCluster("rhq", "127.0.0.1:9160");
+//        keyspace = HFactory.createKeyspace("rhq", cluster);
+//
         metricsServer = new MetricsServerStub();
-        metricsServer.setCluster(cluster);
-        metricsServer.setKeyspace(keyspace);
-        metricsServer.setRawMetricsDataCF(RAW_METRIC_DATA_CF);
-        metricsServer.setOneHourMetricsDataCF(ONE_HOUR_METRIC_DATA_CF);
-        metricsServer.setSixHourMetricsDataCF(SIX_HOUR_METRIC_DATA_CF);
-        metricsServer.setTwentyFourHourMetricsDataCF(TWENTY_FOUR_HOUR_METRIC_DATA_CF);
-        metricsServer.setMetricsIndex(METRICS_INDEX);
-        metricsServer.setTraitsCF(TRAITS_CF);
-        metricsServer.setResourceTraitsCF(RESOURCE_TRAITS_CF);
+//        metricsServer.setCluster(cluster);
+//        metricsServer.setKeyspace(keyspace);
+//        metricsServer.setRawMetricsDataCF(RAW_METRIC_DATA_CF);
+//        metricsServer.setOneHourMetricsDataCF(ONE_HOUR_METRIC_DATA_CF);
+//        metricsServer.setSixHourMetricsDataCF(SIX_HOUR_METRIC_DATA_CF);
+//        metricsServer.setTwentyFourHourMetricsDataCF(TWENTY_FOUR_HOUR_METRIC_DATA_CF);
+//        metricsServer.setMetricsIndex(METRICS_INDEX);
+//        metricsServer.setTraitsCF(TRAITS_CF);
+//        metricsServer.setResourceTraitsCF(RESOURCE_TRAITS_CF);
         metricsServer.setCassandraDS(dataSource);

         dao = new MetricsDAO(dataSource);

-        purgeDB();
+        //purgeDB();
     }

     private void purgeDB() throws SQLException {
@@ -214,50 +213,15 @@ public class MetricsServerTest extends CassandraIntegrationTest {
         DateTime secondMetricTime = hour6.minusMinutes(2);
         DateTime thirdMetricTime = hour6.minusMinutes(1);

-        String scheduleName = getClass().getName() + "_SCHEDULE";
-        long interval = MINUTE * 15;
-        boolean enabled = true;
-        DataType dataType = DataType.MEASUREMENT;
-        MeasurementScheduleRequest request = new MeasurementScheduleRequest(scheduleId, scheduleName, interval,
-            enabled, dataType);
-        Set<MeasurementDataNumeric> data = new HashSet<MeasurementDataNumeric>();
-        data.add(new MeasurementDataNumeric(firstMetricTime.getMillis(), request, 3.2));
-        data.add(new MeasurementDataNumeric(secondMetricTime.getMillis(), request, 3.9));
-        data.add(new MeasurementDataNumeric(thirdMetricTime.getMillis(), request, 2.6));
+        data.add(new MeasurementDataNumeric(firstMetricTime.getMillis(), scheduleId, 3.2));
+        data.add(new MeasurementDataNumeric(secondMetricTime.getMillis(), scheduleId, 3.9));
+        data.add(new MeasurementDataNumeric(thirdMetricTime.getMillis(), scheduleId, 2.6));

         metricsServer.setCurrentHour(hour6);
         metricsServer.addNumericData(data);
         metricsServer.calculateAggregates();

-        // verify one hour metric data is calculated
-        // The ttl for 1 hour data is 14 days.
-//        int ttl = Days.days(14).toStandardSeconds().getSeconds();
-//        List<HColumn<Composite, Double>> expected1HourData = asList(
-//            HFactory.createColumn(createAggregateKey(lastHour, AggregateType.MAX), 3.9, ttl, CompositeSerializer.get(),
-//                DoubleSerializer.get()),
-//            HFactory.createColumn(createAggregateKey(lastHour, AggregateType.MIN), 2.6, ttl, CompositeSerializer.get(),
-//                DoubleSerializer.get()),
-//            HFactory.createColumn(createAggregateKey(lastHour, AggregateType.AVG), (3.9 + 3.2 + 2.6) / 3, ttl,
-//                CompositeSerializer.get(), DoubleSerializer.get())
-//        );
-//
-//        assert1HourDataEquals(scheduleId, expected1HourData);
-
-        // verify six hour metric data is calculated
-        // the ttl for 6 hour data is 31 days
-//        ttl = Days.days(31).toStandardSeconds().getSeconds();
-//        List<HColumn<Composite, Double>> expected6HourData = asList(
-//            HFactory.createColumn(createAggregateKey(hour0, AggregateType.MAX), 3.9, ttl, CompositeSerializer.get(),
-//                DoubleSerializer.get()),
-//            HFactory.createColumn(createAggregateKey(hour0, AggregateType.MIN), 2.6, ttl, CompositeSerializer.get(),
-//                DoubleSerializer.get()),
-//            HFactory.createColumn(createAggregateKey(hour0, AggregateType.AVG), (3.9 + 3.2 + 2.6) / 3, ttl,
-//                CompositeSerializer.get(), DoubleSerializer.get())
-//        );
-//
-//        assert6HourDataEquals(scheduleId, expected6HourData);
-
         // verify that one hour metric data is updated
         List<AggregatedNumericMetric> expected = asList(new AggregatedNumericMetric(scheduleId,
             divide((3.9 + 3.2 + 2.6), 3), 2.6, 3.9, lastHour.getMillis()));
@@ -268,6 +232,7 @@ public class MetricsServerTest extends CassandraIntegrationTest {
             divide((3.9 + 3.2 + 2.6), 3), 2.6, 3.9, hour0.getMillis())));

         // TODO verify that 24 hour data is *not* updated
+        // TODO verify metrics index for 24 hour data is updated
     }

     @Test//(enabled = ENABLED)
@@ -295,26 +260,6 @@ public class MetricsServerTest extends CassandraIntegrationTest {
         Set<MeasurementDataNumeric> insertedRawMetrics = dao.insertRawMetrics(rawMetrics, RAW_TTL, timestamp);
         metricsServer.updateMetricsIndex(insertedRawMetrics);

-        // insert raw data to be aggregated
-//        Mutator<Integer> rawMetricsMutator = HFactory.createMutator(keyspace, IntegerSerializer.get());
-//        rawMetricsMutator.addInsertion(scheduleId, RAW_METRIC_DATA_CF, createRawDataColumn(firstMetricTime,
-//            firstValue));
-//        rawMetricsMutator.addInsertion(scheduleId, RAW_METRIC_DATA_CF,
-//            createRawDataColumn(secondMetricTime, secondValue));
-//        rawMetricsMutator.addInsertion(scheduleId, RAW_METRIC_DATA_CF, createRawDataColumn(thirdMetricTime,
-//            thirdValue));
-//
-//        rawMetricsMutator.execute();
-
-        // update the one hour queue
-//        Mutator<String> queueMutator = HFactory.createMutator(keyspace, StringSerializer.get());
-//        Composite key = createQueueColumnName(hour8, scheduleId);
-//        HColumn<Composite, Integer> oneHourQueueColumn = HFactory.createColumn(key, 0, CompositeSerializer.get(),
-//            IntegerSerializer.get());
-//        queueMutator.addInsertion(ONE_HOUR_METRIC_DATA_CF, METRICS_INDEX, oneHourQueueColumn);
-//
-//        queueMutator.execute();
-
         metricsServer.setCurrentHour(hour9);
         metricsServer.calculateAggregates();

@@ -349,7 +294,6 @@ public class MetricsServerTest extends CassandraIntegrationTest {
         // set up the test fixture
         int scheduleId = 123;

-        DateTime now = new DateTime();
         DateTime hour0 = hour0();
         DateTime hour12 = hour0.plusHours(12);
         DateTime hour6 = hour0.plusHours(6);
@@ -358,7 +302,6 @@ public class MetricsServerTest extends CassandraIntegrationTest {

         double min1 = 1.1;
         double avg1 = 2.2;
-        //double max1 = 3.3;
         double max1 = 9.9;

         double min2 = 4.4;
@@ -366,29 +309,37 @@ public class MetricsServerTest extends CassandraIntegrationTest {
         double max2 = 6.6;

         // insert one hour data to be aggregated
-        Mutator<Integer> oneHourMutator = HFactory.createMutator(keyspace, IntegerSerializer.get());
-        oneHourMutator.addInsertion(scheduleId, ONE_HOUR_METRIC_DATA_CF, create1HourColumn(hour7, AggregateType.MAX,
-            max1));
-        oneHourMutator.addInsertion(scheduleId, ONE_HOUR_METRIC_DATA_CF, create1HourColumn(hour7, AggregateType.MIN,
-            min1));
-        oneHourMutator.addInsertion(scheduleId, ONE_HOUR_METRIC_DATA_CF, create1HourColumn(hour7, AggregateType.AVG,
-            avg1));
-        oneHourMutator.addInsertion(scheduleId, ONE_HOUR_METRIC_DATA_CF, create1HourColumn(hour8, AggregateType.MAX,
-            max2));
-        oneHourMutator.addInsertion(scheduleId, ONE_HOUR_METRIC_DATA_CF, create1HourColumn(hour8, AggregateType.MIN,
-            min2));
-        oneHourMutator.addInsertion(scheduleId, ONE_HOUR_METRIC_DATA_CF, create1HourColumn(hour8, AggregateType.AVG,
-            avg2));
-        oneHourMutator.execute();
+//        Mutator<Integer> oneHourMutator = HFactory.createMutator(keyspace, IntegerSerializer.get());
+//        oneHourMutator.addInsertion(scheduleId, ONE_HOUR_METRIC_DATA_CF, create1HourColumn(hour7, AggregateType.MAX,
+//            max1));
+//        oneHourMutator.addInsertion(scheduleId, ONE_HOUR_METRIC_DATA_CF, create1HourColumn(hour7, AggregateType.MIN,
+//            min1));
+//        oneHourMutator.addInsertion(scheduleId, ONE_HOUR_METRIC_DATA_CF, create1HourColumn(hour7, AggregateType.AVG,
+//            avg1));
+//        oneHourMutator.addInsertion(scheduleId, ONE_HOUR_METRIC_DATA_CF, create1HourColumn(hour8, AggregateType.MAX,
+//            max2));
+//        oneHourMutator.addInsertion(scheduleId, ONE_HOUR_METRIC_DATA_CF, create1HourColumn(hour8, AggregateType.MIN,
+//            min2));
+//        oneHourMutator.addInsertion(scheduleId, ONE_HOUR_METRIC_DATA_CF, create1HourColumn(hour8, AggregateType.AVG,
+//            avg2));
+//        oneHourMutator.execute();
+        List<AggregatedNumericMetric> oneHourMetrics = asList(
+            new AggregatedNumericMetric(scheduleId, avg1, min1, max1, hour7.getMillis()),
+            new AggregatedNumericMetric(scheduleId, avg2, min2, max2, hour8.getMillis())
+        );
+        dao.insertAggregates(ONE_HOUR_METRICS_TABLE, oneHourMetrics);

         // update the 6 hour queue
-        Mutator<String> queueMutator = HFactory.createMutator(keyspace, StringSerializer.get());
-        Composite key = createQueueColumnName(hour6, scheduleId);
-        HColumn<Composite, Integer> sixHourQueueColumn = HFactory.createColumn(key, 0, CompositeSerializer.get(),
-            IntegerSerializer.get());
-        queueMutator.addInsertion(SIX_HOUR_METRIC_DATA_CF, METRICS_INDEX, sixHourQueueColumn);
-
-        queueMutator.execute();
+//        Mutator<String> queueMutator = HFactory.createMutator(keyspace, StringSerializer.get());
+//        Composite key = createQueueColumnName(hour6, scheduleId);
+//        HColumn<Composite, Integer> sixHourQueueColumn = HFactory.createColumn(key, 0, CompositeSerializer.get(),
+//            IntegerSerializer.get());
+//        queueMutator.addInsertion(SIX_HOUR_METRIC_DATA_CF, METRICS_INDEX, sixHourQueueColumn);
+//
+//        queueMutator.execute();
+        Map<Integer, DateTime> indexUpdates = new HashMap<Integer, DateTime>();
+        indexUpdates.put(scheduleId, hour6);
+        dao.updateMetricsIndex(SIX_HOUR_METRICS_TABLE, indexUpdates);

         // execute the system under test
         metricsServer.setCurrentHour(hour12);
@@ -396,21 +347,23 @@ public class MetricsServerTest extends CassandraIntegrationTest {

         // verify the results
         // verify that the one hour data has been aggregated
-        assert6HourDataEquals(scheduleId, asList(
-            create6HourColumn(hour6, AggregateType.MAX, max1),
-            create6HourColumn(hour6, AggregateType.MIN, min1),
-            create6HourColumn(hour6, AggregateType.AVG, (avg1 + avg2) / 2)
-        ));
+//        assert6HourDataEquals(scheduleId, asList(
+//            create6HourColumn(hour6, AggregateType.MAX, max1),
+//            create6HourColumn(hour6, AggregateType.MIN, min1),
+//            create6HourColumn(hour6, AggregateType.AVG, (avg1 + avg2) / 2)
+//        ));
+        assertMetricDataEquals(SIX_HOUR_METRICS_TABLE, scheduleId, asList(new AggregatedNumericMetric(scheduleId,
+            divide((avg1 + avg2), 2), min1, max1, hour6.getMillis())));

         // verify that the 6 hour queue has been updated
-        assert6HourMetricsIndexEmpty(scheduleId);
+//        assert6HourMetricsIndexEmpty(scheduleId);

         // verify that the 24 hour queue is updated
-        assert24HourMetricsQueueEquals(asList(HFactory.createColumn(createQueueColumnName(hour0, scheduleId), 0,
-            CompositeSerializer.get(), IntegerSerializer.get())));
+//        assert24HourMetricsQueueEquals(asList(HFactory.createColumn(createQueueColumnName(hour0, scheduleId), 0,
+//            CompositeSerializer.get(), IntegerSerializer.get())));

         // verify that 6 hour data is not rolled up into the 24 hour bucket
-        assert24HourDataEmpty(scheduleId);
+//        assert24HourDataEmpty(scheduleId);
     }

     @Test(enabled = ENABLED)
diff --git a/modules/enterprise/server/server-metrics/src/test/resources/log4j.properties b/modules/enterprise/server/server-metrics/src/test/resources/log4j.properties
index de17510..44f8c2e 100644
--- a/modules/enterprise/server/server-metrics/src/test/resources/log4j.properties
+++ b/modules/enterprise/server/server-metrics/src/test/resources/log4j.properties
@@ -23,7 +23,7 @@
 # */
 #

-log4j.rootCategory=INFO, FILE, CONSOLE
+log4j.rootCategory=DEBUG, FILE, CONSOLE

 log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
 log4j.appender.FILE.DatePattern='.'yyyy-MM-dd
@@ -35,4 +35,6 @@ log4j.appender.FILE.Append=false

 log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
 log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
\ No newline at end of file
+log4j.appender.CONSOLE.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
+
+log4j.logger.org.apache.cassandra.cql.jdbc=TRACE
commit 853f23f6fb2788b7f17e7fedd0bb89316daec336
Author: John Sanda <jsanda@redhat.com>
Date:   Tue Nov 20 14:42:15 2012 -0500
upgrade bundle to cassandra 1.2.0 beta2
diff --git a/modules/common/cassandra-common/pom.xml b/modules/common/cassandra-common/pom.xml
index 97c4446..2ef4b9d 100644
--- a/modules/common/cassandra-common/pom.xml
+++ b/modules/common/cassandra-common/pom.xml
@@ -13,7 +13,7 @@
   <name>RHQ Cassandra Common</name>

   <properties>
-    <cassandra.version>1.2.0-beta1</cassandra.version>
+    <cassandra.version>1.2.0-beta2</cassandra.version>
     <local.repo>${settings.localRepository}</local.repo>
   </properties>

@@ -43,6 +43,12 @@
     </dependency>
<dependency> + <groupId>org.apache.cassandra</groupId> + <artifactId>cassandra-thrift</artifactId> + <version>${cassandra.version}</version> + </dependency> + + <dependency> <groupId>org.apache.thrift</groupId> <artifactId>libthrift</artifactId> <version>0.7.0</version> @@ -87,7 +93,7 @@ <property name="cassandra.download.dir" value="${project.build.directory}/cassandra-download"/> <mkdir dir="${cassandra.download.dir}"/> - + <mkdir dir="${settings.localRepository}/org/apache/cassandra/apache-cassandra/${cassandra.version}"/> <get src="http://repo1.maven.org/maven2/org/apache/cassandra/apache-cassandra/$%7Bcass..." dest="${settings.localRepository}/org/apache/cassandra/apache-cassandra/${cassandra.version}/apache-cassandra-${cassandra.version}-bin.tar.gz" skipexisting="true" diff --git a/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/conf/cassandra-env.sh b/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/conf/cassandra-env.sh deleted file mode 100644 index 79b1369..0000000 --- a/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/conf/cassandra-env.sh +++ /dev/null @@ -1,233 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -calculate_heap_sizes() -{ - case "`uname`" in - Linux) - system_memory_in_mb=`free -m | awk '/Mem:/ {print $2}'` - system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo` - ;; - FreeBSD) - system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'` - system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024` - system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'` - ;; - SunOS) - system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'` - system_cpu_cores=`psrinfo | wc -l` - ;; - *) - # assume reasonable defaults for e.g. 
a modern desktop or - # cheap server - system_memory_in_mb="2048" - system_cpu_cores="2" - ;; - esac - - # some systems like the raspberry pi don't report cores, use at least 1 - if [ "$system_cpu_cores" -lt "1" ] - then - system_cpu_cores="1" - fi - - # set max heap size based on the following - # max(min(1/2 ram, 1024MB), min(1/4 ram, 8GB)) - # calculate 1/2 ram and cap to 1024MB - # calculate 1/4 ram and cap to 8192MB - # pick the max - half_system_memory_in_mb=`expr $system_memory_in_mb / 2` - quarter_system_memory_in_mb=`expr $half_system_memory_in_mb / 2` - if [ "$half_system_memory_in_mb" -gt "1024" ] - then - half_system_memory_in_mb="1024" - fi - if [ "$quarter_system_memory_in_mb" -gt "8192" ] - then - quarter_system_memory_in_mb="8192" - fi - if [ "$half_system_memory_in_mb" -gt "$quarter_system_memory_in_mb" ] - then - max_heap_size_in_mb="$half_system_memory_in_mb" - else - max_heap_size_in_mb="$quarter_system_memory_in_mb" - fi - MAX_HEAP_SIZE="${max_heap_size_in_mb}M" - - # Young gen: min(max_sensible_per_modern_cpu_core * num_cores, 1/4 * heap size) - max_sensible_yg_per_core_in_mb="100" - max_sensible_yg_in_mb=`expr $max_sensible_yg_per_core_in_mb "*" $system_cpu_cores` - - desired_yg_in_mb=`expr $max_heap_size_in_mb / 4` - - if [ "$desired_yg_in_mb" -gt "$max_sensible_yg_in_mb" ] - then - HEAP_NEWSIZE="${max_sensible_yg_in_mb}M" - else - HEAP_NEWSIZE="${desired_yg_in_mb}M" - fi -} - -# Determine the sort of JVM we'll be running on. - -java_ver_output=`"${JAVA:-java}" -version 2>&1` - -jvmver=`echo "$java_ver_output" | awk -F'"' 'NR==1 {print $2}'` -JVM_VERSION=${jvmver%_*} -JVM_PATCH_VERSION=${jvmver#*_} - -jvm=`echo "$java_ver_output" | awk 'NR==2 {print $1}'` -case "$jvm" in - OpenJDK) - JVM_VENDOR=OpenJDK - # this will be "64-Bit" or "32-Bit" - JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $2}'` - ;; - "Java(TM)") - JVM_VENDOR=Oracle - # this will be "64-Bit" or "32-Bit" - JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $3}'` - ;; - *) - # Help fill in other JVM values - JVM_VENDOR=other - JVM_ARCH=unknown - ;; -esac - - -# Override these to set the amount of memory to allocate to the JVM at -# start-up. For production use you may wish to adjust this for your -# environment. MAX_HEAP_SIZE is the total amount of memory dedicated -# to the Java heap; HEAP_NEWSIZE refers to the size of the young -# generation. Both MAX_HEAP_SIZE and HEAP_NEWSIZE should be either set -# or not (if you set one, set the other). -# -# The main trade-off for the young generation is that the larger it -# is, the longer GC pause times will be. The shorter it is, the more -# expensive GC will be (usually). -# -# The example HEAP_NEWSIZE assumes a modern 8-core+ machine for decent pause -# times. If in doubt, and if you do not particularly want to tweak, go with -# 100 MB per physical CPU core. - -#MAX_HEAP_SIZE="4G" -#HEAP_NEWSIZE="800M" - -if [ "x$MAX_HEAP_SIZE" = "x" ] && [ "x$HEAP_NEWSIZE" = "x" ]; then - calculate_heap_sizes -else - if [ "x$MAX_HEAP_SIZE" = "x" ] || [ "x$HEAP_NEWSIZE" = "x" ]; then - echo "please set or unset MAX_HEAP_SIZE and HEAP_NEWSIZE in pairs (see cassandra-env.sh)" - exit 1 - fi -fi - -# Specifies the default port over which Cassandra will be available for -# JMX connections. -JMX_PORT="@@jmx.port@@" - - -# Here we create the arguments that will get passed to the jvm when -# starting cassandra. - -JVM_EXTRA_OPTS="@@cassandra.ring.delay.property@@@@cassandra.ring.delay@@" - -# enable assertions. 
disabling this in production will give a modest -# performance benefit (around 5%). -JVM_OPTS="$JVM_OPTS -ea" - -# add the jamm javaagent -if [ "$JVM_VENDOR" != "OpenJDK" -o "$JVM_VERSION" > "1.6.0" ] \ - || [ "$JVM_VERSION" = "1.6.0" -a "$JVM_PATCH_VERSION" -ge 23 ] -then - JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar" -fi - -# enable thread priorities, primarily so we can give periodic tasks -# a lower priority to avoid interfering with client workload -JVM_OPTS="$JVM_OPTS -XX:+UseThreadPriorities" -# allows lowering thread priority without being root. see -# http://tech.stolsvik.com/2010/01/linux-java-thread-priorities-workaround.htm... -JVM_OPTS="$JVM_OPTS -XX:ThreadPriorityPolicy=42" - -# min and max heap sizes should be set to the same value to avoid -# stop-the-world GC pauses during resize, and so that we can lock the -# heap in memory on startup to prevent any of it from being swapped -# out. -JVM_OPTS="$JVM_OPTS -Xms${MAX_HEAP_SIZE}" -JVM_OPTS="$JVM_OPTS -Xmx${MAX_HEAP_SIZE}" -JVM_OPTS="$JVM_OPTS -Xmn${HEAP_NEWSIZE}" -JVM_OPTS="$JVM_OPTS -XX:+HeapDumpOnOutOfMemoryError" - -# set jvm HeapDumpPath with CASSANDRA_HEAPDUMP_DIR -if [ "x$CASSANDRA_HEAPDUMP_DIR" != "x" ]; then - JVM_OPTS="$JVM_OPTS -XX:HeapDumpPath=$CASSANDRA_HEAPDUMP_DIR/cassandra-`date +%s`-pid$$.hprof" -fi - - -startswith() { [ "${1#$2}" != "$1" ]; } - -if [ "`uname`" = "Linux" ] ; then - # reduce the per-thread stack size to minimize the impact of Thrift - # thread-per-client. (Best practice is for client connections to - # be pooled anyway.) Only do so on Linux where it is known to be - # supported. - # u34 and greater need 180k - JVM_OPTS="$JVM_OPTS -Xss180k" -fi -echo "xss = $JVM_OPTS" - -# GC tuning options -JVM_OPTS="$JVM_OPTS -XX:+UseParNewGC" -JVM_OPTS="$JVM_OPTS -XX:+UseConcMarkSweepGC" -JVM_OPTS="$JVM_OPTS -XX:+CMSParallelRemarkEnabled" -JVM_OPTS="$JVM_OPTS -XX:SurvivorRatio=8" -JVM_OPTS="$JVM_OPTS -XX:MaxTenuringThreshold=1" -JVM_OPTS="$JVM_OPTS -XX:CMSInitiatingOccupancyFraction=75" -JVM_OPTS="$JVM_OPTS -XX:+UseCMSInitiatingOccupancyOnly" - -# GC logging options -- uncomment to enable -# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDetails" -# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDateStamps" -# JVM_OPTS="$JVM_OPTS -XX:+PrintHeapAtGC" -# JVM_OPTS="$JVM_OPTS -XX:+PrintTenuringDistribution" -# JVM_OPTS="$JVM_OPTS -XX:+PrintGCApplicationStoppedTime" -# JVM_OPTS="$JVM_OPTS -XX:+PrintPromotionFailure" -# JVM_OPTS="$JVM_OPTS -XX:PrintFLSStatistics=1" -# JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc-`date +%s`.log" - -# uncomment to have Cassandra JVM listen for remote debuggers/profilers on port 1414 -# JVM_OPTS="$JVM_OPTS -Xdebug -Xnoagent -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1414" - -# Prefer binding to IPv4 network intefaces (when net.ipv6.bindv6only=1). See -# http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6342561 (short version: -# comment out this entry to enable IPv6 support). -JVM_OPTS="$JVM_OPTS -Djava.net.preferIPv4Stack=true" - -# jmx: metrics and administration interface -# -# add this if you're having trouble connecting: -# JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=<public name>" -# -# see -# https://blogs.oracle.com/jmxetc/entry/troubleshooting_connection_problems_in... -# for more on configuring JMX through firewalls, etc. (Short version: -# get it working with no firewall first.) 
-JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT" -JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=false" -JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=false" -JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS" diff --git a/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/conf/cassandra.yaml b/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/conf/cassandra.yaml deleted file mode 100644 index 43779d6..0000000 --- a/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/conf/cassandra.yaml +++ /dev/null @@ -1,627 +0,0 @@ -# Cassandra storage config YAML - -# NOTE: -# See http://wiki.apache.org/cassandra/StorageConfiguration for -# full explanations of configuration directives -# /NOTE - -# The name of the cluster. This is mainly used to prevent machines in -# one logical cluster from joining another. -cluster_name: @@cluster.name@@ - -# This defines the number of tokens randomly assigned to this node on the ring -# The more tokens, relative to other nodes, the larger the proportion of data -# that this node will store. You probably want all nodes to have the same number -# of tokens assuming they have equal hardware capability. -# -# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -# and will use the initial_token as described below. -# -# Specifying initial_token will override this setting. -# -# If you already have a cluster with 1 token per node, and wish to migrate to -# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations -num_tokens: @@rhq.cassandra.node.num_tokens@@ - -# If you haven't specified num_tokens, or have set it to the default of 1 then -# you should always specify InitialToken when setting up a production -# cluster for the first time, and often when adding capacity later. -# The principle is that each node should be given an equal slice of -# the token ring; see http://wiki.apache.org/cassandra/Operations -# for more details. -# -# If blank, Cassandra will request a token bisecting the range of -# the heaviest-loaded existing node. If there is no load information -# available, such as is the case with a new cluster, it will pick -# a random token, which will lead to hot spots. -#initial_token: - -# See http://wiki.apache.org/cassandra/HintedHandoff -hinted_handoff_enabled: true -# this defines the maximum amount of time a dead host will have hints -# generated. After it has been dead this long, hints will be dropped. -max_hint_window_in_ms: 10800000 # 3 hours -# throttle in KB's per second, per delivery thread -hinted_handoff_throttle_in_kb: 1024 -# Number of threads with which to deliver hints; -# Consider increasing this number when you have multi-dc deployments, since -# cross-dc handoff tends to be slower -max_hints_delivery_threads: 2 - -# The following setting populates the page cache on memtable flush and compaction -# WARNING: Enable this setting only when the whole node's data fits in memory. -# Defaults to: false -# populate_io_cache_on_flush: false - -# authentication backend, implementing IAuthenticator; used to identify users -authenticator: org.apache.cassandra.auth.AllowAllAuthenticator - -# authorization backend, implementing IAuthority; used to limit access/provide permissions -authority: org.apache.cassandra.auth.AllowAllAuthority - -# The partitioner is responsible for distributing rows (by key) across -# nodes in the cluster. 
Any IPartitioner may be used, including your -# own as long as it is on the classpath. Out of the box, Cassandra -# provides org.apache.cassandra.dht.RandomPartitioner -# org.apache.cassandra.dht.ByteOrderedPartitioner, -# org.apache.cassandra.dht.OrderPreservingPartitioner (deprecated), -# and org.apache.cassandra.dht.CollatingOrderPreservingPartitioner -# (deprecated). -# -# - RandomPartitioner distributes rows across the cluster evenly by md5. -# When in doubt, this is the best option. -# - Murmur3Partitioner is similar to RandomPartioner but uses Murmur3_128 -# Hash Function instead of md5 -# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows -# scanning rows in key order, but the ordering can generate hot spots -# for sequential insertion workloads. -# - OrderPreservingPartitioner is an obsolete form of BOP, that stores -# - keys in a less-efficient format and only works with keys that are -# UTF8-encoded Strings. -# - CollatingOPP colates according to EN,US rules rather than lexical byte -# ordering. Use this as an example if you need custom collation. -# -# See http://wiki.apache.org/cassandra/Operations for more on -# partitioners and token selection. -partitioner: org.apache.cassandra.dht.Murmur3Partitioner - -# directories where Cassandra should store data on disk. -data_file_directories: - - @@rhq.deploy.dir@@/@@data.dir@@ - -# commit log -commitlog_directory: @@rhq.deploy.dir@@/@@commitlog.dir@@ - -# policy for data disk failures: -# stop: shut down gossip and Thrift, leaving the node effectively dead, but -# still inspectable via JMX. -# best_effort: stop using the failed disk and respond to requests based on -# remaining available sstables. This means you WILL see obsolete -# data at CL.ONE! -# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra -disk_failure_policy: stop - -# Maximum size of the key cache in memory. -# -# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -# minimum, sometimes more. The key cache is fairly tiny for the amount of -# time it saves, so it's worthwhile to use it at large numbers. -# The row cache saves even more time, but must store the whole values of -# its rows, so it is extremely space-intensive. It's best to only use the -# row cache if you have hot rows or static rows. -# -# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. -# -# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. -key_cache_size_in_mb: - -# Duration in seconds after which Cassandra should -# safe the keys cache. Caches are saved to saved_caches_directory as -# specified in this configuration file. -# -# Saved caches greatly improve cold-start speeds, and is relatively cheap in -# terms of I/O for the key cache. Row cache saving is much more expensive and -# has limited use. -# -# Default is 14400 or 4 hours. -key_cache_save_period: 14400 - -# Number of keys from the key cache to save -# Disabled by default, meaning all keys are going to be saved -# key_cache_keys_to_save: 100 - -# Maximum size of the row cache in memory. -# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. -# -# Default value is 0, to disable row caching. -row_cache_size_in_mb: 0 - -# Duration in seconds after which Cassandra should -# safe the row cache. Caches are saved to saved_caches_directory as specified -# in this configuration file. 
-# -# Saved caches greatly improve cold-start speeds, and is relatively cheap in -# terms of I/O for the key cache. Row cache saving is much more expensive and -# has limited use. -# -# Default is 0 to disable saving the row cache. -row_cache_save_period: 0 - -# Number of keys from the row cache to save -# Disabled by default, meaning all keys are going to be saved -# row_cache_keys_to_save: 100 - -# The provider for the row cache to use. -# -# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider -# -# SerializingCacheProvider serialises the contents of the row and stores -# it in native memory, i.e., off the JVM Heap. Serialized rows take -# significantly less memory than "live" rows in the JVM, so you can cache -# more rows in a given memory footprint. And storing the cache off-heap -# means you can use smaller heap sizes, reducing the impact of GC pauses. -# -# It is also valid to specify the fully-qualified class name to a class -# that implements org.apache.cassandra.cache.IRowCacheProvider. -# -# Defaults to SerializingCacheProvider -row_cache_provider: SerializingCacheProvider - -# saved caches -saved_caches_directory: @@rhq.deploy.dir@@/@@saved.caches.dir@@ - -# commitlog_sync may be either "periodic" or "batch." -# When in batch mode, Cassandra won't ack writes until the commit log -# has been fsynced to disk. It will wait up to -# commitlog_sync_batch_window_in_ms milliseconds for other writes, before -# performing the sync. -# -# commitlog_sync: batch -# commitlog_sync_batch_window_in_ms: 50 -# -# the other option is "periodic" where writes may be acked immediately -# and the CommitLog is simply synced every commitlog_sync_period_in_ms -# milliseconds. -commitlog_sync: periodic -commitlog_sync_period_in_ms: 10000 - -# The size of the individual commitlog file segments. A commitlog -# segment may be archived, deleted, or recycled once all the data -# in it (potentally from each columnfamily in the system) has been -# flushed to sstables. -# -# The default size is 32, which is almost always fine, but if you are -# archiving commitlog segments (see commitlog_archiving.properties), -# then you probably want a finer granularity of archiving; 8 or 16 MB -# is reasonable. -commitlog_segment_size_in_mb: 32 - -# any class that implements the SeedProvider interface and has a -# constructor that takes a Map<String, String> of parameters will do. -seed_provider: - # Addresses of hosts that are deemed contact points. - # Cassandra nodes use this list of hosts to find each other and learn - # the topology of the ring. You must change this if you are running - # multiple nodes! - - class_name: org.apache.cassandra.locator.SimpleSeedProvider - parameters: - # seeds is actually a comma-delimited list of addresses. - # Ex: "<ip1>,<ip2>,<ip3>" - - seeds: "@@seeds@@" - -# emergency pressure valve: each time heap usage after a full (CMS) -# garbage collection is above this fraction of the max, Cassandra will -# flush the largest memtables. -# -# Set to 1.0 to disable. Setting this lower than -# CMSInitiatingOccupancyFraction is not likely to be useful. -# -# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY: -# it is most effective under light to moderate load, or read-heavy -# workloads; under truly massive write load, it will often be too -# little, too late. 
-flush_largest_memtables_at: 0.75 - -# emergency pressure valve #2: the first time heap usage after a full -# (CMS) garbage collection is above this fraction of the max, -# Cassandra will reduce cache maximum _capacity_ to the given fraction -# of the current _size_. Should usually be set substantially above -# flush_largest_memtables_at, since that will have less long-term -# impact on the system. -# -# Set to 1.0 to disable. Setting this lower than -# CMSInitiatingOccupancyFraction is not likely to be useful. -reduce_cache_sizes_at: 0.85 -reduce_cache_capacity_to: 0.6 - -# For workloads with more data than can fit in memory, Cassandra's -# bottleneck will be reads that need to fetch data from -# disk. "concurrent_reads" should be set to (16 * number_of_drives) in -# order to allow the operations to enqueue low enough in the stack -# that the OS and drives can reorder them. -# -# On the other hand, since writes are almost never IO bound, the ideal -# number of "concurrent_writes" is dependent on the number of cores in -# your system; (8 * number_of_cores) is a good rule of thumb. -concurrent_reads: 32 -concurrent_writes: 32 - -# Total memory to use for memtables. Cassandra will flush the largest -# memtable when this much memory is used. -# If omitted, Cassandra will set it to 1/3 of the heap. -# memtable_total_space_in_mb: 2048 - -# Total space to use for commitlogs. Since commitlog segments are -# mmapped, and hence use up address space, the default size is 32 -# on 32-bit JVMs, and 1024 on 64-bit JVMs. -# -# If space gets above this value (it will round up to the next nearest -# segment multiple), Cassandra will flush every dirty CF in the oldest -# segment and remove it. So a small total commitlog space will tend -# to cause more flush activity on less-active columnfamilies. -# commitlog_total_space_in_mb: 4096 - -# This sets the amount of memtable flush writer threads. These will -# be blocked by disk io, and each one will hold a memtable in memory -# while blocked. If you have a large heap and many data directories, -# you can increase this value for better flush performance. -# By default this will be set to the amount of data directories defined. -#memtable_flush_writers: 1 - -# the number of full memtables to allow pending flush, that is, -# waiting for a writer thread. At a minimum, this should be set to -# the maximum number of secondary indexes created on a single CF. -memtable_flush_queue_size: 4 - -# Whether to, when doing sequential writing, fsync() at intervals in -# order to force the operating system to flush the dirty -# buffers. Enable this to avoid sudden dirty buffer flushing from -# impacting read latencies. Almost always a good idea on SSD:s; not -# necessarily on platters. -trickle_fsync: false -trickle_fsync_interval_in_kb: 10240 - -# TCP port, for commands and data -storage_port: 7000 - -# SSL port, for encrypted communication. Unused unless enabled in -# encryption_options -ssl_storage_port: 7001 - -# Address to bind to and tell other Cassandra nodes to connect to. You -# _must_ change this if you want multiple nodes to be able to -# communicate! -# -# Leaving it blank leaves it up to InetAddress.getLocalHost(). This -# will always do the Right Thing *if* the node is properly configured -# (hostname, name resolution, etc), and the Right Thing is to use the -# address associated with the hostname (it might not be). -# -# Setting this to 0.0.0.0 is always wrong. 
-listen_address: @@listen.address@@ - -# Address to broadcast to other Cassandra nodes -# Leaving this blank will set it to the same value as listen_address -# broadcast_address: 1.2.3.4 - - -# Whether to start the native transport server. -# Currently, only the thrift server is started by default because the native -# transport is considered beta. -start_native_transport: false -# The address to bind the CQL native transport to. The same remarks than for -# rpc_address applies. -native_transport_address: localhost -# port for the CQL native transport to listen for clients on -native_transport_port: 8000 -# The maximum of thread handling requests. The meaning is the same than -# rpc_max_threads. The default is unlimited. -#native_transport_max_threads: 2048 - - -# Whether to start the thrift rpc server. -start_rpc: true -# The address to bind the Thrift RPC service to -- clients connect -# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if -# you want Thrift to listen on all interfaces. -# -# Leaving this blank has the same effect it does for ListenAddress, -# (i.e. it will be based on the configured hostname of the node). -rpc_address: @@rpc.address@@ -# port for Thrift to listen for clients on -rpc_port: 9160 - -# enable or disable keepalive on rpc connections -rpc_keepalive: true - -# Cassandra provides three options for the RPC Server: -# -# sync -> One thread per thrift connection. For a very large number of clients, memory -# will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size -# per thread, and that will correspond to your use of virtual memory (but physical memory -# may be limited depending on use of stack space). -# -# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled -# asynchronously using a small number of threads that does not vary with the amount -# of thrift clients (and thus scales well to many clients). The rpc requests are still -# synchronous (one thread per active request). -# -# The default is sync because on Windows hsha is about 30% slower. On Linux, -# sync/hsha performance is about the same, with hsha of course using less memory. -rpc_server_type: sync - -# Uncomment rpc_min|max_thread to set request pool size limits. -# -# Regardless of your choice of RPC server (see above), the number of maximum requests in the -# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync -# RPC server, it also dictates the number of clients that can be connected at all). -# -# The default is unlimited and thus provide no protection against clients overwhelming the server. You are -# encouraged to set a maximum that makes sense for you in production, but do keep in mind that -# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. -# -# rpc_min_threads: 16 -# rpc_max_threads: 2048 - -# uncomment to set socket buffer sizes on rpc connections -# rpc_send_buff_size_in_bytes: -# rpc_recv_buff_size_in_bytes: - -# Frame size for thrift (maximum field length). -# 0 disables TFramedTransport in favor of TSocket. This option -# is deprecated; we strongly recommend using Framed mode. -thrift_framed_transport_size_in_mb: 15 - -# The max length of a thrift message, including all fields and -# internal thrift overhead. -thrift_max_message_length_in_mb: 16 - -# Set to true to have Cassandra create a hard link to each sstable -# flushed or streamed locally in a backups/ subdirectory of the -# Keyspace data. 
Removing these links is the operator's -# responsibility. -incremental_backups: false - -# Whether or not to take a snapshot before each compaction. Be -# careful using this option, since Cassandra won't clean up the -# snapshots for you. Mostly useful if you're paranoid when there -# is a data format change. -snapshot_before_compaction: false - -# Whether or not a snapshot is taken of the data before keyspace truncation -# or dropping of column families. The STRONGLY advised default of true -# should be used to provide data safety. If you set this flag to false, you will -# lose data on truncation or drop. -auto_snapshot: true - -# Add column indexes to a row after its contents reach this size. -# Increase if your column values are large, or if you have a very large -# number of columns. The competing causes are, Cassandra has to -# deserialize this much of the row to read a single column, so you want -# it to be small - at least if you do many partial-row reads - but all -# the index data is read for each access, so you don't want to generate -# that wastefully either. -column_index_size_in_kb: 64 - -# Size limit for rows being compacted in memory. Larger rows will spill -# over to disk and use a slower two-pass compaction process. A message -# will be logged specifying the row key. -in_memory_compaction_limit_in_mb: 64 - -# Number of simultaneous compactions to allow, NOT including -# validation "compactions" for anti-entropy repair. Simultaneous -# compactions can help preserve read performance in a mixed read/write -# workload, by mitigating the tendency of small sstables to accumulate -# during a single long running compactions. The default is usually -# fine and if you experience problems with compaction running too -# slowly or too fast, you should look at -# compaction_throughput_mb_per_sec first. -# -# This setting has no effect on LeveledCompactionStrategy. -# -# concurrent_compactors defaults to the number of cores. -# Uncomment to make compaction mono-threaded, the pre-0.8 default. -#concurrent_compactors: 1 - -# Multi-threaded compaction. When enabled, each compaction will use -# up to one thread per core, plus one thread per sstable being merged. -# This is usually only useful for SSD-based hardware: otherwise, -# your concern is usually to get compaction to do LESS i/o (see: -# compaction_throughput_mb_per_sec), not more. -multithreaded_compaction: false - -# Throttles compaction to the given total throughput across the entire -# system. The faster you insert data, the faster you need to compact in -# order to keep the sstable count down, but in general, setting this to -# 16 to 32 times the rate you are inserting data is more than sufficient. -# Setting this to 0 disables throttling. Note that this account for all types -# of compaction, including validation compaction. -compaction_throughput_mb_per_sec: 16 - -# Track cached row keys during compaction, and re-cache their new -# positions in the compacted sstable. Disable if you use really large -# key caches. -compaction_preheat_key_cache: true - -# Throttles all outbound streaming file transfers on this node to the -# given total throughput in Mbps. This is necessary because Cassandra does -# mostly sequential IO when streaming data during bootstrap or repair, which -# can lead to saturating the network connection and degrading rpc performance. -# When unset, the default is 400 Mbps or 50 MB/s. 
-# stream_throughput_outbound_megabits_per_sec: 400 - -# How long the coordinator should wait for read operations to complete -read_rpc_timeout_in_ms: 10000 -# How long the coordinator should wait for seq or index scans to complete -range_rpc_timeout_in_ms: 10000 -# How long the coordinator should wait for writes to complete -write_rpc_timeout_in_ms: 10000 -# How long the coordinator should wait for truncates to complete -# (This can be much longer, because we need to flush all CFs -# to make sure we can clear out anything in the commitlog that could -# cause truncated data to reappear.) -truncate_rpc_timeout_in_ms: 300000 -# The default timeout for other, miscellaneous operations -rpc_timeout_in_ms: 10000 - -# Enable socket timeout for streaming operation. -# When a timeout occurs during streaming, streaming is retried from the start -# of the current file. This *can* involve re-streaming a significant amount of -# data, so you should avoid setting the value too low. -# Default value is 0, which never times out streams. -# streaming_socket_timeout_in_ms: 0 - -# phi value that must be reached for a host to be marked down. -# most users should never need to adjust this. -# phi_convict_threshold: 8 - -# endpoint_snitch -- Set this to a class that implements -# IEndpointSnitch. The snitch has two functions: -# - it teaches Cassandra enough about your network topology to route -# requests efficiently -# - it allows Cassandra to spread replicas around your cluster to avoid -# correlated failures. It does this by grouping machines into -# "datacenters" and "racks." Cassandra will do its best not to have -# more than one replica on the same "rack" (which may not actually -# be a physical location) -# -# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, -# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS -# ARE PLACED. -# -# Out of the box, Cassandra provides -# - SimpleSnitch: -# Treats Strategy order as proximity. This improves cache locality -# when disabling read repair, which can further improve throughput. -# Only appropriate for single-datacenter deployments. -# - PropertyFileSnitch: -# Proximity is determined by rack and data center, which are -# explicitly configured in cassandra-topology.properties. -# - GossipingPropertyFileSnitch -# The rack and datacenter for the local node are defined in -# cassandra-rackdc.properties and propagated to other nodes via gossip. If -# cassandra-topology.properties exists, it is used as a fallback, allowing -# migration from the PropertyFileSnitch. -# - RackInferringSnitch: -# Proximity is determined by rack and data center, which are -# assumed to correspond to the 3rd and 2nd octet of each node's -# IP address, respectively. Unless this happens to match your -# deployment conventions (as it did Facebook's), this is best used -# as an example of writing a custom Snitch class. -# - Ec2Snitch: -# Appropriate for EC2 deployments in a single Region. Loads Region -# and Availability Zone information from the EC2 API. The Region is -# treated as the Datacenter, and the Availability Zone as the rack. -# Only private IPs are used, so this will not work across multiple -# Regions. -# - Ec2MultiRegionSnitch: -# Uses public IPs as broadcast_address to allow cross-region -# connectivity. (Thus, you should set seed addresses to the public -# IP as well.) You will need to open the storage_port or -# ssl_storage_port on the public IP firewall.
(For intra-Region -# traffic, Cassandra will switch to the private IP after -# establishing a connection.) -# -# You can use a custom Snitch by setting this to the full class name -# of the snitch, which will be assumed to be on your classpath. -endpoint_snitch: SimpleSnitch - -# controls how often to perform the more expensive part of host score -# calculation -dynamic_snitch_update_interval_in_ms: 100 -# controls how often to reset all host scores, allowing a bad host to -# possibly recover -dynamic_snitch_reset_interval_in_ms: 600000 -# if set greater than zero and read_repair_chance is < 1.0, this will allow -# 'pinning' of replicas to hosts in order to increase cache capacity. -# The badness threshold will control how much worse the pinned host has to be -# before the dynamic snitch will prefer other replicas over it. This is -# expressed as a double which represents a percentage. Thus, a value of -# 0.2 means Cassandra would continue to prefer the static snitch values -# until the pinned host was 20% worse than the fastest. -dynamic_snitch_badness_threshold: 0.1 - -# request_scheduler -- Set this to a class that implements -# RequestScheduler, which will schedule incoming client requests -# according to the specific policy. This is useful for multi-tenancy -# with a single Cassandra cluster. -# NOTE: This is specifically for requests from the client and does -# not affect inter node communication. -# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place -# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of -# client requests to a node with a separate queue for each -# request_scheduler_id. The scheduler is further customized by -# request_scheduler_options as described below. -request_scheduler: org.apache.cassandra.scheduler.NoScheduler - -# Scheduler Options vary based on the type of scheduler -# NoScheduler - Has no options -# RoundRobin -# - throttle_limit -- The throttle_limit is the number of in-flight -# requests per client. Requests beyond -# that limit are queued up until -# running requests can complete. -# The value of 80 here is twice the number of -# concurrent_reads + concurrent_writes. -# - default_weight -- default_weight is optional and allows for -# overriding the default which is 1. -# - weights -- Weights are optional and will default to 1 or the -# overridden default_weight. The weight translates into how -# many requests are handled during each turn of the -# RoundRobin, based on the scheduler id. -# -# request_scheduler_options: -# throttle_limit: 80 -# default_weight: 5 -# weights: -# Keyspace1: 1 -# Keyspace2: 5 - -# request_scheduler_id -- An identifier based on which to perform -# the request scheduling. Currently the only valid option is keyspace. -# request_scheduler_id: keyspace - -# index_interval controls the sampling of entries from the primary -# row index in terms of space versus time. The larger the interval, -# the smaller and less effective the sampling will be. In technical -# terms, the interval corresponds to the number of index entries that -# are skipped between taking each sample. All the sampled entries -# must fit in memory. Generally, a value between 128 and 512 here -# coupled with a large key cache size on CFs results in the best trade -# offs. This value is not often changed; however, if you have many -# very small rows (many to an OS page), then increasing this will -# often lower memory usage without an impact on performance.
-index_interval: 128 - -# Enable or disable inter-node encryption -# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that -# users generate their own keys), and TLS_RSA_WITH_AES_128_CBC_SHA as the cipher -# suite for authentication, key exchange and encryption of the actual data transfers. -# NOTE: No custom encryption options are enabled at the moment -# The available internode options are: all, none, dc, rack -# -# If set to dc, Cassandra will encrypt the traffic between the DCs -# If set to rack, Cassandra will encrypt the traffic between the racks -# -# The passwords used in these options must match the passwords used when generating -# the keystore and truststore. For instructions on generating these files, see: -# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSE... -# -encryption_options: - internode_encryption: none - keystore: conf/.keystore - keystore_password: cassandra - truststore: conf/.truststore - truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # algorithm: SunX509 - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] - -# internode_compression controls whether traffic between nodes is -# compressed. -# can be: all - all traffic is compressed -# dc - traffic between different datacenters is compressed -# none - nothing is compressed. -internode_compression: all diff --git a/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/conf/log4j-server.properties b/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/conf/log4j-server.properties deleted file mode 100644 index 08ecb45..0000000 --- a/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/conf/log4j-server.properties +++ /dev/null @@ -1,52 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# for production, you should probably set pattern to %c instead of %l. -# (%l is slower.)
- -# output messages into a rolling log file as well as stdout -log4j.rootLogger=@@logging.level@@,stdout,R,tracing - -# stdout -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n -log4j.appender.stdout.Threshold=@@logging.level@@ - -# rolling log file -log4j.appender.R=org.apache.log4j.RollingFileAppender -log4j.appender.R.maxFileSize=20MB -log4j.appender.R.maxBackupIndex=50 -log4j.appender.R.layout=org.apache.log4j.PatternLayout -log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n -# Edit the next line to point to your logs directory -log4j.appender.R.File=@@rhq.deploy.dir@@/@@log.dir@@/system.log -log4j.appender.R.Threshold=@@logging.level@@ - -log4j.appender.tracing=org.apache.cassandra.tracing.TracingAppender -log4j.appender.tracing.layout=org.apache.log4j.PatternLayout -log4j.appender.tracing.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n -log4j.appender.tracing.Threshold=DEBUG - - -# Application logging options -#log4j.logger.org.apache.cassandra=DEBUG -#log4j.logger.org.apache.cassandra.db=DEBUG -#log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG - -# Adding this to avoid thrift logging disconnect errors. -log4j.logger.org.apache.thrift.server.TNonblockingServer=ERROR - diff --git a/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/lib/jna-3.4.1.jar b/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/lib/jna-3.4.1.jar deleted file mode 100644 index 4e05a4a..0000000 Binary files a/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/lib/jna-3.4.1.jar and /dev/null differ diff --git a/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/lib/platform-3.4.1.jar b/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/lib/platform-3.4.1.jar deleted file mode 100644 index 8357d2e..0000000 Binary files a/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta1/lib/platform-3.4.1.jar and /dev/null differ diff --git a/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/conf/cassandra-env.sh b/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/conf/cassandra-env.sh new file mode 100644 index 0000000..79b1369 --- /dev/null +++ b/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/conf/cassandra-env.sh @@ -0,0 +1,233 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +calculate_heap_sizes() +{ + case "`uname`" in + Linux) + system_memory_in_mb=`free -m | awk '/Mem:/ {print $2}'` + system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo` + ;; + FreeBSD) + system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'` + system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024` + system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'` + ;; + SunOS) + system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'` + system_cpu_cores=`psrinfo | wc -l` + ;; + *) + # assume reasonable defaults for e.g. a modern desktop or + # cheap server + system_memory_in_mb="2048" + system_cpu_cores="2" + ;; + esac + + # some systems like the raspberry pi don't report cores, use at least 1 + if [ "$system_cpu_cores" -lt "1" ] + then + system_cpu_cores="1" + fi + + # set max heap size based on the following + # max(min(1/2 ram, 1024MB), min(1/4 ram, 8GB)) + # calculate 1/2 ram and cap to 1024MB + # calculate 1/4 ram and cap to 8192MB + # pick the max + half_system_memory_in_mb=`expr $system_memory_in_mb / 2` + quarter_system_memory_in_mb=`expr $half_system_memory_in_mb / 2` + if [ "$half_system_memory_in_mb" -gt "1024" ] + then + half_system_memory_in_mb="1024" + fi + if [ "$quarter_system_memory_in_mb" -gt "8192" ] + then + quarter_system_memory_in_mb="8192" + fi + if [ "$half_system_memory_in_mb" -gt "$quarter_system_memory_in_mb" ] + then + max_heap_size_in_mb="$half_system_memory_in_mb" + else + max_heap_size_in_mb="$quarter_system_memory_in_mb" + fi + MAX_HEAP_SIZE="${max_heap_size_in_mb}M" + + # Young gen: min(max_sensible_per_modern_cpu_core * num_cores, 1/4 * heap size) + max_sensible_yg_per_core_in_mb="100" + max_sensible_yg_in_mb=`expr $max_sensible_yg_per_core_in_mb "*" $system_cpu_cores` + + desired_yg_in_mb=`expr $max_heap_size_in_mb / 4` + + if [ "$desired_yg_in_mb" -gt "$max_sensible_yg_in_mb" ] + then + HEAP_NEWSIZE="${max_sensible_yg_in_mb}M" + else + HEAP_NEWSIZE="${desired_yg_in_mb}M" + fi +} + +# Determine the sort of JVM we'll be running on. + +java_ver_output=`"${JAVA:-java}" -version 2>&1` + +jvmver=`echo "$java_ver_output" | awk -F'"' 'NR==1 {print $2}'` +JVM_VERSION=${jvmver%_*} +JVM_PATCH_VERSION=${jvmver#*_} + +jvm=`echo "$java_ver_output" | awk 'NR==2 {print $1}'` +case "$jvm" in + OpenJDK) + JVM_VENDOR=OpenJDK + # this will be "64-Bit" or "32-Bit" + JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $2}'` + ;; + "Java(TM)") + JVM_VENDOR=Oracle + # this will be "64-Bit" or "32-Bit" + JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $3}'` + ;; + *) + # Help fill in other JVM values + JVM_VENDOR=other + JVM_ARCH=unknown + ;; +esac + + +# Override these to set the amount of memory to allocate to the JVM at +# start-up. For production use you may wish to adjust this for your +# environment. MAX_HEAP_SIZE is the total amount of memory dedicated +# to the Java heap; HEAP_NEWSIZE refers to the size of the young +# generation. Both MAX_HEAP_SIZE and HEAP_NEWSIZE should be either set +# or not (if you set one, set the other). +# +# The main trade-off for the young generation is that the larger it +# is, the longer GC pause times will be. The shorter it is, the more +# expensive GC will be (usually). +# +# The example HEAP_NEWSIZE assumes a modern 8-core+ machine for decent pause +# times. If in doubt, and if you do not particularly want to tweak, go with +# 100 MB per physical CPU core. 
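# Worked example of calculate_heap_sizes() above, with host values assumed
# purely for illustration (a hypothetical Linux box with 8192 MB of RAM and
# 4 cores):
#   half_system_memory_in_mb    = min(8192 / 2, 1024) = 1024
#   quarter_system_memory_in_mb = min(8192 / 4, 8192) = 2048
#   MAX_HEAP_SIZE = max(1024, 2048)                   -> "2048M"
#   HEAP_NEWSIZE  = min(100 * 4 cores, 2048 / 4)      -> "400M"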
+ +#MAX_HEAP_SIZE="4G" +#HEAP_NEWSIZE="800M" + +if [ "x$MAX_HEAP_SIZE" = "x" ] && [ "x$HEAP_NEWSIZE" = "x" ]; then + calculate_heap_sizes +else + if [ "x$MAX_HEAP_SIZE" = "x" ] || [ "x$HEAP_NEWSIZE" = "x" ]; then + echo "please set or unset MAX_HEAP_SIZE and HEAP_NEWSIZE in pairs (see cassandra-env.sh)" + exit 1 + fi +fi + +# Specifies the default port over which Cassandra will be available for +# JMX connections. +JMX_PORT="@@jmx.port@@" + + +# Here we create the arguments that will get passed to the jvm when +# starting cassandra. + +JVM_EXTRA_OPTS="@@cassandra.ring.delay.property@@@@cassandra.ring.delay@@" + +# enable assertions. disabling this in production will give a modest +# performance benefit (around 5%). +JVM_OPTS="$JVM_OPTS -ea" + +# add the jamm javaagent +if [ "$JVM_VENDOR" != "OpenJDK" -o "$JVM_VERSION" > "1.6.0" ] \ + || [ "$JVM_VERSION" = "1.6.0" -a "$JVM_PATCH_VERSION" -ge 23 ] +then + JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar" +fi + +# enable thread priorities, primarily so we can give periodic tasks +# a lower priority to avoid interfering with client workload +JVM_OPTS="$JVM_OPTS -XX:+UseThreadPriorities" +# allows lowering thread priority without being root. see +# http://tech.stolsvik.com/2010/01/linux-java-thread-priorities-workaround.htm... +JVM_OPTS="$JVM_OPTS -XX:ThreadPriorityPolicy=42" + +# min and max heap sizes should be set to the same value to avoid +# stop-the-world GC pauses during resize, and so that we can lock the +# heap in memory on startup to prevent any of it from being swapped +# out. +JVM_OPTS="$JVM_OPTS -Xms${MAX_HEAP_SIZE}" +JVM_OPTS="$JVM_OPTS -Xmx${MAX_HEAP_SIZE}" +JVM_OPTS="$JVM_OPTS -Xmn${HEAP_NEWSIZE}" +JVM_OPTS="$JVM_OPTS -XX:+HeapDumpOnOutOfMemoryError" + +# set jvm HeapDumpPath with CASSANDRA_HEAPDUMP_DIR +if [ "x$CASSANDRA_HEAPDUMP_DIR" != "x" ]; then + JVM_OPTS="$JVM_OPTS -XX:HeapDumpPath=$CASSANDRA_HEAPDUMP_DIR/cassandra-`date +%s`-pid$$.hprof" +fi + + +startswith() { [ "${1#$2}" != "$1" ]; } + +if [ "`uname`" = "Linux" ] ; then + # reduce the per-thread stack size to minimize the impact of Thrift + # thread-per-client. (Best practice is for client connections to + # be pooled anyway.) Only do so on Linux where it is known to be + # supported. + # u34 and greater need 180k + JVM_OPTS="$JVM_OPTS -Xss180k" +fi +echo "xss = $JVM_OPTS" + +# GC tuning options +JVM_OPTS="$JVM_OPTS -XX:+UseParNewGC" +JVM_OPTS="$JVM_OPTS -XX:+UseConcMarkSweepGC" +JVM_OPTS="$JVM_OPTS -XX:+CMSParallelRemarkEnabled" +JVM_OPTS="$JVM_OPTS -XX:SurvivorRatio=8" +JVM_OPTS="$JVM_OPTS -XX:MaxTenuringThreshold=1" +JVM_OPTS="$JVM_OPTS -XX:CMSInitiatingOccupancyFraction=75" +JVM_OPTS="$JVM_OPTS -XX:+UseCMSInitiatingOccupancyOnly" + +# GC logging options -- uncomment to enable +# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDetails" +# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDateStamps" +# JVM_OPTS="$JVM_OPTS -XX:+PrintHeapAtGC" +# JVM_OPTS="$JVM_OPTS -XX:+PrintTenuringDistribution" +# JVM_OPTS="$JVM_OPTS -XX:+PrintGCApplicationStoppedTime" +# JVM_OPTS="$JVM_OPTS -XX:+PrintPromotionFailure" +# JVM_OPTS="$JVM_OPTS -XX:PrintFLSStatistics=1" +# JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc-`date +%s`.log" + +# uncomment to have Cassandra JVM listen for remote debuggers/profilers on port 1414 +# JVM_OPTS="$JVM_OPTS -Xdebug -Xnoagent -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1414" + +# Prefer binding to IPv4 network interfaces (when net.ipv6.bindv6only=1).
See +# http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6342561 (short version: +# comment out this entry to enable IPv6 support). +JVM_OPTS="$JVM_OPTS -Djava.net.preferIPv4Stack=true" + +# jmx: metrics and administration interface +# +# add this if you're having trouble connecting: +# JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=<public name>" +# +# see +# https://blogs.oracle.com/jmxetc/entry/troubleshooting_connection_problems_in... +# for more on configuring JMX through firewalls, etc. (Short version: +# get it working with no firewall first.) +JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT" +JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=false" +JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=false" +JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS" diff --git a/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/conf/cassandra.yaml b/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/conf/cassandra.yaml new file mode 100644 index 0000000..f58c26a --- /dev/null +++ b/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/conf/cassandra.yaml @@ -0,0 +1,645 @@ +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: @@cluster.name@@ + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting. +# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +num_tokens: @@rhq.cassandra.node.num_tokens@@ + +# If you haven't specified num_tokens, or have set it to the default of 1 then +# you should always specify InitialToken when setting up a production +# cluster for the first time, and often when adding capacity later. +# The principle is that each node should be given an equal slice of +# the token ring; see http://wiki.apache.org/cassandra/Operations +# for more details. +# +# If blank, Cassandra will request a token bisecting the range of +# the heaviest-loaded existing node. If there is no load information +# available, such as is the case with a new cluster, it will pick +# a random token, which will lead to hot spots. +#initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +hinted_handoff_enabled: true +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, hints will be dropped. 
+max_hint_window_in_ms: 10800000 # 3 hours +# throttle in KB per second, per delivery thread +hinted_handoff_throttle_in_kb: 1024 +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# The following setting populates the page cache on memtable flush and compaction +# WARNING: Enable this setting only when the whole node's data fits in memory. +# Defaults to: false +# populate_io_cache_on_flush: false + +# authentication backend, implementing IAuthenticator; used to identify users +authenticator: org.apache.cassandra.auth.AllowAllAuthenticator + +# authorization backend, implementing IAuthority; used to limit access/provide permissions +authority: org.apache.cassandra.auth.AllowAllAuthority + +# The partitioner is responsible for distributing rows (by key) across +# nodes in the cluster. Any IPartitioner may be used, including your +# own as long as it is on the classpath. Out of the box, Cassandra +# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner, +# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}. +# +# - RandomPartitioner distributes rows across the cluster evenly by md5. +# This is the default prior to 1.2 and is retained for compatibility. +# - Murmur3Partitioner is similar to RandomPartitioner but uses Murmur3_128 +# Hash Function instead of md5. When in doubt, this is the best option. +# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows +# scanning rows in key order, but the ordering can generate hot spots +# for sequential insertion workloads. +# - OrderPreservingPartitioner is an obsolete form of BOP that stores +# keys in a less-efficient format and only works with keys that are +# UTF8-encoded Strings. +# - CollatingOPP collates according to EN,US rules rather than lexical byte +# ordering. Use this as an example if you need custom collation. +# +# See http://wiki.apache.org/cassandra/Operations for more on +# partitioners and token selection. +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# directories where Cassandra should store data on disk. +data_file_directories: + - @@rhq.deploy.dir@@/@@data.dir@@ + +# commit log +commitlog_directory: @@rhq.deploy.dir@@/@@commitlog.dir@@ + +# policy for data disk failures: +# stop: shut down gossip and Thrift, leaving the node effectively dead, but +# still inspectable via JMX. +# best_effort: stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! +# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra +disk_failure_policy: stop + +# Maximum size of the key cache in memory. +# +# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +# minimum, sometimes more. The key cache is fairly tiny for the amount of +# time it saves, so it's worthwhile to use it at large numbers. +# The row cache saves even more time, but must store the whole values of +# its rows, so it is extremely space-intensive. It's best to only use the +# row cache if you have hot rows or static rows. +# +# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. +key_cache_size_in_mb: + +# Duration in seconds after which Cassandra should
Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 14400 or 4 hours. +key_cache_save_period: 14400 + +# Number of keys from the key cache to save +# Disabled by default, meaning all keys are going to be saved +# key_cache_keys_to_save: 100 + +# Maximum size of the row cache in memory. +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is 0, to disable row caching. +row_cache_size_in_mb: 0 + +# Duration in seconds after which Cassandra should +# safe the row cache. Caches are saved to saved_caches_directory as specified +# in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 0 to disable saving the row cache. +row_cache_save_period: 0 + +# Number of keys from the row cache to save +# Disabled by default, meaning all keys are going to be saved +# row_cache_keys_to_save: 100 + +# The provider for the row cache to use. +# +# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider +# +# SerializingCacheProvider serialises the contents of the row and stores +# it in native memory, i.e., off the JVM Heap. Serialized rows take +# significantly less memory than "live" rows in the JVM, so you can cache +# more rows in a given memory footprint. And storing the cache off-heap +# means you can use smaller heap sizes, reducing the impact of GC pauses. +# +# It is also valid to specify the fully-qualified class name to a class +# that implements org.apache.cassandra.cache.IRowCacheProvider. +# +# Defaults to SerializingCacheProvider +row_cache_provider: SerializingCacheProvider + +# saved caches +saved_caches_directory: @@rhq.deploy.dir@@/@@saved.caches.dir@@ + +# commitlog_sync may be either "periodic" or "batch." +# When in batch mode, Cassandra won't ack writes until the commit log +# has been fsynced to disk. It will wait up to +# commitlog_sync_batch_window_in_ms milliseconds for other writes, before +# performing the sync. +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 50 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. +commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentally from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +commitlog_segment_size_in_mb: 32 + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map<String, String> of parameters will do. +seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! 
+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. + # Ex: "<ip1>,<ip2>,<ip3>" + - seeds: "@@seeds@@" + +# emergency pressure valve: each time heap usage after a full (CMS) +# garbage collection is above this fraction of the max, Cassandra will +# flush the largest memtables. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +# +# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY: +# it is most effective under light to moderate load, or read-heavy +# workloads; under truly massive write load, it will often be too +# little, too late. +flush_largest_memtables_at: 0.75 + +# emergency pressure valve #2: the first time heap usage after a full +# (CMS) garbage collection is above this fraction of the max, +# Cassandra will reduce cache maximum _capacity_ to the given fraction +# of the current _size_. Should usually be set substantially above +# flush_largest_memtables_at, since that will have less long-term +# impact on the system. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +reduce_cache_sizes_at: 0.85 +reduce_cache_capacity_to: 0.6 + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. +# +# On the other hand, since writes are almost never IO bound, the ideal +# number of "concurrent_writes" is dependent on the number of cores in +# your system; (8 * number_of_cores) is a good rule of thumb. +concurrent_reads: 32 +concurrent_writes: 32 + +# Total memory to use for memtables. Cassandra will flush the largest +# memtable when this much memory is used. +# If omitted, Cassandra will set it to 1/3 of the heap. +# memtable_total_space_in_mb: 2048 + +# Total space to use for commitlogs. Since commitlog segments are +# mmapped, and hence use up address space, the default size is 32 +# on 32-bit JVMs, and 1024 on 64-bit JVMs. +# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Cassandra will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# commitlog_total_space_in_mb: 4096 + +# This sets the number of memtable flush writer threads. These will +# be blocked by disk io, and each one will hold a memtable in memory +# while blocked. If you have a large heap and many data directories, +# you can increase this value for better flush performance. +# By default this will be set to the number of data directories defined. +#memtable_flush_writers: 1 + +# the number of full memtables to allow pending flush, that is, +# waiting for a writer thread. At a minimum, this should be set to +# the maximum number of secondary indexes created on a single CF. +memtable_flush_queue_size: 4 + +# Whether to, when doing sequential writing, fsync() at intervals in +# order to force the operating system to flush the dirty +# buffers. Enable this to avoid sudden dirty buffer flushing from +# impacting read latencies. Almost always a good idea on SSDs; not +# necessarily on platters.
+trickle_fsync: false +trickle_fsync_interval_in_kb: 10240 + +# TCP port, for commands and data +storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +ssl_storage_port: 7001 + +# Address to bind to and tell other Cassandra nodes to connect to. You +# _must_ change this if you want multiple nodes to be able to +# communicate! +# +# Leaving it blank leaves it up to InetAddress.getLocalHost(). This +# will always do the Right Thing *if* the node is properly configured +# (hostname, name resolution, etc), and the Right Thing is to use the +# address associated with the hostname (it might not be). +# +# Setting this to 0.0.0.0 is always wrong. +listen_address: @@listen.address@@ + +# Address to broadcast to other Cassandra nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + + +# Whether to start the native transport server. +# Currently, only the thrift server is started by default because the native +# transport is considered beta. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port, however, is different and is specified below. +start_native_transport: false +# port for the CQL native transport to listen for clients on +native_transport_port: 9042 +# The maximum number of threads handling requests. The meaning is the same as for +# rpc_max_threads. The default is unlimited. +#native_transport_max_threads: 2048 + + +# Whether to start the thrift rpc server. +start_rpc: true +# The address to bind the Thrift RPC service to -- clients connect +# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if +# you want Thrift to listen on all interfaces. +# +# Leaving this blank has the same effect it does for ListenAddress, +# (i.e. it will be based on the configured hostname of the node). +rpc_address: @@rpc.address@@ +# port for Thrift to listen for clients on +rpc_port: 9160 + +# enable or disable keepalive on rpc connections +rpc_keepalive: true + +# Cassandra provides three out-of-the-box options for the RPC Server: +# +# sync -> One thread per thrift connection. For a very large number of clients, memory +# will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size +# per thread, and that will correspond to your use of virtual memory (but physical memory +# may be limited depending on use of stack space). +# +# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled +# asynchronously using a small number of threads that does not vary with the number +# of thrift clients (and thus scales well to many clients). The rpc requests are still +# synchronous (one thread per active request). +# +# The default is sync because on Windows hsha is about 30% slower. On Linux, +# sync/hsha performance is about the same, with hsha of course using less memory. +# +# Alternatively, you can provide your own RPC server by supplying the fully-qualified class name +# of an o.a.c.t.TServerFactory that can create an instance of it. +rpc_server_type: sync + +# Uncomment rpc_min|max_thread to set request pool size limits. +# +# Regardless of your choice of RPC server (see above), the maximum number of requests in the +# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync +# RPC server, it also dictates the number of clients that can be connected at all). +# +# The default is unlimited and thus provides no protection against clients overwhelming the server.
You are +# encouraged to set a maximum that makes sense for you in production, but do keep in mind that +# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. +# +# rpc_min_threads: 16 +# rpc_max_threads: 2048 + +# uncomment to set socket buffer sizes on rpc connections +# rpc_send_buff_size_in_bytes: +# rpc_recv_buff_size_in_bytes: + +# Frame size for thrift (maximum field length). +thrift_framed_transport_size_in_mb: 15 + +# The max length of a thrift message, including all fields and +# internal thrift overhead. +thrift_max_message_length_in_mb: 16 + +# Set to true to have Cassandra create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# Keyspace data. Removing these links is the operator's +# responsibility. +incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Cassandra won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +auto_snapshot: true + +# Add column indexes to a row after its contents reach this size. +# Increase if your column values are large, or if you have a very large +# number of columns. The competing causes are: Cassandra has to +# deserialize this much of the row to read a single column, so you want +# it to be small - at least if you do many partial-row reads - but all +# the index data is read for each access, so you don't want to generate +# that wastefully either. +column_index_size_in_kb: 64 + +# Size limit for rows being compacted in memory. Larger rows will spill +# over to disk and use a slower two-pass compaction process. A message +# will be logged specifying the row key. +in_memory_compaction_limit_in_mb: 64 + +# Number of simultaneous compactions to allow, NOT including +# validation "compactions" for anti-entropy repair. Simultaneous +# compactions can help preserve read performance in a mixed read/write +# workload, by mitigating the tendency of small sstables to accumulate +# during a single long-running compaction. The default is usually +# fine and if you experience problems with compaction running too +# slowly or too fast, you should look at +# compaction_throughput_mb_per_sec first. +# +# concurrent_compactors defaults to the number of cores. +# Uncomment to make compaction mono-threaded, the pre-0.8 default. +#concurrent_compactors: 1 + +# Multi-threaded compaction. When enabled, each compaction will use +# up to one thread per core, plus one thread per sstable being merged. +# This is usually only useful for SSD-based hardware: otherwise, +# your concern is usually to get compaction to do LESS i/o (see: +# compaction_throughput_mb_per_sec), not more. +multithreaded_compaction: false + +# Throttles compaction to the given total throughput across the entire +# system. The faster you insert data, the faster you need to compact in +# order to keep the sstable count down, but in general, setting this to +# 16 to 32 times the rate you are inserting data is more than sufficient. +# Setting this to 0 disables throttling. Note that this accounts for all types +# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16 + +# Track cached row keys during compaction, and re-cache their new +# positions in the compacted sstable. Disable if you use really large +# key caches. +compaction_preheat_key_cache: true + +# Throttles all outbound streaming file transfers on this node to the +# given total throughput in Mbps. This is necessary because Cassandra does +# mostly sequential IO when streaming data during bootstrap or repair, which +# can lead to saturating the network connection and degrading rpc performance. +# When unset, the default is 400 Mbps or 50 MB/s. +# stream_throughput_outbound_megabits_per_sec: 400 + +# How long the coordinator should wait for read operations to complete +read_rpc_timeout_in_ms: 10000 +# How long the coordinator should wait for seq or index scans to complete +range_rpc_timeout_in_ms: 10000 +# How long the coordinator should wait for writes to complete +write_rpc_timeout_in_ms: 10000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because we need to flush all CFs +# to make sure we can clear out anything in the commitlog that could +# cause truncated data to reappear.) +truncate_rpc_timeout_in_ms: 300000 +# The default timeout for other, miscellaneous operations +rpc_timeout_in_ms: 10000 + +# Enable operation timeout information exchange between nodes to accurately +# measure request timeouts. If disabled, Cassandra will assume the request +# was forwarded to the replica instantly by the coordinator +# +# Warning: before enabling this property make sure ntp is installed +# and the clocks are synchronized between the nodes. +cross_node_timeout: false + +# Enable socket timeout for streaming operation. +# When a timeout occurs during streaming, streaming is retried from the start +# of the current file. This *can* involve re-streaming a significant amount of +# data, so you should avoid setting the value too low. +# Default value is 0, which never times out streams. +# streaming_socket_timeout_in_ms: 0 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# endpoint_snitch -- Set this to a class that implements +# IEndpointSnitch. The snitch has two functions: +# - it teaches Cassandra enough about your network topology to route +# requests efficiently +# - it allows Cassandra to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Cassandra will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Cassandra provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This improves cache locality +# when disabling read repair, which can further improve throughput. +# Only appropriate for single-datacenter deployments. +# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# - GossipingPropertyFileSnitch +# The rack and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via gossip. If +# cassandra-topology.properties exists, it is used as a fallback, allowing +# migration from the PropertyFileSnitch.
+# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's +# IP address, respectively. Unless this happens to match your +# deployment conventions (as it did Facebook's), this is best used +# as an example of writing a custom Snitch class. +# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the Datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.) +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. +# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. The scheduler is further customized by +# request_scheduler_options as described below. +request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# NoScheduler - Has no options +# RoundRobin +# - throttle_limit -- The throttle_limit is the number of in-flight +# requests per client. Requests beyond +# that limit are queued up until +# running requests can complete. +# The value of 80 here is twice the number of +# concurrent_reads + concurrent_writes. +# - default_weight -- default_weight is optional and allows for +# overriding the default which is 1. +# - weights -- Weights are optional and will default to 1 or the +# overridden default_weight. The weight translates into how +# many requests are handled during each turn of the +# RoundRobin, based on the scheduler id. 
+# +# request_scheduler_options: +# throttle_limit: 80 +# default_weight: 5 +# weights: +# Keyspace1: 1 +# Keyspace2: 5 + +# request_scheduler_id -- An identifier based on which to perform +# the request scheduling. Currently the only valid option is keyspace. +# request_scheduler_id: keyspace + +# index_interval controls the sampling of entries from the primary +# row index in terms of space versus time. The larger the interval, +# the smaller and less effective the sampling will be. In technical +# terms, the interval corresponds to the number of index entries that +# are skipped between taking each sample. All the sampled entries +# must fit in memory. Generally, a value between 128 and 512 here +# coupled with a large key cache size on CFs results in the best trade +# offs. This value is not often changed; however, if you have many +# very small rows (many to an OS page), then increasing this will +# often lower memory usage without an impact on performance. +index_interval: 128 + +# Enable or disable inter-node encryption +# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that +# users generate their own keys), and TLS_RSA_WITH_AES_128_CBC_SHA as the cipher +# suite for authentication, key exchange and encryption of the actual data transfers. +# NOTE: No custom encryption options are enabled at the moment +# The available internode options are: all, none, dc, rack +# +# If set to dc, Cassandra will encrypt the traffic between the DCs +# If set to rack, Cassandra will encrypt the traffic between the racks +# +# The passwords used in these options must match the passwords used when generating +# the keystore and truststore. For instructions on generating these files, see: +# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSE... +# +server_encryption_options: + internode_encryption: none + keystore: conf/.keystore + keystore_password: cassandra + truststore: conf/.truststore + truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + +# enable or disable client/server encryption. +# The available options are: none, all +client_encryption_options: + internode_encryption: none + keystore: conf/.keystore + keystore_password: cassandra + truststore: conf/.truststore + truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + + +# internode_compression controls whether traffic between nodes is +# compressed. +# can be: all - all traffic is compressed +# dc - traffic between different datacenters is compressed +# none - nothing is compressed. +internode_compression: none diff --git a/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/conf/log4j-server.properties b/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/conf/log4j-server.properties new file mode 100644 index 0000000..08ecb45 --- /dev/null +++ b/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/conf/log4j-server.properties @@ -0,0 +1,52 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership.
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# for production, you should probably set pattern to %c instead of %l. +# (%l is slower.) + +# output messages into a rolling log file as well as stdout +log4j.rootLogger=@@logging.level@@,stdout,R,tracing + +# stdout +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n +log4j.appender.stdout.Threshold=@@logging.level@@ + +# rolling log file +log4j.appender.R=org.apache.log4j.RollingFileAppender +log4j.appender.R.maxFileSize=20MB +log4j.appender.R.maxBackupIndex=50 +log4j.appender.R.layout=org.apache.log4j.PatternLayout +log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n +# Edit the next line to point to your logs directory +log4j.appender.R.File=@@rhq.deploy.dir@@/@@log.dir@@/system.log +log4j.appender.R.Threshold=@@logging.level@@ + +log4j.appender.tracing=org.apache.cassandra.tracing.TracingAppender +log4j.appender.tracing.layout=org.apache.log4j.PatternLayout +log4j.appender.tracing.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n +log4j.appender.tracing.Threshold=DEBUG + + +# Application logging options +#log4j.logger.org.apache.cassandra=DEBUG +#log4j.logger.org.apache.cassandra.db=DEBUG +#log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG + +# Adding this to avoid thrift logging disconnect errors. +log4j.logger.org.apache.thrift.server.TNonblockingServer=ERROR + diff --git a/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/lib/jna-3.4.1.jar b/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/lib/jna-3.4.1.jar new file mode 100644 index 0000000..4e05a4a Binary files /dev/null and b/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/lib/jna-3.4.1.jar differ diff --git a/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/lib/platform-3.4.1.jar b/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/lib/platform-3.4.1.jar new file mode 100644 index 0000000..8357d2e Binary files /dev/null and b/modules/common/cassandra-common/src/main/resources/cassandra-1.2.0-beta2/lib/platform-3.4.1.jar differ
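A note on the encryption settings in the new cassandra.yaml above: the conf/.keystore and conf/.truststore files referenced by server_encryption_options and client_encryption_options are not shipped in these resources, and the upstream link describing how to create them is truncated. If internode_encryption is ever switched on, the stores can be generated with the JDK's keytool along these lines (the alias, validity, and key size here are illustrative only; the passwords must match the *_password values in cassandra.yaml):

  # create the node's key pair in the keystore (1024-bit RSA, per the documented default)
  keytool -genkey -keyalg RSA -keysize 1024 -alias node0 -validity 365 \
      -keystore conf/.keystore -storepass cassandra -keypass cassandra
  # export the certificate and import it into the truststore shared with the other nodes
  keytool -export -alias node0 -keystore conf/.keystore -storepass cassandra -file node0.cer
  keytool -import -alias node0 -file node0.cer -keystore conf/.truststore \
      -storepass cassandra -noprompt

Each node's certificate would be imported into a common truststore distributed to every node, so the nodes can authenticate one another when internode_encryption is set to all, dc, or rack.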
rhq-commits@lists.fedorahosted.org