 modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java             | 111 +
 modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditorException.java    |  21
 modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java         |  94 +
 modules/common/cassandra-util/src/test/resources/cassandra.yaml                                  | 690 ++++++++++
 modules/plugins/cassandra/pom.xml                                                                |   9
 modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java      | 128 -
 modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java |  51
 7 files changed, 1031 insertions(+), 73 deletions(-)
New commits:
commit 237d38ea10fe57dfb699f01cd999c3aa53391ef4
Author: John Sanda <jsanda@redhat.com>
Date:   Sat Jul 27 12:49:30 2013 -0400

    adding some initial test coverage for prepareForBootstrap operation

    The prepareForBootstrap method has been refactored to use ConfigEditor, but
    there is still a good bit of cleanup to do.
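For orientation, the refactored operation now delegates all cassandra.yaml handling to
ConfigEditor. A condensed sketch of the new prepareForBootstrap flow (a paraphrase of the
diff below, not the verbatim method; variable setup, the auth-file update, and the node
restart are elided):

    ConfigEditor configEditor = new ConfigEditor(yamlFile);
    try {
        configEditor.load();    // parses cassandra.yaml and creates a backup copy

        // wipe stale state so the node can bootstrap into the cluster cleanly
        purgeDir(new File(configEditor.getCommitLogDirectory()));
        for (String dir : configEditor.getDataFileDirectories()) {
            purgeDir(new File(dir));
        }
        purgeDir(new File(configEditor.getSavedCachesDirectory()));

        configEditor.setSeeds(seeds.toArray(new String[seeds.size()]));
        configEditor.setNativeTransportPort(cqlPort);
        configEditor.setStoragePort(gossipPort);
        configEditor.save();    // dumps the YAML and deletes the backup
    } catch (ConfigEditorException e) {
        if (e.getCause() instanceof YAMLException) {
            configEditor.restore();    // copies the backup over cassandra.yaml
        }
    }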
diff --git a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java
index 679a84c..0b4a127 100644
--- a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java
+++ b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java
@@ -76,6 +76,22 @@ public class ConfigEditor {
         }
     }
 
+    public File getBackupFile() {
+        return backupFile;
+    }
+
+    public String getCommitLogDirectory() {
+        return (String) config.get("commitlog_directory");
+    }
+
+    public List<String> getDataFileDirectories() {
+        return (List<String>) config.get("data_file_directories");
+    }
+
+    public String getSavedCachesDirectory() {
+        return (String) config.get("saved_caches_directory");
+    }
+
     public void setSeeds(String... seeds) {
         List seedProviderList = (List) config.get("seed_provider");
         Map seedProvider = (Map) seedProviderList.get(0);
diff --git a/modules/plugins/cassandra/pom.xml b/modules/plugins/cassandra/pom.xml
index da90f09..bafc8d7 100644
--- a/modules/plugins/cassandra/pom.xml
+++ b/modules/plugins/cassandra/pom.xml
@@ -36,6 +36,11 @@
     </dependency>
     <dependency>
       <groupId>${project.groupId}</groupId>
+      <artifactId>rhq-cassandra-util</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>${project.groupId}</groupId>
       <artifactId>rhq-core-domain</artifactId>
       <version>${project.version}</version>
       <scope>provided</scope>
@@ -89,6 +94,10 @@
                 <outputDirectory>${project.build.outputDirectory}/lib</outputDirectory>
                 <artifactItems>
                   <artifactItem>
+                    <groupId>${project.groupId}</groupId>
+                    <artifactId>rhq-cassandra-util</artifactId>
+                  </artifactItem>
+                  <artifactItem>
                     <groupId>com.datastax.cassandra</groupId>
                     <artifactId>cassandra-driver-core</artifactId>
                     <version>${cassandra.driver.version}</version>
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
index 5933093..eb0b9fd 100644
--- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
+++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
@@ -26,7 +26,6 @@
 package org.rhq.plugins.storage;
 
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.FileReader;
 import java.io.FileWriter;
@@ -45,9 +44,10 @@ import org.mc4j.ems.connection.EmsConnection;
 import org.mc4j.ems.connection.bean.EmsBean;
 import org.mc4j.ems.connection.bean.attribute.EmsAttribute;
 import org.mc4j.ems.connection.bean.operation.EmsOperation;
-import org.yaml.snakeyaml.DumperOptions;
-import org.yaml.snakeyaml.Yaml;
+import org.yaml.snakeyaml.error.YAMLException;
 
+import org.rhq.cassandra.util.ConfigEditor;
+import org.rhq.cassandra.util.ConfigEditorException;
 import org.rhq.core.domain.configuration.Configuration;
 import org.rhq.core.domain.configuration.ConfigurationUpdateStatus;
 import org.rhq.core.domain.configuration.Property;
@@ -277,27 +277,18 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
         }
 
         try {
-            FileUtil.copyFile(authFile, authBackupFile);
-        } catch (IOException e) {
-            String msg = "Failed to backup " + authFile + " prior to making updates. The operation will abort due " +
-                "to unexpected error";
-            log.error(msg, e);
-            result.setErrorMessage(msg + ": " + ThrowableUtil.getRootMessage(e));
-            return true;
-        }
-
-        try {
             StreamUtil.copy(new StringReader(StringUtil.collectionToString(ipAddresses, "\n")),
                 new FileWriter(authFile), true);
         } catch (IOException e) {
             log.error("An error occurred while updating " + authFile, e);
             try {
+                log.info("Restoring backup file " + authBackupFile);
                 FileUtil.copyFile(authBackupFile, authFile);
+                authBackupFile.delete();
             } catch (IOException e1) {
                 log.error("Failed to revert backup of " + authFile, e1);
             }
-            result.setErrorMessage("There was an unexpected error while updating " + authFile + ". Make sure that " +
-                "it matches " + authBackupFile + " and then reschedule the operation.");
+            result.setErrorMessage("There was an unexpected error while updating " + authFile);
             return true;
         }
         return false;
@@ -325,74 +316,67 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
         String yamlProp = pluginConfig.getSimpleValue("yamlConfiguration");
         File yamlFile = new File(yamlProp);
 
-        DumperOptions options = new DumperOptions();
-        options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
-        Yaml yaml = new Yaml(options);
-
-        Map yamlConfig = null;
+        ConfigEditor configEditor = new ConfigEditor(yamlFile);
         try {
-            yamlConfig = (Map) yaml.load(new FileInputStream(yamlFile));
-        } catch (FileNotFoundException e) {
-            log.error("Failed to load " + yamlFile, e);
-            log.error("Cannot prepare " + this + " for bootstrap. " + yamlFile + " must exist in order to make the " +
-                "necessary configuration changes.");
-            result.setErrorMessage("Cannot prepare storage node for bootstrap. It appears that " + yamlFile +
-                " does not exist. Make sure that it exists so that the necessary configuration changes can be made.");
+            configEditor.load();
-            return result;
-        }
+            purgeDir(new File(configEditor.getCommitLogDirectory()));
+            for (String dir : configEditor.getDataFileDirectories()) {
+                purgeDir(new File(dir));
+            }
+            purgeDir(new File(configEditor.getSavedCachesDirectory()));
-        purgeDir(getCommitLogDir(yamlConfig));
-        for (File dataDir : getDataDirs(yamlConfig)) {
-            purgeDir(dataDir);
-        }
-        purgeDir(getSavedCachesDir(yamlConfig));
-        log.info("Updating cluster settings");
+            log.info("Updating cluster settings");
-        String address = pluginConfig.getSimpleValue("host");
-        List<String> seeds = getAddresses(params.getList("storageNodeIPAddresses"));
-        // Make sure this node's address is not in the list; otherwise, it
-        // won't bootstrap properly.
-        seeds.remove(address);
-        try {
-            updateSeedsList(seeds);
-        } catch (IOException e) {
-            log.error("Failed to update seeds property in " + yamlFile, e);
-            result.setErrorMessage("Failed to prepared node for bootstrap due to unexpected error that occurred " +
-                "while updating seeds property in " + yamlFile + ":\n" + ThrowableUtil.getAllMessages(e));
-            return result;
-        }
+            String address = pluginConfig.getSimpleValue("host");
+            int cqlPort = Integer.parseInt(params.getSimpleValue("cqlPort"));
+            int gossipPort = Integer.parseInt(params.getSimpleValue("gossipPort"));
+            List<String> addresses = getAddresses(params.getList("storageNodeIPAddresses"));
-        if (updateAuthFile(result, new HashSet<String>(seeds))) {
-            return result;
-        }
+            // Make sure this node's address is not in the list; otherwise, it
+            // won't bootstrap properly.
+            List<String> seeds = new ArrayList<String>(addresses);
+            seeds.remove(address);
-        int cqlPort = Integer.parseInt(params.getSimpleValue("cqlPort"));
-        int gossipPort = Integer.parseInt(params.getSimpleValue("gossipPort"));
+            configEditor.setSeeds(seeds.toArray(new String[seeds.size()]));
+            configEditor.setNativeTransportPort(cqlPort);
+            configEditor.setStoragePort(gossipPort);
-        yamlConfig.put("native_transport_port", cqlPort);
-        yamlConfig.put("storage_port", gossipPort);
+            configEditor.save();
-        try {
-            yaml.dump(yamlConfig, new FileWriter(yamlFile));
-        } catch (IOException e) {
-            log.error("Could not update cluster settings in " + yamlFile, e);
-            result.setErrorMessage("Could not update cluster settings in " + yamlFile + ":\n" +
-                ThrowableUtil.getAllMessages(e));
-            return result;
-        }
+            if (updateAuthFile(result, new HashSet<String>(addresses))) {
+                return result;
+            }
-        log.info(this + " is ready to be bootstrap. Restarting storage node...");
-        OperationResult startResult = startNode();
-        if (startResult.getErrorMessage() != null) {
-            log.error("Failed to restart storage node:\n" + startResult.getErrorMessage());
-            result.setErrorMessage("Failed to restart storage node:\n" + startResult.getErrorMessage());
-        } else {
-            result.setSimpleResult("The storage node was succesfully updated is now bootstrapping into the cluster.");
-        }
+            log.info(this + " is ready to bootstrap. Restarting storage node...");
+            OperationResult startResult = startNode();
+            if (startResult.getErrorMessage() != null) {
+                log.error("Failed to restart storage node:\n" + startResult.getErrorMessage());
+                result.setErrorMessage("Failed to restart storage node:\n" + startResult.getErrorMessage());
+            } else {
+                result.setSimpleResult("The storage node was successfully updated and is now bootstrapping into " +
+                    "the cluster.");
+            }
-        return result;
+            return result;
+        } catch (ConfigEditorException e) {
+            log.error("There was an error while trying to update " + yamlFile, e);
+            if (e.getCause() instanceof YAMLException) {
+                log.info("Attempting to restore " + yamlFile);
+                try {
+                    configEditor.restore();
+                    result.setErrorMessage("Failed to update configuration file [" + yamlFile + "]: " +
+                        ThrowableUtil.getAllMessages(e.getCause()));
+                } catch (ConfigEditorException e1) {
+                    log.error("Failed to restore " + yamlFile + ". A copy of the file prior to any modifications " +
+                        "can be found at " + configEditor.getBackupFile());
+                    result.setErrorMessage("There was an error updating [" + yamlFile + "] and undoing the changes " +
+                        "failed. A copy of the file can be found at " + configEditor.getBackupFile() + ". See the " +
+                        "agent logs for more details.");
+                }
+            }
+            return result;
+        }
     }
 
     private void purgeDir(File dir) {
diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
index eb4d545..d10e428 100644
--- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
+++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
@@ -26,15 +26,25 @@ import static org.testng.Assert.assertNotNull;
 import static org.testng.Assert.assertTrue;
 
 import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileReader;
 import java.net.InetAddress;
 import java.util.Set;
 
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.SeedProviderDef;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.hyperic.sigar.OperatingSystem;
 import org.testng.annotations.AfterSuite;
 import org.testng.annotations.BeforeSuite;
 import org.testng.annotations.Test;
+import org.yaml.snakeyaml.Loader;
+import org.yaml.snakeyaml.TypeDescription;
+import org.yaml.snakeyaml.Yaml;
 
 import org.rhq.cassandra.CassandraClusterManager;
 import org.rhq.cassandra.ClusterInitService;
@@ -63,12 +73,15 @@ import org.rhq.core.system.ProcessExecution;
 import org.rhq.core.system.ProcessExecutionResults;
 import org.rhq.core.system.SystemInfo;
 import org.rhq.core.system.SystemInfoFactory;
+import org.rhq.core.util.stream.StreamUtil;
 
 /**
  * @author John Sanda
  */
 public class StorageNodeComponentITest {
 
+    private final Log log = LogFactory.getLog(StorageNodeComponentITest.class);
+
     private File basedir;
 
     private Resource storageNode;
@@ -232,7 +245,7 @@ public class StorageNodeComponentITest {
     }
 
     @Test(dependsOnMethods = "restartStorageNode")
-    public void prepareForBootstrap() {
+    public void prepareForBootstrap() throws Exception {
         Configuration params = Configuration.builder().addSimple("cqlPort", 9242).addSimple("gossipPort", 7200)
             .openList("storageNodeIPAddresses", "storageNodeIPAddresse").addSimples("127.0.0.1", "127.0.0.2")
             .closeList().build();
@@ -245,10 +258,22 @@ public class StorageNodeComponentITest {
         OperationServicesResult result = operationsService.invokeOperation(operationContext, "prepareForBootstrap",
             params, timeout);
 
+        log.info("Waiting for node to bootstrap...");
+        Thread.sleep(33000);
+
         assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The operation failed: " +
             result.getErrorStackTrace());
 
         assertNodeIsUp("Expected " + storageNode + " to be up after the prepareForBootstrap operation completes.");
+
+        assertThatInternodeAuthConfFileMatches("127.0.0.1", "127.0.0.2");
+
+        File confDir = new File(basedir, "conf");
+        File cassandraYamlFile = new File(confDir, "cassandra.yaml");
+        Config config = loadConfig(cassandraYamlFile);
+
+        assertEquals(config.seed_provider.parameters.get("seeds"), "127.0.0.2", "Failed to update seeds " +
+            "property in " + cassandraYamlFile);
     }
 
     private void assertNodeIsUp(String msg) {
@@ -292,4 +317,28 @@ public class StorageNodeComponentITest {
         return type.getPlugin().equals("RHQStorage") && type.getName().equals("RHQ Storage Node");
     }
 
+    private void assertThatInternodeAuthConfFileMatches(String... addresses) throws Exception {
+        File confDir = new File(basedir, "conf");
+        File internodeAuthConfFile = new File(confDir, "rhq-storage-auth.conf");
+        String contents = StreamUtil.slurp(new FileReader(internodeAuthConfFile));
+
+        Set<String> expected = ImmutableSet.copyOf(addresses);
+        Set<String> actual = ImmutableSet.copyOf(contents.split("\n"));
+
+        assertEquals(actual, expected, "Failed to update internode authentication conf file " +
+            internodeAuthConfFile + ".");
+    }
+
+    private Config loadConfig(File configFile) throws Exception {
+        FileInputStream inputStream = new FileInputStream(configFile);
+        org.yaml.snakeyaml.constructor.Constructor constructor =
+            new org.yaml.snakeyaml.constructor.Constructor(Config.class);
+        TypeDescription seedDesc = new TypeDescription(SeedProviderDef.class);
+        seedDesc.putMapPropertyType("parameters", String.class, String.class);
+        constructor.addTypeDescription(seedDesc);
+        Yaml yaml = new Yaml(new Loader(constructor));
+
+        return (Config) yaml.load(inputStream);
+    }
+
 }
commit 30c6d6678edbb44bd7b07db0cd2855b9f1790aba
Author: John Sanda <jsanda@redhat.com>
Date:   Sat Jul 27 10:14:18 2013 -0400

    initial commit for ConfigEditor which handles updating cassandra.yaml
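The editor wraps SnakeYAML and keeps a backup file for the lifetime of an edit, so a
failed update can be rolled back. Typical usage looks roughly like this (the path is
illustrative):

    ConfigEditor editor = new ConfigEditor(new File("conf/cassandra.yaml"));
    editor.load();                                 // also writes .cassandra.yaml.bak next to the file
    editor.setSeeds("127.0.0.1", "127.0.0.2");     // stored as a comma-delimited string
    editor.setNativeTransportPort(9142);
    editor.setStoragePort(7100);
    editor.save();                                 // rewrites cassandra.yaml and deletes the backup

If a step fails with a ConfigEditorException, restore() copies the backup back over the
original file.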
diff --git a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java
new file mode 100644
index 0000000..679a84c
--- /dev/null
+++ b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditor.java
@@ -0,0 +1,95 @@
+package org.rhq.cassandra.util;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import org.yaml.snakeyaml.DumperOptions;
+import org.yaml.snakeyaml.Yaml;
+
+import org.rhq.core.util.StringUtil;
+import org.rhq.core.util.file.FileUtil;
+
+/**
+ * @author John Sanda
+ */
+public class ConfigEditor {
+
+    private File configFile;
+
+    private File backupFile;
+
+    private Yaml yaml;
+
+    private Map config;
+
+    public ConfigEditor(File cassandraYamlFile) {
+        configFile = cassandraYamlFile;
+    }
+
+    public void load() {
+        try {
+            DumperOptions options = new DumperOptions();
+            options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
+            yaml = new Yaml(options);
+            config = (Map) yaml.load(new FileInputStream(configFile));
+            createBackup();
+        } catch (FileNotFoundException e) {
+            throw new ConfigEditorException("Failed to load " + configFile, e);
+        }
+    }
+
+    public void save() {
+        try {
+            yaml.dump(config, new FileWriter(configFile));
+            backupFile.delete();
+            yaml = null;
+            config = null;
+            backupFile = null;
+        } catch (Exception e) {
+            throw new ConfigEditorException("Failed to save changes to " + configFile, e);
+        }
+    }
+
+    public void restore() {
+        try {
+            FileUtil.copyFile(backupFile, configFile);
+            backupFile.delete();
+            yaml = null;
+            config = null;
+            backupFile = null;
+        } catch (IOException e) {
+            throw new ConfigEditorException("Failed to restore " + configFile + " from " + backupFile, e);
+        }
+    }
+
+    private void createBackup() {
+        backupFile = new File(configFile.getParent(), "." + configFile.getName() + ".bak");
+        try {
+            FileUtil.copyFile(configFile, backupFile);
+        } catch (IOException e) {
+            throw new ConfigEditorException("Failed to create " + backupFile, e);
+        }
+    }
+
+    public void setSeeds(String... seeds) {
+        List seedProviderList = (List) config.get("seed_provider");
+        Map seedProvider = (Map) seedProviderList.get(0);
+        List paramsList = (List) seedProvider.get("parameters");
+        Map params = (Map) paramsList.get(0);
+        params.put("seeds", StringUtil.arrayToString(seeds));
+    }
+
+    public void setNativeTransportPort(int port) {
+        config.put("native_transport_port", port);
+    }
+
+    public void setStoragePort(int port) {
+        config.put("storage_port", port);
+    }
+
+}
diff --git a/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditorException.java b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditorException.java
new file mode 100644
index 0000000..db9e7ea
--- /dev/null
+++ b/modules/common/cassandra-util/src/main/java/org/rhq/cassandra/util/ConfigEditorException.java
@@ -0,0 +1,21 @@
+package org.rhq.cassandra.util;
+
+/**
+ * @author John Sanda
+ */
+public class ConfigEditorException extends RuntimeException {
+
+    public ConfigEditorException() {
+    }
+
+    public ConfigEditorException(String message) {
+        super(message);
+    }
+
+    public ConfigEditorException(String message, Throwable cause) {
+        super(message, cause);
+    }
+
+    public ConfigEditorException(Throwable cause) {
+        super(cause);
+    }
+}
diff --git a/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java b/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java
new file mode 100644
index 0000000..cf344e2
--- /dev/null
+++ b/modules/common/cassandra-util/src/test/java/org/rhq/cassandra/util/ConfigEditorTest.java
@@ -0,0 +1,94 @@
+package org.rhq.cassandra.util;
+
+import static org.testng.Assert.assertEquals;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.InputStream;
+import java.lang.reflect.Method;
+
+import org.apache.cassandra.config.Config;
+import org.apache.cassandra.config.SeedProviderDef;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+import org.yaml.snakeyaml.Loader;
+import org.yaml.snakeyaml.TypeDescription;
+import org.yaml.snakeyaml.Yaml;
+
+import org.rhq.core.util.file.FileUtil;
+import org.rhq.core.util.stream.StreamUtil;
+
+/**
+ * @author John Sanda
+ */
+public class ConfigEditorTest {
+
+    private File basedir;
+
+    private File configFile;
+
+    @BeforeMethod
+    public void initTestDir(Method test) throws Exception {
+        File dir = new File(getClass().getResource(".").toURI());
+        basedir = new File(dir, getClass().getSimpleName() + "/" + test.getName());
+        FileUtil.purge(basedir, true);
+        basedir.mkdirs();
+
+        configFile = new File(basedir, "cassandra.yaml");
+
+        InputStream inputStream = getClass().getResourceAsStream("/cassandra.yaml");
+        FileOutputStream outputStream = new FileOutputStream(configFile);
+        StreamUtil.copy(inputStream, outputStream);
+    }
+
+    @Test
+    public void updateSeeds() throws Exception {
+        ConfigEditor editor = new ConfigEditor(configFile);
+        editor.load();
+        editor.setSeeds("127.0.0.1", "127.0.0.2", "127.0.0.3");
+        editor.save();
+
+        Config config = loadConfig();
+
+        assertEquals(config.seed_provider.parameters.get("seeds"), "127.0.0.1,127.0.0.2,127.0.0.3",
+            "Failed to update seeds property.");
+    }
+
+    @Test
+    public void updateNativeTransportPort() throws Exception {
+        ConfigEditor editor = new ConfigEditor(configFile);
+        editor.load();
+        editor.setNativeTransportPort(9393);
+        editor.save();
+
+        Config config = loadConfig();
+
+        assertEquals(config.native_transport_port, (Integer) 9393, "Failed to update native_transport_port");
+    }
+
+    @Test
+    public void updateStoragePort() throws Exception {
+        ConfigEditor editor = new ConfigEditor(configFile);
+        editor.load();
+        editor.setStoragePort(6767);
+        editor.save();
+
+        Config config = loadConfig();
+
+        assertEquals(config.storage_port, (Integer) 6767, "Failed to update storage_port");
+    }
+
+    private Config loadConfig() throws Exception {
+        FileInputStream inputStream = new FileInputStream(configFile);
+        org.yaml.snakeyaml.constructor.Constructor constructor =
+            new org.yaml.snakeyaml.constructor.Constructor(Config.class);
+        TypeDescription seedDesc = new TypeDescription(SeedProviderDef.class);
+        seedDesc.putMapPropertyType("parameters", String.class, String.class);
+        constructor.addTypeDescription(seedDesc);
+        Yaml yaml = new Yaml(new Loader(constructor));
+
+        return (Config) yaml.load(inputStream);
+    }
+
+}
diff --git a/modules/common/cassandra-util/src/test/resources/cassandra.yaml b/modules/common/cassandra-util/src/test/resources/cassandra.yaml
new file mode 100644
index 0000000..fd7973b
--- /dev/null
+++ b/modules/common/cassandra-util/src/test/resources/cassandra.yaml
@@ -0,0 +1,690 @@
+# Cassandra storage config YAML
+
+# NOTE:
+# See http://wiki.apache.org/cassandra/StorageConfiguration for
+# full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: rhq
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+#
+# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
+# and will use the initial_token as described below.
+#
+# Specifying initial_token will override this setting.
+#
+# If you already have a cluster with 1 token per node, and wish to migrate to
+# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
+num_tokens: 256
+
+# If you haven't specified num_tokens, or have set it to the default of 1 then
+# you should always specify InitialToken when setting up a production
+# cluster for the first time, and often when adding capacity later.
+# The principle is that each node should be given an equal slice of
+# the token ring; see http://wiki.apache.org/cassandra/Operations
+# for more details.
+#
+# If blank, Cassandra will request a token bisecting the range of
+# the heaviest-loaded existing node. If there is no load information
+# available, such as is the case with a new cluster, it will pick
+# a random token, which will lead to hot spots.
+#initial_token:
+
+# See http://wiki.apache.org/cassandra/HintedHandoff
+hinted_handoff_enabled: true
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, new hints for it will not be
+# created until it has been seen alive and gone down again.
+max_hint_window_in_ms: 10800000 # 3 hours
+# throttle in KB's per second, per delivery thread
+hinted_handoff_throttle_in_kb: 1024
+# Number of threads with which to deliver hints;
+# Consider increasing this number when you have multi-dc deployments, since
+# cross-dc handoff tends to be slower
+max_hints_delivery_threads: 2
+
+# The following setting populates the page cache on memtable flush and compaction
+# WARNING: Enable this setting only when the whole node's data fits in memory.
+# Defaults to: false
+# populate_io_cache_on_flush: false
+
+# Authentication backend, implementing IAuthenticator; used to identify users
+# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
+# PasswordAuthenticator}.
+#
+# - AllowAllAuthenticator performs no checks - set it to disable authentication.
+# - PasswordAuthenticator relies on username/password pairs to authenticate
+# users. It keeps usernames and hashed passwords in system_auth.credentials table.
+# Please increase system_auth keyspace replication factor if you use this authenticator.
+authenticator: org.apache.cassandra.auth.PasswordAuthenticator
+
+# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
+# CassandraAuthorizer}.
+#
+# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
+# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
+# increase system_auth keyspace replication factor if you use this authorizer.
+authorizer: org.apache.cassandra.auth.CassandraAuthorizer
+
+# Validity period for permissions cache (fetching permissions can be an
+# expensive operation depending on the authorizer, CassandraAuthorizer is
+# one example). Defaults to 2000, set to 0 to disable.
+# Will be disabled automatically for AllowAllAuthorizer.
+permissions_validity_in_ms: 2000
+
+# The partitioner is responsible for distributing rows (by key) across
+# nodes in the cluster. Any IPartitioner may be used, including your
+# own as long as it is on the classpath. Out of the box, Cassandra
+# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner
+# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}.
+#
+# - RandomPartitioner distributes rows across the cluster evenly by md5.
+# This is the default prior to 1.2 and is retained for compatibility.
+# - Murmur3Partitioner is similar to RandomPartitioner but uses Murmur3_128
+# Hash Function instead of md5. When in doubt, this is the best option.
+# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows
+# scanning rows in key order, but the ordering can generate hot spots
+# for sequential insertion workloads.
+# - OrderPreservingPartitioner is an obsolete form of BOP, that stores
+# - keys in a less-efficient format and only works with keys that are
+# UTF8-encoded Strings.
+# - CollatingOPP collates according to EN,US rules rather than lexical byte
+# ordering. Use this as an example if you need custom collation.
+#
+# See http://wiki.apache.org/cassandra/Operations for more on
+# partitioners and token selection.
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+
+# directories where Cassandra should store data on disk.
+data_file_directories:
+ - /Users/jsanda/Development/redhat/rhq/rhq-data/storage-1/data
+
+# commit log
+commitlog_directory: /Users/jsanda/Development/redhat/rhq/rhq-data/storage-1/commit_log
+
+# policy for data disk failures:
+# stop: shut down gossip and Thrift, leaving the node effectively dead, but
+# still inspectable via JMX.
+# best_effort: stop using the failed disk and respond to requests based on
+# remaining available sstables. This means you WILL see obsolete
+# data at CL.ONE!
+# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
+disk_failure_policy: stop
+
+# Maximum size of the key cache in memory.
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must store the whole values of
+# its rows, so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
+key_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the key cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Maximum size of the row cache in memory.
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should
+# save the row cache. Caches are saved to saved_caches_directory as specified
+# in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save
+# Disabled by default, meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# The provider for the row cache to use.
+#
+# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider
+#
+# SerializingCacheProvider serialises the contents of the row and stores
+# it in native memory, i.e., off the JVM Heap. Serialized rows take
+# significantly less memory than "live" rows in the JVM, so you can cache
+# more rows in a given memory footprint. And storing the cache off-heap
+# means you can use smaller heap sizes, reducing the impact of GC pauses.
+#
+# It is also valid to specify the fully-qualified class name to a class
+# that implements org.apache.cassandra.cache.IRowCacheProvider.
+#
+# Defaults to SerializingCacheProvider
+row_cache_provider: SerializingCacheProvider
+
+# saved caches
+saved_caches_directory: /Users/jsanda/Development/redhat/rhq/rhq-data/storage-1/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch."
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait up to
+# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
+# performing the sync.
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 50
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+commitlog_segment_size_in_mb: 32
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
+seed_provider:
+ # Addresses of hosts that are deemed contact points.
+ # Cassandra nodes use this list of hosts to find each other and learn
+ # the topology of the ring. You must change this if you are running
+ # multiple nodes!
+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+ parameters:
+ # seeds is actually a comma-delimited list of addresses.
+ # Ex: "<ip1>,<ip2>,<ip3>"
+ - seeds: "127.0.0.1"
+
+# emergency pressure valve: each time heap usage after a full (CMS)
+# garbage collection is above this fraction of the max, Cassandra will
+# flush the largest memtables.
+#
+# Set to 1.0 to disable. Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+#
+# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
+# it is most effective under light to moderate load, or read-heavy
+# workloads; under truly massive write load, it will often be too
+# little, too late.
+flush_largest_memtables_at: 0.75
+
+# emergency pressure valve #2: the first time heap usage after a full
+# (CMS) garbage collection is above this fraction of the max,
+# Cassandra will reduce cache maximum _capacity_ to the given fraction
+# of the current _size_. Should usually be set substantially above
+# flush_largest_memtables_at, since that will have less long-term
+# impact on the system.
+#
+# Set to 1.0 to disable. Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+reduce_cache_sizes_at: 0.85
+reduce_cache_capacity_to: 0.6
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+concurrent_reads: 32
+concurrent_writes: 32
+
+# Total memory to use for memtables. Cassandra will flush the largest
+# memtable when this much memory is used.
+# If omitted, Cassandra will set it to 1/3 of the heap.
+# memtable_total_space_in_mb: 2048
+
+# Total space to use for commitlogs. Since commitlog segments are
+# mmapped, and hence use up address space, the default size is 32
+# on 32-bit JVMs, and 1024 on 64-bit JVMs.
+#
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Cassandra will flush every dirty CF in the oldest
+# segment and remove it. So a small total commitlog space will tend
+# to cause more flush activity on less-active columnfamilies.
+# commitlog_total_space_in_mb: 4096
+
+# This sets the amount of memtable flush writer threads. These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked. If you have a large heap and many data directories,
+# you can increase this value for better flush performance.
+# By default this will be set to the amount of data directories defined.
+#memtable_flush_writers: 1
+
+# the number of full memtables to allow pending flush, that is,
+# waiting for a writer thread. At a minimum, this should be set to
+# the maximum number of secondary indexes created on a single CF.
+memtable_flush_queue_size: 4
+
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSD:s; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+storage_port: 7100
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+ssl_storage_port: 7101
+
+# Address to bind to and tell other Cassandra nodes to connect to. You
+# _must_ change this if you want multiple nodes to be able to
+# communicate!
+#
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing *if* the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting this to 0.0.0.0 is always wrong.
+listen_address: 127.0.0.1
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+# Internode authentication backend, implementing IInternodeAuthenticator;
+# used to allow/disallow connections from peer nodes.
+internode_authenticator: org.rhq.cassandra.auth.RhqInternodeAuthenticator
+
+# Whether to start the native transport server.
+# Currently, only the thrift server is started by default because the native
+# transport is considered beta.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+start_native_transport: true
+# port for the CQL native transport to listen for clients on
+native_transport_port: 9142
+# The minimum and maximum threads for handling requests when the native
+# transport is used. The meaning of these is similar to that of
+# rpc_min_threads and rpc_max_threads, though the defaults differ slightly and
+# are the ones below:
+# native_transport_min_threads: 16
+native_transport_max_threads: 128
+
+
+# Whether to start the thrift rpc server.
+start_rpc: false
+# The address to bind the Thrift RPC service to -- clients connect
+# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
+# you want Thrift to listen on all interfaces.
+#
+# Leaving this blank has the same effect it does for ListenAddress,
+# (i.e. it will be based on the configured hostname of the node).
+rpc_address: 127.0.0.1
+# port for Thrift to listen for clients on
+rpc_port: 9160
+
+# enable or disable keepalive on rpc connections
+rpc_keepalive: true
+
+# Cassandra provides three out-of-the-box options for the RPC Server:
+#
+# sync  -> One thread per thrift connection. For a very large number of clients, memory
+#          will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size
+#          per thread, and that will correspond to your use of virtual memory (but physical memory
+#          may be limited depending on use of stack space).
+#
+# hsha  -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
+#          asynchronously using a small number of threads that does not vary with the amount
+#          of thrift clients (and thus scales well to many clients). The rpc requests are still
+#          synchronous (one thread per active request).
+#
+# The default is sync because on Windows hsha is about 30% slower. On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+#
+# Alternatively, you can provide your own RPC server by providing the fully-qualified class name
+# of an o.a.c.t.TServerFactory that can create an instance of it.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max_thread to set request pool size limits.
+#
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when not setting it, it is defined by net.ipv4.tcp_wmem
+# See:
+# /proc/sys/net/core/wmem_max
+# /proc/sys/net/core/rmem_max
+# /proc/sys/net/ipv4/tcp_wmem
+# /proc/sys/net/ipv4/tcp_rmem
+# and: man tcp
+# internode_send_buff_size_in_bytes:
+# internode_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum field length).
+thrift_framed_transport_size_in_mb: 15
+
+# The max length of a thrift message, including all fields and
+# internal thrift overhead.
+thrift_max_message_length_in_mb: 16
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# Keyspace data. Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# Add column indexes to a row after its contents reach this size.
+# Increase if your column values are large, or if you have a very large
+# number of columns. The competing causes are, Cassandra has to
+# deserialize this much of the row to read a single column, so you want
+# it to be small - at least if you do many partial-row reads - but all
+# the index data is read for each access, so you don't want to generate
+# that wastefully either.
+column_index_size_in_kb: 64
+
+# Size limit for rows being compacted in memory. Larger rows will spill
+# over to disk and use a slower two-pass compaction process. A message
+# will be logged specifying the row key.
+in_memory_compaction_limit_in_mb: 64
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long running compaction. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the number of cores.
+# Uncomment to make compaction mono-threaded, the pre-0.8 default.
+#concurrent_compactors: 1
+
+# Multi-threaded compaction. When enabled, each compaction will use
+# up to one thread per core, plus one thread per sstable being merged.
+# This is usually only useful for SSD-based hardware: otherwise,
+# your concern is usually to get compaction to do LESS i/o (see:
+# compaction_throughput_mb_per_sec), not more.
+multithreaded_compaction: false
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# Track cached row keys during compaction, and re-cache their new
+# positions in the compacted sstable. Disable if you use really large
+# key caches.
+compaction_preheat_key_cache: true
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 200 Mbps or 25 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 200
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 20000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 20000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 20000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because we need to flush all CFs
+# to make sure we can clear out anything in the commitlog that could
+# cause truncated data to reappear.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 20000
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts. If disabled, cassandra will assume the request
+# was forwarded to the replica instantly by the coordinator.
+#
+# Warning: before enabling this property make sure ntp is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
+
+# Enable socket timeout for streaming operation.
+# When a timeout occurs during streaming, streaming is retried from the start
+# of the current file. This *can* involve re-streaming an important amount of
+# data, so you should avoid setting the value too low.
+# Default value is 0, which never timeout streams.
+# streaming_socket_timeout_in_ms: 0
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch. The snitch has two functions:
+# - it teaches Cassandra enough about your network topology to route
+# requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+# correlated failures. It does this by grouping machines into
+# "datacenters" and "racks." Cassandra will do its best not to
have
+# more than one replica on the same "rack" (which may not actually
+# be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
+# Out of the box, Cassandra provides
+# - SimpleSnitch:
+# Treats Strategy order as proximity. This improves cache locality
+# when disabling read repair, which can further improve throughput.
+# Only appropriate for single-datacenter deployments.
+# - PropertyFileSnitch:
+# Proximity is determined by rack and data center, which are
+# explicitly configured in cassandra-topology.properties.
+# - GossipingPropertyFileSnitch
+# The rack and datacenter for the local node are defined in
+# cassandra-rackdc.properties and propagated to other nodes via gossip. If
+# cassandra-topology.properties exists, it is used as a fallback, allowing
+# migration from the PropertyFileSnitch.
+# - RackInferringSnitch:
+# Proximity is determined by rack and data center, which are
+# assumed to correspond to the 3rd and 2nd octet of each node's
+# IP address, respectively. Unless this happens to match your
+# deployment conventions (as it did Facebook's), this is best used
+# as an example of writing a custom Snitch class.
+# - Ec2Snitch:
+# Appropriate for EC2 deployments in a single Region. Loads Region
+# and Availability Zone information from the EC2 API. The Region is
+# treated as the Datacenter, and the Availability Zone as the rack.
+# Only private IPs are used, so this will not work across multiple
+# Regions.
+# - Ec2MultiRegionSnitch:
+# Uses public IPs as broadcast_address to allow cross-region
+# connectivity. (Thus, you should set seed addresses to the public
+# IP as well.) You will need to open the storage_port or
+# ssl_storage_port on the public IP firewall. (For intra-Region
+# traffic, Cassandra will switch to the private IP after
+# establishing a connection.)
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it. This is
+# expressed as a double which represents a percentage. Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+# NoScheduler - Has no options
+# RoundRobin
+# - throttle_limit -- The throttle_limit is the number of in-flight
+# requests per client. Requests beyond
+# that limit are queued up until
+# running requests can complete.
+# The value of 80 here is twice the number of
+# concurrent_reads + concurrent_writes.
+# - default_weight -- default_weight is optional and allows for
+# overriding the default which is 1.
+# - weights -- Weights are optional and will default to 1 or the
+# overridden default_weight. The weight translates into how
+# many requests are handled during each turn of the
+# RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+# throttle_limit: 80
+# default_weight: 5
+# weights:
+# Keyspace1: 1
+# Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# index_interval controls the sampling of entries from the primary
+# row index in terms of space versus time. The larger the interval,
+# the smaller and less effective the sampling will be. In technical
+# terms, the interval corresponds to the number of index entries that
+# are skipped between taking each sample. All the sampled entries
+# must fit in memory. Generally, a value between 128 and 512 here
+# coupled with a large key cache size on CFs results in the best trade
+# offs. This value is not often changed, however if you have many
+# very small rows (many to an OS page), then increasing this will
+# often lower memory usage without an impact on performance.
+index_interval: 128
+
+# Enable or disable inter-node encryption
+# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
+# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
+# suite for authentication, key exchange and encryption of the actual data transfers.
+# NOTE: No custom encryption options are enabled at the moment
+# The available internode options are : all, none, dc, rack
+#
+# If set to dc cassandra will encrypt the traffic between the DCs
+# If set to rack cassandra will encrypt the traffic between the racks
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore. For instructions on generating these files, see:
+#
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/J...
+#
+server_encryption_options:
+ internode_encryption: none
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ truststore: conf/.truststore
+ truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
+ # require_client_auth: false
+
+# enable or disable client/server encryption.
+client_encryption_options:
+ enabled: false
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ # require_client_auth: false
+ # Set trustore and truststore_password if require_client_auth is true
+ # truststore: conf/.truststore
+ # truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be: all - all traffic is compressed
+# dc - traffic between different datacenters is compressed
+# none - nothing is compressed.
+internode_compression: none
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+inter_dc_tcp_nodelay: true
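
A closing note on the seed_provider block above: it is a list containing a single
provider map, whose "parameters" entry is itself a list containing a single map that
holds the comma-delimited "seeds" string. That nesting is exactly what
ConfigEditor.setSeeds() walks:

    List seedProviderList = (List) config.get("seed_provider");
    Map seedProvider = (Map) seedProviderList.get(0);
    List paramsList = (List) seedProvider.get("parameters");
    Map params = (Map) paramsList.get(0);
    params.put("seeds", StringUtil.arrayToString(seeds));    // e.g. "127.0.0.1,127.0.0.2"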