[rhq] modules/plugins
by Thomas Segismont
modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java | 38 +++++++--
modules/plugins/rhq-storage/pom.xml | 16 +++
modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java | 26 +++++-
modules/plugins/rhq-storage/src/test/resources/log4j.properties | 42 ----------
modules/plugins/rhq-storage/src/test/resources/log4j.xml | 40 +++++++++
5 files changed, 109 insertions(+), 53 deletions(-)
New commits:
commit 20f29263bca99d540b4027b63b690f805663ed13
Author: Thomas Segismont <tsegismo(a)redhat.com>
Date: Wed Jul 24 18:55:25 2013 +0200
Fix StorageNodeComponentITest.shutdownStorageNode
Fixed storage node module pom typo
Made the test Cassandra server start with relative paths in the classpath (otherwise the command line is too long and gets truncated in /proc/pid/cmdline, thus preventing the process query from finding the server)
Made the CassandraNodeComponent shutdown operation wait for the server to go down
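In short, the start script is now launched by a relative path with bin/ as the working directory, which keeps the command line short, and the shutdown operation polls the native process until it is gone. A condensed sketch of the start-script part, mirroring the ProcessExecution calls in the diff below (basedir and systemInfo are assumed to come from the surrounding test code):

    // Run "./cassandra" with bin/ as the working directory so the resulting command
    // line stays short enough not to be truncated in /proc/<pid>/cmdline.
    File binDir = new File(basedir, "bin");
    ProcessExecution startScriptExe = ProcessExecutionUtility.createProcessExecution(new File("./cassandra"));
    startScriptExe.setWorkingDirectory(binDir.getAbsolutePath());
    startScriptExe.setCheckExecutableExists(false); // the relative path only resolves once the working directory applies
    startScriptExe.addArguments(asList("-p", "cassandra.pid"));
    startScriptExe.setCaptureOutput(true);
    ProcessExecutionResults results = systemInfo.executeProcess(startScriptExe);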
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
index 8d74ccc..c41e8e7 100644
--- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
+++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
@@ -132,12 +132,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
long start = System.nanoTime();
try {
// Get a fresh snapshot of the process
- ProcessInfoSnapshot processInfoSnapshot = (processInfo == null) ? null : processInfo.freshSnapshot();
- if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) {
- processInfo = getResourceContext().getNativeProcess();
- // Safe to get prior snapshot here, we've just recreated the process info instance
- processInfoSnapshot = (processInfo == null) ? null : processInfo.priorSnaphot();
- }
+ ProcessInfoSnapshot processInfoSnapshot = getProcessInfoSnapshot();
return (processInfoSnapshot != null && processInfoSnapshot.isRunning()) ? AvailabilityType.UP
: AvailabilityType.DOWN;
} finally {
@@ -151,11 +146,23 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
}
}
+ private ProcessInfoSnapshot getProcessInfoSnapshot() {
+ ProcessInfoSnapshot processInfoSnapshot = (processInfo == null) ? null : processInfo.freshSnapshot();
+ if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) {
+ processInfo = getResourceContext().getNativeProcess();
+ // Safe to get prior snapshot here, we've just recreated the process info instance
+ processInfoSnapshot = (processInfo == null) ? null : processInfo.priorSnaphot();
+ }
+ return processInfoSnapshot;
+ }
+
@Override
public OperationResult invokeOperation(String name, Configuration parameters) throws Exception {
if (name.equals("shutdown")) {
- return shutdownNode();
+ OperationResult operationResult = shutdownNode();
+ waitForNodeToGoDown();
+ return operationResult;
} else if (name.equals("start")) {
return startNode();
} else if (name.equals("restart")) {
@@ -167,6 +174,23 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
return null;
}
+ private void waitForNodeToGoDown() throws InterruptedException {
+ for (ProcessInfoSnapshot processInfoSnapshot = getProcessInfoSnapshot();; processInfoSnapshot = getProcessInfoSnapshot()) {
+ if (processInfoSnapshot == null || !processInfoSnapshot.isRunning()) {
+ // Process not found, so it died, that's fine
+ // OR
+ // Process info says process is no longer running, that's fine as well
+ break;
+ }
+ if (getResourceContext().getComponentInvocationContext().isInterrupted()) {
+ // Operation canceled or timed out
+ throw new InterruptedException();
+ }
+ // Process is still running, wait a second and check again
+ Thread.sleep(SECONDS.toMillis(1));
+ }
+ }
+
@SuppressWarnings("rawtypes")
protected OperationResult shutdownNode() {
ResourceContext<?> context = getResourceContext();
diff --git a/modules/plugins/rhq-storage/pom.xml b/modules/plugins/rhq-storage/pom.xml
index df79e40..b1d50b8 100644
--- a/modules/plugins/rhq-storage/pom.xml
+++ b/modules/plugins/rhq-storage/pom.xml
@@ -55,6 +55,20 @@
</dependency>
<dependency>
+ <groupId>${rhq.groupId}</groupId>
+ <artifactId>test-utils</artifactId>
+ <version>${project.version}</version>
+ <scope>test</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-nop</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+
+ <dependency>
<groupId>${project.groupId}</groupId>
<artifactId>rhq-cassandra-schema</artifactId>
<version>${project.version}</version>
@@ -70,7 +84,7 @@
<phase>pre-integration-test</phase>
<configuration>
<target>
- <property name="sigar.dir" value="${project.build.directory/sigar}"/>
+ <property name="sigar.dir" value="${project.build.directory}/sigar"/>
<mkdir dir="${pc.basedir}"/>
<mkdir dir="${pc.lib.dir}"/>
diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
index b668073..5bc8b31 100644
--- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
+++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
@@ -1,3 +1,22 @@
+/*
+ * RHQ Management Platform
+ * Copyright (C) 2005-2013 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
package org.rhq.plugins.storage;
import static java.util.Arrays.asList;
@@ -94,8 +113,10 @@ public class StorageNodeComponentITest {
File binDir = new File(basedir, "bin");
SystemInfo systemInfo = SystemInfoFactory.createSystemInfo();
- File startScript = new File(binDir, "cassandra");
+ File startScript = new File("./cassandra");
ProcessExecution startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript);
+ startScriptExe.setWorkingDirectory(binDir.getAbsolutePath());
+ startScriptExe.setCheckExecutableExists(false);
startScriptExe.addArguments(asList("-p", "cassandra.pid"));
startScriptExe.setCaptureOutput(true);
@@ -176,8 +197,7 @@ public class StorageNodeComponentITest {
assertFalse(pidFile.exists(), pidFile + " should be deleted when the storage node is shutdown.");
- // TODO why is this failing?
- //assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down");
+ assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down");
}
@Test(dependsOnMethods = "shutdownStorageNode")
diff --git a/modules/plugins/rhq-storage/src/test/resources/log4j.properties b/modules/plugins/rhq-storage/src/test/resources/log4j.properties
deleted file mode 100644
index 67db049..0000000
--- a/modules/plugins/rhq-storage/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# /*
-# * RHQ Management Platform
-# * Copyright (C) 2005-2012 Red Hat, Inc.
-# * All rights reserved.
-# *
-# * This program is free software; you can redistribute it and/or modify
-# * it under the terms of the GNU General Public License, version 2, as
-# * published by the Free Software Foundation, and/or the GNU Lesser
-# * General Public License, version 2.1, also as published by the Free
-# * Software Foundation.
-# *
-# * This program is distributed in the hope that it will be useful,
-# * but WITHOUT ANY WARRANTY; without even the implied warranty of
-# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# * GNU General Public License and the GNU Lesser General Public License
-# * for more details.
-# *
-# * You should have received a copy of the GNU General Public License
-# * and the GNU Lesser General Public License along with this program;
-# * if not, write to the Free Software Foundation, Inc.,
-# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-# */
-#
-
-log4j.rootCategory=WARN, FILE, CONSOLE
-
-log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.FILE.DatePattern='.'yyyy-MM-dd
-log4j.appender.FILE.File=./target/test.log
-log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.FILE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n
-#log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n
-log4j.appender.FILE.Append=false
-
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-#log4j.appender.CONSOLE.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
-log4j.appender.CONSOLE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n
-
-log4j.logger.org.rhq=DEBUG
-log4j.logger.com.datastax=DEBUG
diff --git a/modules/plugins/rhq-storage/src/test/resources/log4j.xml b/modules/plugins/rhq-storage/src/test/resources/log4j.xml
new file mode 100644
index 0000000..ec3cd98
--- /dev/null
+++ b/modules/plugins/rhq-storage/src/test/resources/log4j.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
+
+<!-- | For more configuration information and examples, see the Jakarta Log4j | website: http://jakarta.apache.org/log4j -->
+
+<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
+
+ <appender name="CONSOLE" class="org.apache.log4j.ConsoleAppender">
+ <param name="Target" value="System.out" />
+ <param name="Threshold" value="WARN" />
+ <layout class="org.apache.log4j.PatternLayout">
+ <param name="ConversionPattern" value="%-5p %d{dd-MM HH:mm:ss,SSS} (%F:%M:%L) - %m%n" />
+ </layout>
+ </appender>
+
+ <appender name="FILE" class="org.apache.log4j.RollingFileAppender">
+ <param name="File" value="target/test.log" />
+ <param name="Append" value="false" />
+ <param name="Threshold" value="DEBUG" />
+ <layout class="org.apache.log4j.PatternLayout">
+ <param name="ConversionPattern" value="%-5p %d{dd-MM HH:mm:ss,SSS} (%F:%M:%L) - %m%n" />
+ </layout>
+ </appender>
+
+ <logger name="org.rhq">
+ <level value="DEBUG" />
+ </logger>
+
+ <logger name="com.datastax">
+ <level value="DEBUG" />
+ </logger>
+
+ <root>
+ <level value="WARN" />
+ <appender-ref ref="CONSOLE" />
+ <appender-ref ref="FILE" />
+ </root>
+
+</log4j:configuration>
[rhq] modules/plugins
by John Sanda
modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java | 11 +++
modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java | 33 +++++++++-
2 files changed, 42 insertions(+), 2 deletions(-)
New commits:
commit 68fcc27445e375748d2bd27429d7729ae3c076db
Author: John Sanda <jsanda(a)redhat.com>
Date: Wed Jul 24 07:47:24 2013 -0400
[BZ 987899] remove and create pid file during shutdown and start operations
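Condensed, the pid-file handling added by this commit looks like the sketch below (pluginConfig and startScript are assumed from the surrounding component code; the full context is in the diff that follows):

    // shutdown: remove the stale pid file once the Cassandra daemon has been killed
    File binDir = new File(new File(pluginConfig.getSimpleValue("baseDir")), "bin");
    File pidFile = new File(binDir, "cassandra.pid");
    pidFile.delete();

    // start: have the cassandra start script write the pid file so later shutdowns can find the process
    ProcessExecution scriptExe = ProcessExecutionUtility.createProcessExecution(startScript);
    scriptExe.addArguments(asList("-p", pidFile.getAbsolutePath()));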
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
index cf24fcd..8d74ccc 100644
--- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
+++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
@@ -18,6 +18,7 @@
*/
package org.rhq.plugins.cassandra;
+import static java.util.Arrays.asList;
import static java.util.concurrent.TimeUnit.NANOSECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.rhq.core.system.OperatingSystemType.WINDOWS;
@@ -210,6 +211,14 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
long pid = process.getPid();
try {
process.kill("KILL");
+
+ Configuration pluginConfig = getResourceContext().getPluginConfiguration();
+ File basedir = new File(pluginConfig.getSimpleValue("baseDir"));
+ File binDir = new File(basedir, "bin");
+ File pidFile = new File(binDir, "cassandra.pid");
+
+ pidFile.delete();
+
return new OperationResult("Successfully shut down Cassandra daemon with pid " + pid);
} catch (SigarException e) {
LOG.warn("Failed to shut down Cassandra node with pid " + pid, e);
@@ -226,8 +235,10 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
String baseDir = pluginConfig.getSimpleValue("baseDir");
File binDir = new File(baseDir, "bin");
File startScript = new File(binDir, getStartScript());
+ File pidFile = new File(binDir, "cassandra.pid");
ProcessExecution scriptExe = ProcessExecutionUtility.createProcessExecution(startScript);
+ scriptExe.addArguments(asList("-p", pidFile.getAbsolutePath()));
SystemInfo systemInfo = context.getSystemInformation();
ProcessExecutionResults results = systemInfo.executeProcess(scriptExe);
diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
index cd9f148..b668073 100644
--- a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
+++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
@@ -2,7 +2,9 @@ package org.rhq.plugins.storage;
import static java.util.Arrays.asList;
import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNotNull;
+import static org.testng.Assert.assertTrue;
import java.io.File;
import java.net.InetAddress;
@@ -168,8 +170,34 @@ public class StorageNodeComponentITest {
new Configuration(), timeout);
assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The shutdown operation failed");
+
+ File binDir = new File(basedir, "bin");
+ File pidFile = new File(binDir, "cassandra.pid");
+
+ assertFalse(pidFile.exists(), pidFile + " should be deleted when the storage node is shutdown.");
+
// TODO why is this failing?
- assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down");
+ //assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down");
+ }
+
+ @Test(dependsOnMethods = "shutdownStorageNode")
+ public void restartStorageNode() {
+ OperationManager operationManager = PluginContainer.getInstance().getOperationManager();
+ OperationServicesAdapter operationsService = new OperationServicesAdapter(operationManager);
+
+ long timeout = 1000 * 60;
+ OperationContextImpl operationContext = new OperationContextImpl(storageNode.getId());
+ OperationServicesResult result = operationsService.invokeOperation(operationContext, "start",
+ new Configuration(), timeout);
+
+ assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The start operation failed.");
+
+ File binDir = new File(basedir, "bin");
+ File pidFile = new File(binDir, "cassandra.pid");
+
+ assertTrue(pidFile.exists(), pidFile + " should be created when starting the storage node.");
+
+ assertNodeIsUp("Expected " + storageNode + " to be up after restarting it.");
}
private void assertNodeIsUp(String msg) {
@@ -192,7 +220,8 @@ public class StorageNodeComponentITest {
private Availability getAvailability() {
InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager();
- return inventoryManager.getAvailabilityIfKnown(storageNode);
+// return inventoryManager.getAvailabilityIfKnown(storageNode);
+ return inventoryManager.getCurrentAvailability(storageNode);
}
private void executeAvailabilityScan() {
[rhq] modules/plugins
by John Sanda
modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
New commits:
commit 96286a4e22da2135a85ad6b09d069b9e690a05c9
Author: John Sanda <jsanda(a)redhat.com>
Date: Wed Jul 24 07:08:09 2013 -0400
uncommented code that had been commented out while debugging tests
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
index f76da22..cf24fcd 100644
--- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
+++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
@@ -111,7 +111,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
clusterBuilder = clusterBuilder.withCredentials(username, password);
}
-// this.cassandraSession = clusterBuilder.build().connect(clusterName);
+ this.cassandraSession = clusterBuilder.build().connect(clusterName);
} catch (Exception e) {
LOG.error("Connect to Cassandra " + host + ":" + nativePort, e);
throw e;
[rhq] modules/enterprise modules/integration-tests
by Heiko W. Rupp
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java | 49 +++++++---
modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java | 8 +
2 files changed, 45 insertions(+), 12 deletions(-)
New commits:
commit 1ceae7f8fc049bd036d03e4c84fadb5a8a057563
Author: Heiko W. Rupp <hwr(a)redhat.com>
Date: Wed Jul 24 11:54:53 2013 +0200
BZ 976786 Add a bit more wait time and an additional check if SUCCESS really means it. Return IN_PROGRESS otherwise.
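The core of the change is a null check after looking up the created resource: the creation history can report SUCCESS before the resource is actually committed to inventory, in which case the handler now falls back to IN_PROGRESS instead of returning a null entity. Roughly, condensed from the diff below (parentId and createdName stand in for the longer expressions used in the handler):

    CreateResourceStatus status = history.getStatus();
    if (status == CreateResourceStatus.SUCCESS) {
        ResourceWithType rwt = findCreatedResource(parentId, createdName, uriInfo);
        if (rwt != null) {
            builder = Response.ok();
            builder.entity(rwt);
        } else {
            // history claims SUCCESS, but the resource is not committed yet:
            // downgrade to IN_PROGRESS so the client retries via the Location header
            status = CreateResourceStatus.IN_PROGRESS;
        }
    }
    // a separate (non-else) check then handles IN_PROGRESS with a 302 redirect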
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java
index 4bfbb7c..7a6fb33 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/rest/ResourceHandlerBean.java
@@ -721,6 +721,9 @@ public class ResourceHandlerBean extends AbstractRestBean {
@POST
@Path("/")
+ @ApiErrors({
+ @ApiError(code = 302, reason = "Creation is still happening. Check back with a GET on the Location.")
+ })
@ApiOperation(value = "Create a new resource as a child of an existing resource. ",
notes= "If a handle is given, a content based resource is created; the content identified by the handle is not removed from the content store." +
"If no handle is given, a resource is created from the data of the passed 'resource' object.")
@@ -824,6 +827,11 @@ public class ResourceHandlerBean extends AbstractRestBean {
CreateResourceStatus status = history.getStatus();
+ try {
+ Thread.sleep(2000L); // give the agent time to do the work
+ } catch (InterruptedException e) {
+ ; // nothing
+ }
MediaType mediaType = headers.getAcceptableMediaTypes().get(0);
@@ -832,11 +840,16 @@ public class ResourceHandlerBean extends AbstractRestBean {
if ( status == CreateResourceStatus.SUCCESS) {
ResourceWithType rwt = findCreatedResource(history.getParentResource().getId(),history.getCreatedResourceName(),uriInfo);
-
- builder = Response.ok();
- builder.entity(rwt);
+ if (rwt!=null) {
+ builder = Response.ok();
+ builder.entity(rwt);
+ } else {
+ // History says we had success but due to internal timing
+ // the resource is not yet visible, so switch to in_progress
+ status = CreateResourceStatus.IN_PROGRESS;
+ }
}
- else if (status==CreateResourceStatus.IN_PROGRESS) {
+ if (status==CreateResourceStatus.IN_PROGRESS) {
try {
Thread.sleep(2000L); // give the agent time to do the work
@@ -865,6 +878,7 @@ public class ResourceHandlerBean extends AbstractRestBean {
@GET
@Path("/creationStatus/{id}")
@ApiOperation("Get the status of a resource creation for content based resources.")
+ @ApiError(code = 302, reason = "Creation is still going on. Check back later with the same URL.")
public Response getHistoryItem(@PathParam("id") int historyId, @Context HttpHeaders headers, @Context UriInfo uriInfo) {
CreateResourceHistory history;
@@ -888,13 +902,17 @@ public class ResourceHandlerBean extends AbstractRestBean {
if (status== CreateResourceStatus.SUCCESS) {
ResourceWithType rwt = findCreatedResource(history.getParentResource().getId(),history.getCreatedResourceName(),uriInfo);
-
- builder = Response.ok();
- setCachingHeader(builder, 600);
- builder.entity(rwt);
-
+ if (rwt!=null) {
+ builder = Response.ok();
+ setCachingHeader(builder, 600);
+ builder.entity(rwt);
+ } else {
+ // History says we had success but due to internal timing
+ // the resource is not yet visible, so switch to in_progress
+ status = CreateResourceStatus.IN_PROGRESS;
+ }
}
- else if (status==CreateResourceStatus.IN_PROGRESS) {
+ if (status==CreateResourceStatus.IN_PROGRESS) {
UriBuilder uriBuilder = uriInfo.getRequestUriBuilder();
@@ -913,6 +931,14 @@ public class ResourceHandlerBean extends AbstractRestBean {
}
+ /**
+ * Find the created resource by its name and parent. Will only return it
+ * if the resource is already committed.
+ * @param parentId Id of the parent
+ * @param name Name of the resource to find
+ * @param uriInfo UriInfo object to fill links in the returned resource
+ * @return A ResourceWithType if found, null otherwise.
+ */
private ResourceWithType findCreatedResource(int parentId, String name, UriInfo uriInfo) {
ResourceCriteria criteria = new ResourceCriteria();
criteria.setStrict(true);
@@ -920,6 +946,9 @@ public class ResourceHandlerBean extends AbstractRestBean {
criteria.addFilterName(name);
criteria.addFilterInventoryStatus(InventoryStatus.COMMITTED);
List<Resource> resources = resMgr.findResourcesByCriteria(caller,criteria);
+ if (resources.size()==0) {
+ return null;
+ }
Resource res = resources.get(0);
return fillRWT(res,uriInfo);
}
diff --git a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java
index 8303513..b12eea3 100644
--- a/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java
+++ b/modules/integration-tests/rest-api/src/test/java/org/rhq/modules/integrationTests/restApi/ContentTest.java
@@ -222,8 +222,10 @@ public class ContentTest extends AbstractBase {
int status = response.getStatusCode();
String location = response.getHeader("Location");
- System.out.println("\nLocation " + location + "\n\n");
- assert location!=null;
+ if (status!=200) {
+ System.out.println("\nLocation " + location + "\n\n");
+ assert location!=null;
+ }
// We need to check what we got. A 302 means the deploy is still
// in progress, so we need to wait a little longer
@@ -244,6 +246,7 @@ public class ContentTest extends AbstractBase {
createdResourceId = response.jsonPath().getInt("resourceId");
+ System.out.flush();
System.out.println("\n Deploy is done, resource Id = " + createdResourceId + " \n");
System.out.flush();
@@ -254,6 +257,7 @@ public class ContentTest extends AbstractBase {
// Remove the uploaded content
removeContent(handle, false);
+ System.out.flush();
System.out.println("\n Content removed \n");
System.out.flush();
[rhq] Branch 'code-smell' - 51 commits - etc/dev-utils etc/scripts modules/common modules/core modules/enterprise modules/helpers modules/plugins pom.xml
by lkrejci
etc/dev-utils/TestLdapSettings.java | 102 +
etc/scripts/rhq48-storage-patch/rhq48-storage-patch.bat | 48
etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh | 42
modules/common/cassandra-auth/pom.xml | 27
modules/common/cassandra-auth/src/main/java/org/rhq/cassandra/auth/RhqInternodeAuthenticator.java | 78 +
modules/common/cassandra-auth/src/main/java/org/rhq/cassandra/auth/RhqInternodeAuthenticatorMBean.java | 10
modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml | 9
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/bin/cassandra | 3
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties | 2
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra-env.sh | 247 ---
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml | 2
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java | 37
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java | 87 +
modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties | 2
modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml | 2
modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java | 93 +
modules/common/cassandra-installer/src/test/java/org/rhq/storage/installer/StorageInstallerTest.java | 93 +
modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/cassandra-env.sh | 247 +++
modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/cassandra.yaml | 690 ++++++++++
modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/log4j-server.properties | 45
modules/common/cassandra-schema/pom.xml | 6
modules/common/pom.xml | 1
modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/PluginMetadataParser.java | 44
modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/ExtensionModelTest.java | 2
modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/NestedSubCategoriesMetadataParserTest.java | 159 ++
modules/core/client-api/src/test/resources/test-hibernate.xml | 8
modules/core/client-api/src/test/resources/test-jbossas.xml | 10
modules/core/client-api/src/test/resources/test-subcategories-nested.xml | 60
modules/core/domain/intentional-api-changes-since-4.8.0.xml | 8
modules/core/domain/src/main/java/org/rhq/core/domain/alert/Alert.java | 3
modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDampening.java | 3
modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDefinition.java | 4
modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java | 2
modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java | 130 +
modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java | 58
modules/core/domain/src/main/java/org/rhq/core/domain/criteria/AlertDefinitionCriteria.java | 9
modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementUnits.java | 13
modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/ResourceContainer.java | 15
modules/enterprise/gui/coregui/.externalToolBuilders/org.eclipse.wst.jsdt.core.javascriptValidator.launch | 7
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java | 32
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java | 5
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDataSource.java | 20
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDetailsView.java | 42
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/ConditionEditor.java | 38
modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties | 4
modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties | 4
modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties | 3
modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties | 4
modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties | 4
modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties | 4
modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties | 4
modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties | 4
modules/enterprise/server/appserver/pom.xml | 11
modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.conf | 36
modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.env | 24
modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-storage-wrapper.inc | 6
modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml | 11
modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java | 5
modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/ldap/FakeLdapContext.java | 37
modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml | 14
modules/enterprise/server/jar/pom.xml | 10
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java | 207 ++-
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java | 47
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java | 22
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java | 1
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java | 3
modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java | 45
modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/ControlCommand.java | 86 +
modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java | 37
modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java | 18
modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Start.java | 10
modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Stop.java | 10
modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/ConfigProperty.java | 49
modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Units.java | 12
modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/AnnotationProcessor.java | 126 +
modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java | 104 -
modules/helpers/pluginGen/src/main/resources/descriptorMain.ftl | 14
modules/helpers/pluginGen/src/test/java/org/rhq/helpers/pluginGen/test/FooBean.java | 11
modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java | 15
modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java | 68
modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml | 8
modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseProcessDiscovery.java | 55
modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/helper/HostConfiguration.java | 24
modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/DomainServerComponentTest.java | 7
modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/SecurityModuleOptionsTest.java | 12
modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/StandaloneServerComponentTest.java | 7
modules/plugins/rhq-storage/pom.xml | 113 +
modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java | 245 +++
modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java | 142 +-
modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml | 34
modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java | 216 +++
modules/plugins/rhq-storage/src/test/resources/log4j.properties | 42
pom.xml | 6
93 files changed, 3893 insertions(+), 633 deletions(-)
New commits:
commit b537244bad778a80f6fdf92880abc245eed465ec
Author: John Sanda <jsanda(a)redhat.com>
Date: Tue Jul 23 22:23:06 2013 -0400
initial commit for StorageNodeComponentITest
This is a first stab at some integration tests for the storage plugin.
diff --git a/modules/plugins/rhq-storage/pom.xml b/modules/plugins/rhq-storage/pom.xml
index a88f56e..df79e40 100644
--- a/modules/plugins/rhq-storage/pom.xml
+++ b/modules/plugins/rhq-storage/pom.xml
@@ -10,11 +10,16 @@
<groupId>org.rhq</groupId>
<artifactId>rhq-rhqstorage-plugin</artifactId>
- <packaging>jar</packaging>
<name>RHQ Storage Plugin</name>
<description>A plugin for managing RHQ Storage Nodes</description>
+ <properties>
+ <pc.basedir>${project.build.directory}/plugin-container</pc.basedir>
+ <pc.plugins.dir>${pc.basedir}/plugins</pc.plugins.dir>
+ <pc.lib.dir>${pc.basedir}/lib</pc.lib.dir>
+ </properties>
+
<dependencies>
<dependency>
<groupId>${rhq.groupId}</groupId>
@@ -27,7 +32,6 @@
<groupId>${rhq.groupId}</groupId>
<artifactId>rhq-cassandra-plugin</artifactId>
<version>${project.version}</version>
- <!--<scope>provided</scope>-->
</dependency>
<dependency>
@@ -35,8 +39,113 @@
<artifactId>org-mc4j-ems</artifactId>
<scope>provided</scope>
</dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>rhq-cassandra-ccm-core</artifactId>
+ <version>${project.version}</version>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>rhq-platform-plugin</artifactId>
+ <version>${project.version}</version>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>rhq-cassandra-schema</artifactId>
+ <version>${project.version}</version>
+ </dependency>
</dependencies>
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <phase>pre-integration-test</phase>
+ <configuration>
+ <target>
+ <property name="sigar.dir" value="${project.build.directory/sigar}"/>
+
+ <mkdir dir="${pc.basedir}"/>
+ <mkdir dir="${pc.lib.dir}"/>
+ <mkdir dir="${pc.plugins.dir}"/>
+
+ <copy file="${org.rhq:rhq-platform-plugin:jar}" todir="${pc.plugins.dir}"/>
+ <copy file="${org.rhq:rhq-jmx-plugin:jar}" todir="${pc.plugins.dir}"/>
+ <copy file="${org.rhq:rhq-cassandra-plugin:jar}" todir="${pc.plugins.dir}"/>
+ <copy file="${project.build.directory}/${project.build.finalName}.jar" todir="${pc.plugins.dir}"/>
+
+ <unzip src="${org.hyperic:sigar-dist:zip}" dest="${sigar.dir}">
+ <patternset>
+ <include name="**/lib/sigar.jar" />
+ <include name="**/lib/bcel*.jar" />
+ <include name="**/lib/*.so" />
+ <include name="**/lib/*.sl" />
+ <include name="**/lib/*.dll" />
+ <include name="**/lib/*.dylib" />
+ </patternset>
+ </unzip>
+ <move todir="${pc.lib.dir}" flatten="true">
+ <fileset dir="${sigar.dir}" includes="**/lib/*"/>
+ </move>
+ <delete dir="${sigar.dir}"/>
+ </target>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <artifactId>maven-failsafe-plugin</artifactId>
+ <version>2.13</version>
+ <executions>
+ <execution>
+ <id>integration-test</id>
+ <goals>
+ <goal>integration-test</goal>
+ </goals>
+ <configuration>
+ <includes>
+ <include>**/*ITest.java</include>
+ </includes>
+ <argLine>-Djava.library.path=${pc.lib.dir}</argLine>
+ <systemPropertyVariables>
+ <pc.plugins.dir>${pc.plugins.dir}</pc.plugins.dir>
+ </systemPropertyVariables>
+ </configuration>
+ </execution>
+ <execution>
+ <id>verify</id>
+ <goals>
+ <goal>verify</goal>
+ </goals>
+ <configuration>
+ <testFailureIgnore>false</testFailureIgnore>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <excludes>
+ <exclude>**/*ITest.java</exclude>
+ </excludes>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
<profiles>
<profile>
<id>dev</id>
diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
new file mode 100644
index 0000000..cd9f148
--- /dev/null
+++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
@@ -0,0 +1,216 @@
+package org.rhq.plugins.storage;
+
+import static java.util.Arrays.asList;
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertNotNull;
+
+import java.io.File;
+import java.net.InetAddress;
+import java.util.Set;
+
+import com.google.common.collect.Sets;
+
+import org.testng.annotations.AfterSuite;
+import org.testng.annotations.BeforeSuite;
+import org.testng.annotations.Test;
+
+import org.rhq.cassandra.CassandraClusterManager;
+import org.rhq.cassandra.ClusterInitService;
+import org.rhq.cassandra.Deployer;
+import org.rhq.cassandra.DeploymentOptions;
+import org.rhq.cassandra.DeploymentOptionsFactory;
+import org.rhq.cassandra.schema.SchemaManager;
+import org.rhq.core.clientapi.server.discovery.InventoryReport;
+import org.rhq.core.domain.cloud.StorageNode;
+import org.rhq.core.domain.configuration.Configuration;
+import org.rhq.core.domain.measurement.Availability;
+import org.rhq.core.domain.measurement.AvailabilityType;
+import org.rhq.core.domain.resource.Resource;
+import org.rhq.core.domain.resource.ResourceType;
+import org.rhq.core.pc.PluginContainer;
+import org.rhq.core.pc.PluginContainerConfiguration;
+import org.rhq.core.pc.inventory.InventoryManager;
+import org.rhq.core.pc.operation.OperationContextImpl;
+import org.rhq.core.pc.operation.OperationManager;
+import org.rhq.core.pc.operation.OperationServicesAdapter;
+import org.rhq.core.pc.plugin.FileSystemPluginFinder;
+import org.rhq.core.pluginapi.operation.OperationServicesResult;
+import org.rhq.core.pluginapi.operation.OperationServicesResultCode;
+import org.rhq.core.pluginapi.util.ProcessExecutionUtility;
+import org.rhq.core.system.ProcessExecution;
+import org.rhq.core.system.ProcessExecutionResults;
+import org.rhq.core.system.SystemInfo;
+import org.rhq.core.system.SystemInfoFactory;
+
+/**
+ * @author John Sanda
+ */
+public class StorageNodeComponentITest {
+
+ private File basedir;
+
+ private Resource storageNode;
+
+ @BeforeSuite
+ public void deployStorageNodeAndPluginContainer() throws Exception {
+ basedir = new File("target", "rhq-storage");
+
+ deployStorageNode();
+
+ initPluginContainer();
+ }
+
+ private void deployStorageNode() throws Exception {
+ DeploymentOptionsFactory factory = new DeploymentOptionsFactory();
+ DeploymentOptions deploymentOptions = factory.newDeploymentOptions();
+ String address = "127.0.0.1";
+
+ deploymentOptions.setSeeds(address);
+ deploymentOptions.setListenAddress(address);
+ deploymentOptions.setRpcAddress(address);
+ deploymentOptions.setBasedir(basedir.getAbsolutePath());
+ deploymentOptions.setCommitLogDir(new File(basedir, "commit_log").getAbsolutePath());
+ deploymentOptions.setDataDir(new File(basedir, "data").getAbsolutePath());
+ deploymentOptions.setSavedCachesDir(new File(basedir, "saved_caches").getAbsolutePath());
+ deploymentOptions.setCommitLogDir(new File(basedir, "logs").getAbsolutePath());
+ deploymentOptions.setLoggingLevel("DEBUG");
+ deploymentOptions.setNativeTransportPort(9142);
+ deploymentOptions.setJmxPort(7399);
+ deploymentOptions.setHeapSize("256M");
+ deploymentOptions.setHeapNewSize("64M");
+
+ deploymentOptions.load();
+
+ Deployer deployer = new Deployer();
+ deployer.setDeploymentOptions(deploymentOptions);
+
+ deployer.unzipDistro();
+ deployer.applyConfigChanges();
+ deployer.updateFilePerms();
+ deployer.updateStorageAuthConf(Sets.newHashSet(InetAddress.getByName(address)));
+
+ File binDir = new File(basedir, "bin");
+ SystemInfo systemInfo = SystemInfoFactory.createSystemInfo();
+
+ File startScript = new File(binDir, "cassandra");
+ ProcessExecution startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript);
+
+ startScriptExe.addArguments(asList("-p", "cassandra.pid"));
+ startScriptExe.setCaptureOutput(true);
+ ProcessExecutionResults results = systemInfo.executeProcess(startScriptExe);
+
+ assertEquals(results.getExitCode(), (Integer) 0, "Cassandra failed to start: " + results.getCapturedOutput());
+
+ StorageNode storageNode = new StorageNode();
+ storageNode.parseNodeInformation("127.0.0.1|7399|9142");
+
+ ClusterInitService clusterInitService = new ClusterInitService();
+ clusterInitService.waitForClusterToStart(asList(storageNode));
+
+ SchemaManager schemaManager = new SchemaManager("rhqadmin", "rhqadmin", "127.0.0.1|7399|9142");
+ schemaManager.install();
+ schemaManager.updateTopology(true);
+ }
+
+ private void initPluginContainer() {
+ PluginContainerConfiguration pcConfig = new PluginContainerConfiguration();
+ File pluginsDir = new File(System.getProperty("pc.plugins.dir"));
+ pcConfig.setPluginDirectory(pluginsDir);
+ pcConfig.setPluginFinder(new FileSystemPluginFinder(pluginsDir));
+
+ pcConfig.setInsideAgent(false);
+ PluginContainer.getInstance().setConfiguration(pcConfig);
+ PluginContainer.getInstance().initialize();
+ }
+
+ @AfterSuite
+ public void ShutdownPluginContainerAndStorageNode() throws Exception {
+ PluginContainer.getInstance().shutdown();
+ shutdownStorageNodeIfNecessary();
+ }
+
+ private void shutdownStorageNodeIfNecessary() throws Exception {
+ File binDir = new File(basedir, "bin");
+ File pidFile = new File(binDir, "cassandra.pid");
+
+ if (pidFile.exists()) {
+ CassandraClusterManager ccm = new CassandraClusterManager();
+ ccm.killNode(basedir);
+ }
+ }
+
+ @Test
+ public void discoverStorageNode() {
+ InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager();
+ InventoryReport inventoryReport = inventoryManager.executeServerScanImmediately();
+
+ if (inventoryReport.getAddedRoots().isEmpty()) {
+ // could be empty if the storage node is already in inventory from
+ // a prior discovery scan.
+ Resource platform = inventoryManager.getPlatform();
+ storageNode = findCassandraNode(platform.getChildResources());
+ } else {
+ storageNode = findCassandraNode(inventoryReport.getAddedRoots());
+ }
+
+ assertNotNull(storageNode, "Failed to discover Storage Node instance");
+ assertNodeIsUp("Expected " + storageNode + " to be UP after discovery");
+ }
+
+ @Test(dependsOnMethods = "discoverStorageNode")
+ public void shutdownStorageNode() throws Exception {
+ OperationManager operationManager = PluginContainer.getInstance().getOperationManager();
+ OperationServicesAdapter operationsService = new OperationServicesAdapter(operationManager);
+
+ long timeout = 1000 * 60;
+ OperationContextImpl operationContext = new OperationContextImpl(storageNode.getId());
+ OperationServicesResult result = operationsService.invokeOperation(operationContext, "shutdown",
+ new Configuration(), timeout);
+
+ assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The shutdown operation failed");
+ // TODO why is this failing?
+ assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down");
+ }
+
+ private void assertNodeIsUp(String msg) {
+ executeAvailabilityScan();
+
+ Availability availability = getAvailability();
+
+ assertNotNull(availability, "Unable to determine availability for " + storageNode);
+ assertEquals(availability.getAvailabilityType(), AvailabilityType.UP, msg);
+ }
+
+ private void assertNodeIsDown(String msg) {
+ executeAvailabilityScan();
+
+ Availability availability = getAvailability();
+
+ assertNotNull(availability, "Unable to determine availability for " + storageNode);
+ assertEquals(availability.getAvailabilityType(), AvailabilityType.DOWN, msg);
+ }
+
+ private Availability getAvailability() {
+ InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager();
+ return inventoryManager.getAvailabilityIfKnown(storageNode);
+ }
+
+ private void executeAvailabilityScan() {
+ InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager();
+ inventoryManager.executeAvailabilityScanImmediately(false, true);
+ }
+
+ private Resource findCassandraNode(Set<Resource> resources) {
+ for (Resource resource : resources) {
+ if (isCassandraNode(resource.getResourceType())) {
+ return resource;
+ }
+ }
+ return null;
+ }
+
+ private boolean isCassandraNode(ResourceType type) {
+ return type.getPlugin().equals("RHQStorage") && type.getName().equals("RHQ Storage Node");
+ }
+
+}
diff --git a/modules/plugins/rhq-storage/src/test/resources/log4j.properties b/modules/plugins/rhq-storage/src/test/resources/log4j.properties
new file mode 100644
index 0000000..67db049
--- /dev/null
+++ b/modules/plugins/rhq-storage/src/test/resources/log4j.properties
@@ -0,0 +1,42 @@
+#
+# /*
+# * RHQ Management Platform
+# * Copyright (C) 2005-2012 Red Hat, Inc.
+# * All rights reserved.
+# *
+# * This program is free software; you can redistribute it and/or modify
+# * it under the terms of the GNU General Public License, version 2, as
+# * published by the Free Software Foundation, and/or the GNU Lesser
+# * General Public License, version 2.1, also as published by the Free
+# * Software Foundation.
+# *
+# * This program is distributed in the hope that it will be useful,
+# * but WITHOUT ANY WARRANTY; without even the implied warranty of
+# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# * GNU General Public License and the GNU Lesser General Public License
+# * for more details.
+# *
+# * You should have received a copy of the GNU General Public License
+# * and the GNU Lesser General Public License along with this program;
+# * if not, write to the Free Software Foundation, Inc.,
+# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+# */
+#
+
+log4j.rootCategory=WARN, FILE, CONSOLE
+
+log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.FILE.DatePattern='.'yyyy-MM-dd
+log4j.appender.FILE.File=./target/test.log
+log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.FILE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n
+#log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n
+log4j.appender.FILE.Append=false
+
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+#log4j.appender.CONSOLE.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n
+
+log4j.logger.org.rhq=DEBUG
+log4j.logger.com.datastax=DEBUG
commit 83e5b228871c9a8352e98a12e0db76f8f4ea982e
Author: John Sanda <jsanda(a)redhat.com>
Date: Tue Jul 23 22:16:41 2013 -0400
first stab at prepareForBootstrap operation (which is currently broken)
This is clearly broken from some manual testing I did. Given that the
implementation is a bit sloppy at the moment, this is a good time to get some
automated tests in place. The operation will perform the following steps in the
order specified:
1) shut down the storage node
2) update cassandra.yaml
3) update rhq-storage-auth.conf
4) restart the node
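As a schematic skeleton of those four steps (error handling, the cassandra.yaml rewrite and the auth-file update are elided here; the full implementation is in the diff below):

    private OperationResult prepareForBootstrap(Configuration params) {
        OperationResult result = new OperationResult();
        stopNode();    // 1) shut down the storage node
        // 2) edit cassandra.yaml: purge commit log/data/saved caches dirs, update seeds,
        //    native_transport_port and storage_port
        // 3) rewrite rhq-storage-auth.conf with the cluster's node addresses
        startNode();   // 4) restart so the node bootstraps into the cluster
        return result;
    }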
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
index 0037bfe..f76da22 100644
--- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
+++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
@@ -111,7 +111,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
clusterBuilder = clusterBuilder.withCredentials(username, password);
}
- this.cassandraSession = clusterBuilder.build().connect(clusterName);
+// this.cassandraSession = clusterBuilder.build().connect(clusterName);
} catch (Exception e) {
LOG.error("Connect to Cassandra " + host + ":" + nativePort, e);
throw e;
@@ -196,7 +196,17 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
operation = storageService.getOperation("drain", emptyParams);
operation.invoke((Object[]) emptyParams);
- ProcessInfo process = context.getNativeProcess();
+ return stopNode();
+ }
+
+ protected OperationResult stopNode() {
+ ProcessInfo process = getResourceContext().getNativeProcess();
+
+ if (processInfo == null) {
+ LOG.warn("Failed to obtain process info. It appears Cassandra is already shutdown.");
+ return new OperationResult("Failed to obtain process info. It appears Cassandra is already shutdown.");
+ }
+
long pid = process.getPid();
try {
process.kill("KILL");
@@ -209,6 +219,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
}
}
+
protected OperationResult startNode() {
ResourceContext<?> context = getResourceContext();
Configuration pluginConfig = context.getPluginConfiguration();
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
index 3b0aa5b..d9b35b9 100644
--- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
+++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
@@ -26,11 +26,15 @@
package org.rhq.plugins.storage;
import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.io.StringReader;
+import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
@@ -39,6 +43,8 @@ import org.mc4j.ems.connection.EmsConnection;
import org.mc4j.ems.connection.bean.EmsBean;
import org.mc4j.ems.connection.bean.attribute.EmsAttribute;
import org.mc4j.ems.connection.bean.operation.EmsOperation;
+import org.yaml.snakeyaml.DumperOptions;
+import org.yaml.snakeyaml.Yaml;
import org.rhq.core.domain.configuration.Configuration;
import org.rhq.core.domain.configuration.ConfigurationUpdateStatus;
@@ -48,6 +54,7 @@ import org.rhq.core.domain.configuration.PropertyMap;
import org.rhq.core.domain.configuration.PropertySimple;
import org.rhq.core.pluginapi.configuration.ConfigurationFacet;
import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport;
+import org.rhq.core.pluginapi.inventory.ResourceContext;
import org.rhq.core.pluginapi.operation.OperationFacet;
import org.rhq.core.pluginapi.operation.OperationResult;
import org.rhq.core.util.StringUtil;
@@ -96,6 +103,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
return updateConfiguration(parameters);
} else if (name.equals("updateKnownNodes")) {
return updateKnownNodes(parameters);
+ } else if (name.equals("prepareForBootstrap")) {
+ return prepareForBootstrap(parameters);
} else {
return super.invokeOperation(name, parameters);
}
@@ -132,6 +141,18 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
ipAddresses.add(propertySimple.getStringValue());
}
+ if (updateAuthFile(result, ipAddresses)) return result;
+
+ EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator");
+ EmsOperation emsOperation = authBean.getOperation("reloadConfiguration");
+ emsOperation.invoke();
+
+ result.setSimpleResult("Successfully updated the set of known nodes.");
+
+ return result;
+ }
+
+ private boolean updateAuthFile(OperationResult result, Set<String> ipAddresses) {
log.info("Updating known nodes to " + ipAddresses);
File confDir = new File(getBasedir(), "conf");
@@ -150,7 +171,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
log.error(msg);
result.setErrorMessage(msg);
- return result;
+ return true;
}
}
@@ -161,7 +182,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
"to unexpected error";
log.error(msg, e);
result.setErrorMessage(msg + ": " + ThrowableUtil.getRootMessage(e));
- return result;
+ return true;
}
try {
@@ -176,18 +197,127 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
}
result.setErrorMessage("There was an unexpected error while updating " + authFile + ". Make sure that " +
"it matches " + authBackupFile + " and then reschedule the operation.");
+ return true;
+ }
+ return false;
+ }
+
+ private OperationResult prepareForBootstrap(Configuration params) {
+ log.info("Preparing " + this + " for bootstrap...");
+
+ ResourceContext context = getResourceContext();
+ OperationResult result = new OperationResult();
+
+ log.info("Stopping storage node");
+ OperationResult stopNodeResult = stopNode();
+ if (stopNodeResult.getErrorMessage() != null) {
+ log.error("Failed to stop storage node " + this + " Cannot prepare the node for bootstrap which means " +
+ "that the storage node cannot join the cluster. Make sure the storage node is not running and retry " +
+ "the operation");
+ result.setErrorMessage("Failed to stop storage node. Cannot prepare the node for bootstrap which means " +
+ "that it cannot join the cluster. Make sure that the node is not running and retry the operation. " +
+ "Stopping the storage node failed with this error: " + stopNodeResult.getErrorMessage());
return result;
}
- EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator");
- EmsOperation emsOperation = authBean.getOperation("reloadConfiguration");
- emsOperation.invoke();
+ Configuration pluginConfig = context.getPluginConfiguration();
+ String yamlProp = pluginConfig.getSimpleValue("yamlConfiguration");
+ File yamlFile = new File(yamlProp);
- result.setSimpleResult("Successfully updated the set of known nodes.");
+ DumperOptions options = new DumperOptions();
+ options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
+ Yaml yaml = new Yaml(options);
+
+ Map yamlConfig = null;
+ try {
+ yamlConfig = (Map) yaml.load(new FileInputStream(yamlFile));
+ } catch (FileNotFoundException e) {
+ log.error("Failed to load " + yamlFile, e);
+ log.error("Cannot prepare " + this + " for bootstrap. " + yamlFile + " must exist in order to make the " +
+ "necessary configuration changes.");
+ result.setErrorMessage("Cannot prepare storage node for bootstrap. It appears that " + yamlFile +
+ " does not exist. Make sure that it exists so that the necessary configuration changes can be made.");
+
+ return result;
+ }
+
+ purgeDir(getCommitLogDir(yamlConfig));
+ for (File dataDir : getDataDirs(yamlConfig)) {
+ purgeDir(dataDir);
+ }
+ purgeDir(getSavedCachesDir(yamlConfig));
+
+ log.info("Updating cluster settings");
+
+ String address = pluginConfig.getSimpleValue("host");
+ List<String> seeds = getAddresses(params.getList("storageNodeIPAddresses"));
+ // Make sure this node's address is not in the list; otherwise, it
+ // won't bootstrap properly.
+ seeds.remove(address);
+ try {
+ updateSeedsList(seeds);
+ } catch (IOException e) {
+ log.error("Failed to update seeds property in " + yamlFile, e);
+ result.setErrorMessage("Failed to prepared node for bootstrap due to unexpected error that occurred " +
+ "while updating seeds property in " + yamlFile + ":\n" + ThrowableUtil.getAllMessages(e));
+ return result;
+ }
+
+ if (updateAuthFile(result, new HashSet<String>(seeds))) {
+ return result;
+ }
+
+ int cqlPort = Integer.parseInt(params.getSimpleValue("cqlPort"));
+ int gossipPort = Integer.parseInt(params.getSimpleValue("gossipPort"));
+
+ yamlConfig.put("native_transport_port", cqlPort);
+ yamlConfig.put("storage_port", gossipPort);
+
+ try {
+ yaml.dump(yamlConfig, new FileWriter(yamlFile));
+ } catch (IOException e) {
+ log.error("Could not update cluster settings in " + yamlFile, e);
+ result.setErrorMessage("Could not update cluster settings in " + yamlFile + ":\n" +
+ ThrowableUtil.getAllMessages(e));
+ return result;
+ }
+
+ log.info(this + " is ready to be bootstrap. Restarting storage node...");
+ OperationResult startResult = startNode();
+ if (startResult.getErrorMessage() != null) {
+ log.error("Failed to restart storage node:\n" + startResult.getErrorMessage());
+ result.setErrorMessage("Failed to restart storage node:\n" + startResult.getErrorMessage());
+ } else {
+ result.setSimpleResult("The storage node was succesfully updated is now bootstrapping into the cluster.");
+ }
return result;
}
+ private void purgeDir(File dir) {
+ log.info("Purging " + dir);
+ FileUtil.purge(dir, true);
+ }
+
+ private File getCommitLogDir(Map yamlConfig) {
+ return new File((String) yamlConfig.get("commitlog_directory"));
+ }
+
+ private List<File> getDataDirs(Map yamlConfig) {
+ List<File> dirs = new ArrayList<File>();
+ List<String> dirNames = (List<String>) yamlConfig.get("data_file_directories");
+
+ for (String dirName : dirNames) {
+ dirs.add(new File(dirName));
+ }
+
+ return dirs;
+ }
+
+ private File getSavedCachesDir(Map yamlConfig) {
+ return new File((String) yamlConfig.get("saved_caches_directory"));
+ }
+
private OperationResult nodeAdded(Configuration params) {
boolean runRepair = params.getSimple("runRepair").getBooleanValue();
boolean updateSeedsList = params.getSimple("updateSeedsList").getBooleanValue();
@@ -405,4 +535,10 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
boolean succeeded;
String details;
}
+
+ @Override
+ public String toString() {
+ return StorageNodeComponent.class.getSimpleName() + "[resourceKey: " + getResourceContext().getResourceKey() +
+ "]";
+ }
}
diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
index 1e39d6c..cd84de6 100644
--- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
+++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
@@ -101,6 +101,16 @@
</parameters>
</operation>
+ <operation name="prepareForBootstrap">
+ <parameters>
+ <c:simple-property name="cqlPort" type="integer" displayName="CQL Port"/>
+ <c:simple-property name="gossipPort" type="integer"/>
+ <c:list-property name="storageNodeIPAddresses" displayName="Storage Node IP Addresses">
+ <c:simple-property name="storageNodeIPAddress"/>
+ </c:list-property>
+ </parameters>
+ </operation>
+
<operation name="prepareForUpgrade" description="Prepares the storage node for upgrade (this operation consists of following steps: 1) turning off the RPC server, 2) turning off the gossiper, 3) taking the snapshot (backuping the data), 4) invoking the drain operation">
<parameters>
<c:simple-property name="snapshotName" required="false" type="string" displayName="Snapshot Name"
commit 4fa9f082b2e011b3bde9defe1021248148c4ad40
Author: Simeon Pinder <spinder(a)fulliautomatix.conchfritter.com>
Date: Tue Jul 23 15:02:09 2013 -0400
[BZ 984649] fix module metadata.
diff --git a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
index 145e3af..82ff294 100644
--- a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
+++ b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
@@ -290,7 +290,7 @@
<delete file="${jboss.home}/modules/system/layers/base/org/jgroups/main/jgroups-${jgroups.initial.version}.jar.index" />
<!-- Update the module metadata to the patched version -->
<replace file="${jboss.home}/modules/system/layers/base/org/jgroups/main/module.xml"
- token="jgroups-${jgroups.initial.version}.jar" value="${jgroups.patch.version}"/>
+ token="jgroups-${jgroups.initial.version}.jar" value="jgroups-${jgroups.patch.version}.jar"/>
<!-- Copy in patched version -->
<copy file="${settings.localRepository}/org/jgroups/jgroups/${jgroups.patch.version}/jgroups-${jgroups.patch.version}.jar"
toDir="${jboss.home}/modules/system/layers/base/org/jgroups/main" verbose="true"/>
commit 8203c669b3b3ba5ed5c3ef27f051220da93ea868
Author: Simeon Pinder <spinder(a)fulliautomatix.conchfritter.com>
Date: Tue Jul 23 13:37:32 2013 -0400
Upgrading richfaces to latest patched version.
diff --git a/pom.xml b/pom.xml
index 3662bc7..f909033 100644
--- a/pom.xml
+++ b/pom.xml
@@ -135,7 +135,7 @@
<postgresql.version>9.2-1002.jdbc4</postgresql.version>
<h2.version>1.2.139</h2.version>
<jtds.version>1.2.2</jtds.version>
- <richfaces.version>3.3.3.Final</richfaces.version>
+ <richfaces.version>3.3.4.Final</richfaces.version>
<jline.version>0.9.94</jline.version>
<sigar.version>1.6.5.132-5</sigar.version>
<sigar.zip.version>1.6.5</sigar.zip.version>
commit caeb7a5c832334b74f76a265c8028a5697152dda
Author: Simeon Pinder <spinder(a)fulliautomatix.conchfritter.com>
Date: Tue Jul 23 12:28:52 2013 -0400
[BZ 984649] update jgroups usage to latest patched version.
diff --git a/modules/enterprise/server/appserver/pom.xml b/modules/enterprise/server/appserver/pom.xml
index 0a61138..f1a4c7b 100644
--- a/modules/enterprise/server/appserver/pom.xml
+++ b/modules/enterprise/server/appserver/pom.xml
@@ -19,6 +19,8 @@
<properties>
<rhq.dev.data.dir>${rhq.rootDir}/rhq-data</rhq.dev.data.dir>
+ <jgroups.initial.version>3.2.7.Final</jgroups.initial.version>
+ <jgroups.patch.version>3.2.10.Final</jgroups.patch.version>
</properties>
<dependencies>
@@ -72,6 +74,13 @@
<groupId>org.codehaus.groovy</groupId>
<artifactId>groovy-all</artifactId>
</dependency>
+
+ <!-- Pull down the patched version of JGroups. See CVE 2013-4112 and BZ 984365 -->
+ <dependency>
+ <groupId>org.jgroups</groupId>
+ <artifactId>jgroups</artifactId>
+ <version>${jgroups.patch.version}</version>
+ </dependency>
</dependencies>
<build>
@@ -157,6 +166,8 @@
<property name="rhq.server.http.port" value="${rhq.server.http.port}" />
<property name="rhq.server.https.port" value="${rhq.server.https.port}" />
<property name="rhq.sync.endpoint-address" value="${rhq.sync.endpoint-address}" />
+ <property name="jgroups.initial.version" value="${jgroups.initial.version}" />
+ <property name="jgroups.patch.version" value="${jgroups.patch.version}" />
</ant>
</target>
</configuration>
diff --git a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
index a81b6cd..145e3af 100644
--- a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
+++ b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
@@ -283,6 +283,17 @@
</resources>
</module>
]]></echo>
+
+ <echo>Updating JGroups module component for EAP to ${jgroups.patch.version}</echo>
+ <!-- Remove the unpatched version -->
+ <delete file="${jboss.home}/modules/system/layers/base/org/jgroups/main/jgroups-${jgroups.initial.version}.jar" />
+ <delete file="${jboss.home}/modules/system/layers/base/org/jgroups/main/jgroups-${jgroups.initial.version}.jar.index" />
+ <!-- Update the module metadata to the patched version -->
+ <replace file="${jboss.home}/modules/system/layers/base/org/jgroups/main/module.xml"
+ token="jgroups-${jgroups.initial.version}.jar" value="${jgroups.patch.version}"/>
+ <!-- Copy in patched version -->
+ <copy file="${settings.localRepository}/org/jgroups/jgroups/${jgroups.patch.version}/jgroups-${jgroups.patch.version}.jar"
+ toDir="${jboss.home}/modules/system/layers/base/org/jgroups/main" verbose="true"/>
<echo>Generate SSL key for RHQ server - 128-bit key that expires in 20 years</echo>
<property name="jboss.conf.dir" location="${jboss.home}/standalone/configuration" />
commit cc64adde1d8835f8c000afe2de0746fda5bbd5c1
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Tue Jul 23 08:43:19 2013 -0500
One more place where the previous rebase removed code for the storage node configuration.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
index 31e3bf7..9416c67 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
@@ -643,6 +643,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
Configuration parameters = new Configuration();
parameters.setSimpleValue("jmxPort", storageNodeConfiguration.getJmxPort() + "");
parameters.setSimpleValue("heapSize", storageNodeConfiguration.getHeapSize() + "");
+ parameters.setSimpleValue("heapNewSize", storageNodeConfiguration.getHeapNewSize() + "");
+ parameters.setSimpleValue("threadStackSize", storageNodeConfiguration.getThreadStackSize() + "");
boolean updateConfigurationResult = runOperationAndWaitForResult(subject, storageNodeResource,
UPDATE_CONFIGURATION_OPERATION, parameters);
commit 373a931987b402479df3d02269cc00f4ac88a358
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Tue Jul 23 08:42:49 2013 -0500
Enable the new set of calculated metrics for disk space utilization in the UI and CLI.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java
index 80bfdd6..2c0b8f8 100644
--- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java
+++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java
@@ -41,9 +41,13 @@ public class StorageNodeLoadComposite implements Serializable {
private MeasurementAggregateWithUnits heapUsed;
private MeasurementAggregateWithUnits heapPercentageUsed;
private MeasurementAggregateWithUnits load;
- private MeasurementAggregateWithUnits partitionDiskUsedPercentage;
private MeasurementAggregateWithUnits dataDiskUsed;
private MeasurementAggregate tokens;
+
+ private MeasurementAggregateWithUnits dataDiskUsedPercentage;
+ private MeasurementAggregateWithUnits totalDiskUsedPercentage;
+ private MeasurementAggregate freeDiskToDataSizeRatio;
+
private MeasurementAggregateWithUnits actuallyOwns;
public StorageNodeLoadComposite() {
@@ -113,35 +117,59 @@ public class StorageNodeLoadComposite implements Serializable {
public void setHeapPercentageUsed(MeasurementAggregateWithUnits heapPercentageUsed) {
this.heapPercentageUsed = heapPercentageUsed;
}
-
+
/**
* @deprecated use {@link #getPartitionDiskUsedPercentage() getPartitionDiskUsedPercentage()} instead
- *
+ *
* @return partitionDiskUsedPercentage
*/
public MeasurementAggregateWithUnits getDiskSpacePercentageUsed() {
- return getPartitionDiskUsedPercentage();
+ return getDataDiskUsedPercentage();
}
-
+
/**
* @deprecated use {@link #setPartitionDiskUsedPercentage() setPartitionDiskUsedPercentage()} instead
- *
+ *
* @param partitionDiskUsedPercentage
*/
- public void setDiskSpacePercentageUsed(MeasurementAggregateWithUnits partitionDiskUsedPercentage) {
- setPartitionDiskUsedPercentage(partitionDiskUsedPercentage);
+ public void setDiskSpacePercentageUsed(MeasurementAggregateWithUnits diskUsedPercentage) {
+ setDataDiskUsedPercentage(diskUsedPercentage);
+ }
+
+ /**
+ * @return A computed metric for the percentage of disk space used by data files on the corresponding partitions.
+ * If multiple data locations are configured then the aggregate is calculated.
+ */
+ public MeasurementAggregateWithUnits getDataDiskUsedPercentage() {
+ return dataDiskUsedPercentage;
+ }
+
+ public void setDataDiskUsedPercentage(MeasurementAggregateWithUnits dataDiskUsedPercentage) {
+ this.dataDiskUsedPercentage = dataDiskUsedPercentage;
+ }
+
+ /**
+ * @return A computed metric for the percentage of total (system + Storage Node data file) disk space used on the partitions where data files are stored.
+ * If multiple data locations are configured then the aggregate is calculated.
+ */
+ public MeasurementAggregateWithUnits getTotalDiskUsedPercentage() {
+ return totalDiskUsedPercentage;
+ }
+
+ public void setTotalDiskUsedPercentage(MeasurementAggregateWithUnits totalDiskUsedPercentage) {
+ this.totalDiskUsedPercentage = totalDiskUsedPercentage;
}
/**
- * @return A computed metric for the percentage of disk space used on the partition that contains the SSTables.
- * If multiple data locations are configured then the partition with the highest utilization will be reported.
+ * @return A computed metric for the ratio of free disk space to the total size of the data files, i.e. (Free Disk)/(Data File Size).
+ * If multiple data locations are configured then the aggregate is calculated.
*/
- public MeasurementAggregateWithUnits getPartitionDiskUsedPercentage() {
- return partitionDiskUsedPercentage;
+ public MeasurementAggregate getFreeDiskToDataSizeRatio() {
+ return freeDiskToDataSizeRatio;
}
- public void setPartitionDiskUsedPercentage(MeasurementAggregateWithUnits partitionDiskUsedPercentage) {
- this.partitionDiskUsedPercentage = partitionDiskUsedPercentage;
+ public void setFreeDiskToDataSizeRatio(MeasurementAggregate freeDiskToDataSizeRatio) {
+ this.freeDiskToDataSizeRatio = freeDiskToDataSizeRatio;
}
/**
@@ -202,7 +230,7 @@ public class StorageNodeLoadComposite implements Serializable {
builder.append("heapUsed=").append(heapUsed).append(", ");
builder.append("heapPercentageUsed=").append(heapPercentageUsed).append(", ");
builder.append("load=").append(load).append(", ");
- builder.append("partitionDiskUsedPercentage=").append(partitionDiskUsedPercentage).append(", ");
+ builder.append("dataUsedPercentage=").append(dataDiskUsedPercentage).append(", ");
builder.append("dataDiskUsed=").append(dataDiskUsed).append(", ");
builder.append("tokens=").append(tokens).append(", ");
builder.append("actuallyOwns=").append(actuallyOwns);
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java
index 7d413fd..07064b7 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java
@@ -47,6 +47,7 @@ import org.rhq.core.domain.cloud.StorageNode.OperationMode;
import org.rhq.core.domain.cloud.StorageNodeLoadComposite;
import org.rhq.core.domain.cloud.StorageNodeLoadComposite.MeasurementAggregateWithUnits;
import org.rhq.core.domain.criteria.StorageNodeCriteria;
+import org.rhq.core.domain.measurement.MeasurementAggregate;
import org.rhq.core.domain.util.PageControl;
import org.rhq.core.domain.util.PageList;
import org.rhq.core.domain.util.PageOrdering;
@@ -200,7 +201,8 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod
public static class StorageNodeLoadCompositeDatasource extends RPCDataSource<StorageNodeLoadComposite, StorageNodeCriteria> {
public static final String HEAP_PERCENTAGE_KEY = "heapPercentage";
- public static final String DISK_SPACE_PERCENTAGE_KEY = "diskSpacePercentage";
+ public static final String DATA_DISK_SPACE_PERCENTAGE_KEY = "dataDiskSpacePercentage";
+ public static final String TOTAL_DISK_SPACE_PERCENTAGE_KEY = "totalDiskSpacePercentage";
private int id;
public static StorageNodeLoadCompositeDatasource getInstance(int id) {
@@ -296,10 +298,15 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod
"This value is calculated by dividing Heap Used by Heap Maximum.", HEAP_PERCENTAGE_KEY),
Arrays.<Object> asList(loadComposite.getLoad(), "Load", "Data stored on the node", "load"),
Arrays.<Object> asList(
- loadComposite.getPartitionDiskUsedPercentage(),
- "Disk Space Percent Used",
- "Percentage of total disk space used for the partition that contains the data files.If multiple data locations are specified then this will report the average utilization accross all the partitions.",
- DISK_SPACE_PERCENTAGE_KEY),
+ loadComposite.getDataDiskUsedPercentage(),
+ "Data Disk Space Percent Used",
+ "Percentage of disk space used by data files on the partitions that contain the data files. If multiple data locations are specified then the aggregate accross all the partitions that contain data files is reported.",
+ DATA_DISK_SPACE_PERCENTAGE_KEY),
+ Arrays.<Object> asList(
+ loadComposite.getTotalDiskUsedPercentage(),
+ "Total Disk Space Percent Used",
+ "Percentage of total disk space used (system and Storage Node) on the partitions that contain the data files. If multiple data locations are specified then the aggregate accross all the partitions that contain data files is reported.",
+ TOTAL_DISK_SPACE_PERCENTAGE_KEY),
Arrays.<Object> asList(
loadComposite.getDataDiskUsed(),
"Total Disk Space Used",
@@ -325,6 +332,21 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod
recordsList.add(tokens);
}
+
+ if (loadComposite.getFreeDiskToDataSizeRatio() != null){
+ MeasurementAggregate aggregate = loadComposite.getFreeDiskToDataSizeRatio();
+
+ ListGridRecord record = new ListGridRecord();
+ record.setAttribute("id", "freeDiskToDataSizeRatio");
+ record.setAttribute("name", "Free Disk To Data Size Ratio");
+ record.setAttribute("hover", "Ratio of (Free Disk)/(Data File Size). A value below 1 is not recommended since a compaction or repair process could double the amount of disk space used by data files. If multiple data locations are specified then the aggregate accross all the partitions that contain data files is reported.");
+ record.setAttribute("min", aggregate.getMin());
+ record.setAttribute("avg", aggregate.getAvg());
+ record.setAttribute("max", aggregate.getMax());
+
+ recordsList.add(record);
+ }
+
ListGridRecord[] records = recordsList.toArray(new ListGridRecord[recordsList.size()]);
return records;
}
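As a quick illustration of the ratio described in the hover text above (the numbers below are made up, not taken from RHQ):

    // Hypothetical arithmetic only: with 40 GB free and 50 GB of data files,
    // freeDiskToDataSizeRatio = 40.0 / 50.0 = 0.8. A compaction or repair can
    // temporarily need roughly another copy of the data files, so a ratio below 1
    // risks running out of disk, which is why the UI highlights such values.
    public class FreeDiskRatioExample {
        public static void main(String[] args) {
            double freeDiskGb = 40.0;
            double dataFileSizeGb = 50.0;
            double ratio = freeDiskGb / dataFileSizeGb; // 0.8
            System.out.println("freeDiskToDataSizeRatio = " + ratio + (ratio < 1 ? " (warning)" : ""));
        }
    }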
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java
index e8dde9d..e044e4e 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java
@@ -51,8 +51,9 @@ public class StorageNodeLoadComponent extends EnhancedVLayout {
@Override
protected String getCellCSSText(ListGridRecord record, int rowNum, int colNum) {
if ("avg".equals(getFieldName(colNum))
- && (StorageNodeLoadCompositeDatasource.HEAP_PERCENTAGE_KEY.equals(record.getAttribute("id")) || StorageNodeLoadCompositeDatasource.DISK_SPACE_PERCENTAGE_KEY
- .equals(record.getAttribute("id")))) {
+ && (StorageNodeLoadCompositeDatasource.HEAP_PERCENTAGE_KEY.equals(record.getAttribute("id")) ||
+ StorageNodeLoadCompositeDatasource.DATA_DISK_SPACE_PERCENTAGE_KEY.equals(record.getAttribute("id")) ||
+ StorageNodeLoadCompositeDatasource.TOTAL_DISK_SPACE_PERCENTAGE_KEY.equals(record.getAttribute("id")))) {
if (record.getAttributeAsFloat("avgFloat") > .85) {
return "font-weight:bold; color:#d64949;";
} else if (record.getAttributeAsFloat("avgFloat") > .7) {
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
index fab803b..31e3bf7 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
@@ -253,13 +253,16 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
Map<String, Integer> scheduleIdsMap = new HashMap<String, Integer>();
// get the schedule ids for Storage Service resource
- final String tokensMetric = "Tokens", ownershipMetric = "Ownership", diskUsedPercentageMetric = "Calculated.PartitionDiskUsedPercentage";
+ final String tokensMetric = "Tokens", ownershipMetric = "Ownership";
+ final String dataDiskUsedPercentageMetric = "Calculated.DataDiskUsedPercentage";
+ final String totalDiskUsedPercentageMetric = "Calculated.TotalDiskUsedPercentage";
+ final String freeDiskToDataRatioMetric = "Calculated.FreeDiskToDataSizeRatio";
final String loadMetric = "Load", keyCacheSize = "KeyCacheSize", rowCacheSize = "RowCacheSize", totalCommitLogSize = "TotalCommitlogSize";
TypedQuery<Object[]> query = entityManager.<Object[]> createNamedQuery(
StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_PARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, Object[].class);
query.setParameter("parrentId", resourceId).setParameter("metricNames",
- Arrays.asList(tokensMetric, ownershipMetric, diskUsedPercentageMetric, loadMetric, keyCacheSize,
- rowCacheSize, totalCommitLogSize));
+ Arrays.asList(tokensMetric, ownershipMetric, loadMetric, keyCacheSize, rowCacheSize, totalCommitLogSize,
+ dataDiskUsedPercentageMetric, totalDiskUsedPercentageMetric, freeDiskToDataRatioMetric));
for (Object[] pair : query.getResultList()) {
scheduleIdsMap.put((String) pair[0], (Integer) pair[1]);
}
@@ -292,10 +295,22 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime);
result.setActuallyOwns(ownershipAggregateWithUnits);
}
- if ((scheduleId = scheduleIdsMap.get(diskUsedPercentageMetric)) != null) {
- StorageNodeLoadComposite.MeasurementAggregateWithUnits diskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits(
+
+ //calculated disk space related metrics
+ if ((scheduleId = scheduleIdsMap.get(dataDiskUsedPercentageMetric)) != null) {
+ StorageNodeLoadComposite.MeasurementAggregateWithUnits dataDiskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits(
+ subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime);
+ result.setDataDiskUsedPercentage(dataDiskUsedPercentageAggregateWithUnits);
+ }
+ if ((scheduleId = scheduleIdsMap.get(totalDiskUsedPercentageMetric)) != null) {
+ StorageNodeLoadComposite.MeasurementAggregateWithUnits totalDiskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits(
subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime);
- result.setPartitionDiskUsedPercentage(diskUsedPercentageAggregateWithUnits);
+ result.setDataDiskUsedPercentage(totalDiskUsedPercentageAggregateWithUnits);
+ }
+ if ((scheduleId = scheduleIdsMap.get(freeDiskToDataRatioMetric)) != null) {
+ MeasurementAggregate freeDiskToDataRatioAggregate = measurementManager.getAggregate(subject,
+ scheduleId, beginTime, endTime);
+ result.setFreeDiskToDataSizeRatio(freeDiskToDataRatioAggregate);
}
if ((scheduleId = scheduleIdsMap.get(loadMetric)) != null) {
diff --git a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
index 5bbebed..e95f995 100644
--- a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
+++ b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
@@ -188,7 +188,7 @@
<metric property="Calculated.DataDiskUsedPercentage" displayName="Data File Disk Used Percentage" dataType="measurement" units="percentage" displayType="summary" description="Percentage of disk space used by Cassandra data files. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/>
<metric property="Calculated.TotalDiskUsedPercentage" displayName="Total Disk Used Percentage" dataType="measurement" units="percentage" displayType="summary" description="Percentage of total disk space used. The metric acounts overall disk usage (including system files), not just disk space used by Cassandra. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/>
- <metric property="Calculated.FreeDiskToDataSizeRatio" displayName="Free Disk to Data Size Ratio" dataType="measurement" displayType="summary" description="Ratio of (Amount of Free Disk)/(Data File Size). A value below 1 is not recommended since a compaction or repair process could double the amount of disk space used by data files. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/>
+ <metric property="Calculated.FreeDiskToDataSizeRatio" displayName="Free Disk to Data Size Ratio" dataType="measurement" displayType="summary" description="Ratio of (Free Disk)/(Data File Size). A value below 1 is not recommended since a compaction or repair process could double the amount of disk space used by data files. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/>
<metric property="CurrentGenerationNumber" dataType="trait" displayType="summary" description="Current generation number"/>
<metric property="ExceptionCount" measurementType="trendsup" dataType="measurement" displayType="summary" description="Exception Count"/>
commit 6997631e56204db41c9f4902eef1c6210706be3f
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Mon Jul 22 15:14:22 2013 -0500
Add back code used to update storage node configuration that was lost due to rebase. This code updates the two additional storage node properties that were added.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
index b32ab5b..fab803b 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
@@ -611,6 +611,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
storageNodeResource.getId());
configuration.setHeapSize(storageNodeConfiguration.getSimpleValue("maxHeapSize"));
+ configuration.setHeapNewSize(storageNodeConfiguration.getSimpleValue("heapNewSize"));
+ configuration.setThreadStackSize(storageNodeConfiguration.getSimpleValue("threadStackSize"));
configuration.setJmxPort(storageNode.getJmxPort());
}
diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
index 8156d02..1e39d6c 100644
--- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
+++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
@@ -122,6 +122,9 @@
<operation name="updateConfiguration" description="Updates the node configuration. Will require a separate server restart for the settings to take effect.">
<parameters>
<c:simple-property name="heapSize" type="string" description="The heap size to be used for both -Xms and -Xmx JVM options."/>
+ <c:simple-property name="heapNewSize" type="string" description="The heap new size to be used be used with -Xmn JVM option."/>
+ <c:simple-property name="threadStackSize" type="integer" description="The thread stack size. This memory is allocated to each thread off heap. The
+ value should be an integer that will be interpreted in kilobytes."/>
<c:simple-property name="jmxPort" type="integer" description="JMX port JVM option."/>
</parameters>
<results>
commit e85abcf495d6939d876b09bd1de1e71a29af17ec
Author: Heiko W. Rupp <hwr(a)redhat.com>
Date: Tue Jul 23 10:42:17 2013 +0200
BZ 796480 (and others) add support for subCategory in embedded types (aka runs-inside)
diff --git a/modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/PluginMetadataParser.java b/modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/PluginMetadataParser.java
index 88dd865..d5ff2ef 100644
--- a/modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/PluginMetadataParser.java
+++ b/modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/PluginMetadataParser.java
@@ -417,16 +417,56 @@ public class PluginMetadataParser {
return serviceResourceType;
}
- private static void setSubCategory(ResourceDescriptor resourceDescriptor, ResourceType resourceType)
+ /**
+ * Try to find the subCategory of the p/s/s descriptor in one of the parents
+ * <subcategories><subcategory>Foo</subcategory></subcategories> elements and
+ * set it on the resourceType if found.
+ *
+ * It is not enough to look at the direct parents, but we need to also look at the
+ * <runs-inside> types if our type is "embedded" in a different type.
+ * @param resourceDescriptor Descriptor to get the subCategory attribute from
+ * @param resourceType The type to attach the ResourceSubCategory to.
+ * @throws InvalidPluginDescriptorException If the descriptor.subCategory can not be found in any parent.
+ */
+ private void setSubCategory(ResourceDescriptor resourceDescriptor, ResourceType resourceType)
throws InvalidPluginDescriptorException {
String subCatName = resourceDescriptor.getSubCategory();
if (subCatName != null) {
ResourceSubCategory subCat = SubCategoriesMetadataParser.findSubCategoryOnResourceTypeAncestor(
resourceType, subCatName);
- if (subCat == null)
+
+ // We need to look at resourceDescriptor -> runsInside to see if one of those defines the
+ // subcategories that we are looking for.
+ if (subCat == null && resourceDescriptor.getRunsInside() != null) {
+ RunsInsideType rit = resourceDescriptor.getRunsInside();
+ List<ParentResourceType> parentResourceTypeList = rit.getParentResourceType();
+ for (ParentResourceType parentResourceType : parentResourceTypeList) {
+ ResourceType parentType = getResourceTypeFromPlugin(parentResourceType.getName(),parentResourceType.getPlugin());
+ // check on the parent
+ if (parentType.getChildSubCategories()!=null ) {
+ for (ResourceSubCategory parentSubcat : parentType.getChildSubCategories()) {
+ if (parentSubcat.getName().equals(subCatName)) {
+ subCat = parentSubcat;
+ break;
+ }
+ }
+ }
+
+ // Not found on the runs-inside type; look at the ancestors of those runs-inside types
+ if (subCat==null) {
+ subCat = SubCategoriesMetadataParser.findSubCategoryOnResourceTypeAncestor(parentType,subCatName);
+ }
+ if (subCat!=null) {
+ break;
+ }
+ }
+ }
+
+ if (subCat == null) {
throw new InvalidPluginDescriptorException("Resource type [" + resourceType.getName()
+ "] specified a subcategory (" + subCatName
+ ") that is not defined as a child subcategory of one of its ancestor resource types.");
+ }
resourceType.setSubCategory(subCat);
}
}
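The lookup order implemented by the patch above, condensed into a toy model (the classes and helpers below are stand-ins, not the RHQ parser types): the type's own ancestors are searched first, then each runs-inside parent's child subcategories, then that parent's ancestors.

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    // Toy model only; subcategories are plain strings and each type knows its ancestor chain.
    public class SubCategoryLookupSketch {
        static class Type {
            final Type parent;                       // ancestor chain
            final Set<String> childSubCategories;    // subcategories defined on this type
            Type(Type parent, String... subCats) {
                this.parent = parent;
                this.childSubCategories = new HashSet<String>(Arrays.asList(subCats));
            }
        }

        // 1) own ancestors, 2) each runs-inside parent's subcategories, 3) that parent's ancestors
        static boolean resolves(Type type, List<Type> runsInside, String subCat) {
            if (foundInAncestors(type, subCat)) {
                return true;
            }
            for (Type parent : runsInside) {
                if (parent.childSubCategories.contains(subCat) || foundInAncestors(parent, subCat)) {
                    return true;
                }
            }
            return false; // the real parser throws InvalidPluginDescriptorException here
        }

        static boolean foundInAncestors(Type type, String subCat) {
            for (Type t = type.parent; t != null; t = t.parent) {
                if (t.childSubCategories.contains(subCat)) {
                    return true;
                }
            }
            return false;
        }

        public static void main(String[] args) {
            Type server1 = new Type(null, "applications", "fooBar");
            Type service = new Type(null);            // no direct parent, runs inside server1
            System.out.println(resolves(service, Arrays.asList(server1), "applications")); // true
        }
    }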
diff --git a/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/ExtensionModelTest.java b/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/ExtensionModelTest.java
index 20e1aa9..2a9595f 100644
--- a/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/ExtensionModelTest.java
+++ b/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/ExtensionModelTest.java
@@ -226,6 +226,7 @@ public class ExtensionModelTest {
assert jbossServer.getCategory().equals(ResourceCategory.SERVER);
assert jbossServer.getDescription().equals("JBoss Application Server Description");
assert jbossServer.getParentResourceTypes().size() == 0;
+ assert jbossServer.getChildSubCategories().size() == 2;
assert jbossServer.getChildResourceTypes().size() == 1;
ResourceType embeddedTomcatServer = jbossServer.getChildResourceTypes().iterator().next();
@@ -263,6 +264,7 @@ public class ExtensionModelTest {
assert hibernateService.getDescription().equals("Hibernate Service Description");
assert hibernateService.getChildResourceTypes().size() == 0;
assert hibernateService.getParentResourceTypes().size() == 3;
+ assert hibernateService.getSubCategory().getName().equals("Framework");
ResourceType tomcatServer = metadataManager.getType("TomcatServer", "Tomcat");
ResourceType jbossServer = metadataManager.getType("JBossASServer", "JBossAS");
diff --git a/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/NestedSubCategoriesMetadataParserTest.java b/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/NestedSubCategoriesMetadataParserTest.java
new file mode 100644
index 0000000..1cba523
--- /dev/null
+++ b/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/NestedSubCategoriesMetadataParserTest.java
@@ -0,0 +1,159 @@
+ /*
+ * RHQ Management Platform
+ * Copyright (C) 2005-2008 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation, and/or the GNU Lesser
+ * General Public License, version 2.1, also as published by the Free
+ * Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License and the GNU Lesser General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * and the GNU Lesser General Public License along with this program;
+ * if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+package org.rhq.core.clientapi.agent.metadata.test;
+
+ import java.net.URL;
+ import java.util.List;
+ import java.util.Set;
+
+ import javax.xml.bind.JAXBContext;
+ import javax.xml.bind.Unmarshaller;
+ import javax.xml.bind.util.ValidationEventCollector;
+
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
+ import org.testng.annotations.BeforeSuite;
+ import org.testng.annotations.Test;
+
+ import org.rhq.core.clientapi.agent.metadata.PluginMetadataManager;
+ import org.rhq.core.clientapi.agent.metadata.SubCategoriesMetadataParser;
+ import org.rhq.core.clientapi.descriptor.AgentPluginDescriptorUtil;
+ import org.rhq.core.clientapi.descriptor.DescriptorPackages;
+ import org.rhq.core.clientapi.descriptor.plugin.PluginDescriptor;
+ import org.rhq.core.clientapi.descriptor.plugin.ResourceDescriptor;
+ import org.rhq.core.clientapi.descriptor.plugin.ServerDescriptor;
+ import org.rhq.core.clientapi.descriptor.plugin.ServiceDescriptor;
+ import org.rhq.core.clientapi.descriptor.plugin.SubCategoryDescriptor;
+ import org.rhq.core.domain.resource.ResourceCategory;
+ import org.rhq.core.domain.resource.ResourceSubCategory;
+ import org.rhq.core.domain.resource.ResourceType;
+
+ /**
+ * @author Charles Crouch
+ * @author Heiko W. Rupp
+ */
+public class NestedSubCategoriesMetadataParserTest {
+ private static final String DESCRIPTOR_FILENAME = "test-subcategories-nested.xml";
+ private final Log LOG = LogFactory.getLog(NestedSubCategoriesMetadataParserTest.class);
+
+ private PluginDescriptor pluginDescriptor;
+
+ @BeforeSuite
+ public void loadPluginDescriptor() throws Exception {
+ try {
+ URL descriptorUrl = this.getClass().getClassLoader().getResource(DESCRIPTOR_FILENAME);
+ LOG.info("Loading plugin descriptor at: " + descriptorUrl);
+
+ JAXBContext jaxbContext = JAXBContext.newInstance(DescriptorPackages.PC_PLUGIN);
+
+ Unmarshaller unmarshaller = jaxbContext.createUnmarshaller();
+ ValidationEventCollector vec = new ValidationEventCollector();
+ unmarshaller.setEventHandler(vec);
+ pluginDescriptor = (PluginDescriptor) unmarshaller.unmarshal(descriptorUrl.openStream());
+ } catch (Throwable t) {
+ // Catch RuntimeExceptions and Errors and dump their stack trace, because Surefire will completely swallow them
+ // and throw a cryptic NPE (see http://jira.codehaus.org/browse/SUREFIRE-157)!
+ t.printStackTrace();
+ throw new RuntimeException(t);
+ }
+ }
+
+ @Test
+ public void parseSingleSubCategory() {
+ List<ServerDescriptor> servers = pluginDescriptor.getServers();
+ ServerDescriptor server0 = servers.get(0);
+ ResourceDescriptor.Subcategories subCategoriesDescriptor = server0.getSubcategories();
+ assert subCategoriesDescriptor != null : "No subcategories element: " + server0.getName();
+
+ List<SubCategoryDescriptor> subCategoryDescriptors = subCategoriesDescriptor.getSubcategory();
+
+ assert subCategoryDescriptors != null : "No subcategory elements: " + server0.getName();
+ assert !subCategoryDescriptors.isEmpty() : "No subcategory elements: " + server0.getName();
+
+ ResourceSubCategory subCat;
+
+ ResourceType resType = new ResourceType("testResType", "myplugin", ResourceCategory.SERVER, null);
+ subCat = SubCategoriesMetadataParser.getSubCategory(subCategoryDescriptors.get(0), resType);
+
+ assert subCat != null : "Null subcategory received from parser";
+ assert subCat.getName().equals("applications") : "Name not read correctly";
+ assert subCat.getDisplayName().equals("Apps") : "Display name not read correctly";
+ assert subCat.getDescription().equals("The apps.") : "Description not read correctly";
+ // getSubCategory is no longer responsible for setting resourcetype information, that is done in PluginMetadataParser
+ //assert subCat.getResourceType().equals(resType) : "ResourceType not set correctly";
+
+ }
+
+ @Test
+ public void parseNestedSubCategories() {
+ List<ServerDescriptor> servers = pluginDescriptor.getServers();
+ ServerDescriptor server2 = servers.get(1);
+ assert server2.getName().equals("testServer2");
+ ResourceDescriptor.Subcategories subCategoriesDescriptor = server2.getSubcategories();
+ assert subCategoriesDescriptor == null : "Unexpected subcategories element: " + server2.getName();
+ assert server2.getSubCategory().equals("applications");
+
+ List<ServiceDescriptor> services = pluginDescriptor.getServices();
+ ServiceDescriptor service1 = services.get(0);
+ assert service1.getName().equals("testService");
+ assert service1.getSubCategory().equals("applications");
+ }
+
+ @Test
+ public void testParseViaMetaDataManager() throws Exception {
+
+ PluginDescriptor pluginDescriptor;
+
+ URL descriptorUrl = this.getClass().getClassLoader().getResource(DESCRIPTOR_FILENAME);
+ System.out.println("Loading plugin descriptor at: " + descriptorUrl);
+
+ pluginDescriptor = (PluginDescriptor) AgentPluginDescriptorUtil.parsePluginDescriptor(descriptorUrl
+ .openStream());
+
+ PluginMetadataManager metadataManager = new PluginMetadataManager();
+ Set<ResourceType> typeSet = metadataManager.loadPlugin(pluginDescriptor);
+ assert typeSet != null : "Got no types!!";
+ assert typeSet.size()==5 : "Expected 5 types, but got " + typeSet.size();
+
+ ResourceType testService = findType(typeSet,"testService");
+ assert testService.getSubCategory().getName().equals("applications");
+
+ ResourceType testService2 = findType(typeSet,"testService2");
+ assert testService2.getSubCategory().getName().equals("applications");
+
+ ResourceType testService3 = findType(typeSet,"testService3");
+ assert testService3.getSubCategory().getName().equals("fooBar");
+
+
+ }
+
+ private ResourceType findType(Set<ResourceType> types, String name) {
+ for (ResourceType type : types ) {
+ if (type.getName().equals(name)) {
+ return type;
+ }
+ }
+ assert false : "Type with name " + name + " not found";
+ return null;
+ }
+ }
\ No newline at end of file
diff --git a/modules/core/client-api/src/test/resources/test-hibernate.xml b/modules/core/client-api/src/test/resources/test-hibernate.xml
index 37a2e03..9051ca2 100644
--- a/modules/core/client-api/src/test/resources/test-hibernate.xml
+++ b/modules/core/client-api/src/test/resources/test-hibernate.xml
@@ -3,7 +3,7 @@
package="org.rhq.plugins.test2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="urn:xmlns:rhq-plugin">
-
+
<depends plugin="JMX" />
<depends plugin="Tomcat" />
<depends plugin="JBossAS" />
@@ -11,11 +11,13 @@
<service name="HibernateService"
discovery="HibernateDiscoveryComponent"
class="HibernateServiceComponent"
- description="Hibernate Service Description">
+ description="Hibernate Service Description"
+ subCategory="Framework"
+ >
<runs-inside>
<parent-resource-type name="TomcatServer" plugin="Tomcat"/>
<parent-resource-type name="JBossASServer" plugin="JBossAS"/>
<parent-resource-type name="EmbeddedTomcatServer" plugin="JBossAS"/>
</runs-inside>
- </service>
+ </service>
</plugin>
\ No newline at end of file
diff --git a/modules/core/client-api/src/test/resources/test-jbossas.xml b/modules/core/client-api/src/test/resources/test-jbossas.xml
index 4d2602b..8269343 100644
--- a/modules/core/client-api/src/test/resources/test-jbossas.xml
+++ b/modules/core/client-api/src/test/resources/test-jbossas.xml
@@ -12,7 +12,11 @@
discovery="JBossASDiscoveryComponent"
class="JBossASServerComponent"
description="JBoss Application Server Description">
-
+ <subcategories>
+ <subcategory name="Applications" />
+ <subcategory name="Framework" />
+ </subcategories>
+
<operation name="stop" displayName="Stop JBossAS Server" description="Kills the server" timeout="30">
<parameters>
<c:simple-property name="force"
@@ -29,7 +33,7 @@
required="true"
description="If true, the server is definitely down; otherwise, the shutdown was issued but it is unclear if it really died"/>
</results>
- </operation>
+ </operation>
<server name="EmbeddedTomcatServer"
description="Embedded Tomcat Web Server Description"
sourcePlugin="Tomcat"
@@ -37,6 +41,6 @@
discovery="JBossASTomcatDiscoveryComponent"
class="JBossASTomcatServerComponent">
</server>
-
+
</server>
</plugin>
\ No newline at end of file
diff --git a/modules/core/client-api/src/test/resources/test-subcategories-nested.xml b/modules/core/client-api/src/test/resources/test-subcategories-nested.xml
new file mode 100644
index 0000000..eb43af7
--- /dev/null
+++ b/modules/core/client-api/src/test/resources/test-subcategories-nested.xml
@@ -0,0 +1,60 @@
+<!--
+ ~ RHQ Management Platform
+ ~ Copyright (C) 2005-2013 Red Hat, Inc.
+ ~ All rights reserved.
+ ~
+ ~ This program is free software; you can redistribute it and/or modify
+ ~ it under the terms of the GNU General Public License as published by
+ ~ the Free Software Foundation version 2 of the License.
+ ~
+ ~ This program is distributed in the hope that it will be useful,
+ ~ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ ~ GNU General Public License for more details.
+ ~
+ ~ You should have received a copy of the GNU General Public License
+ ~ along with this program; if not, write to the Free Software Foundation, Inc.,
+ ~ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ -->
+
+<plugin name="TestPlugin" displayName="Mock JBoss AS" package="org.rhq.plugins.mock.jboss"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns="urn:xmlns:rhq-plugin">
+
+ <server name="testServer1">
+ <subcategories>
+ <subcategory name="applications" displayName="Apps" description="The apps."/>
+ <subcategory name="fooBar"/>
+ </subcategories>
+ </server>
+
+ <!-- subCategory="applications" means that resources of this type go in the 'applications folder' of testServer1-->
+ <server name="testServer2" subCategory="applications">
+ <runs-inside>
+ <parent-resource-type name="testServer1" plugin="TestPlugin"/>
+ </runs-inside>
+ <metric property="testMetric"/>
+ </server>
+
+ <service name="testService" subCategory="applications">
+ <runs-inside>
+ <parent-resource-type name="testServer1" plugin="TestPlugin"/>
+ </runs-inside>
+
+ </service>
+
+ <service name="testService2" subCategory="applications">
+ <runs-inside>
+ <parent-resource-type name="testServer1" plugin="TestPlugin"/>
+ </runs-inside>
+
+ </service>
+
+ <service name="testService3" subCategory="fooBar">
+ <runs-inside>
+ <parent-resource-type name="testServer1" plugin="TestPlugin"/>
+ </runs-inside>
+
+ </service>
+
+</plugin>
commit 60329fcbda5a1961e0f9285c70eb56ea12fe2eb0
Author: Heiko W. Rupp <hwr(a)redhat.com>
Date: Mon Jul 22 15:00:35 2013 +0200
Add Michael Burman as contributor
diff --git a/pom.xml b/pom.xml
index c5cf5a7..3662bc7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -2353,6 +2353,10 @@
<timezone>+1</timezone>
</contributor>
<contributor>
+ <name>Michael Burman</name>
+ <timezone>+2</timezone>
+ </contributor>
+ <contributor>
<name>Torben Jäger</name>
<timezone>+1</timezone>
</contributor>
commit d7e9f5b9871824d1f02ae762b44cff85ff6c3d44
Author: Thomas Segismont <tsegismo(a)redhat.com>
Date: Tue Jul 23 10:41:12 2013 +0200
Bug 969621 - EAP 6 managed plug-in is unable to discover EAP servers when more than one is running on a single host
Update expected resource keys in itests
diff --git a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/DomainServerComponentTest.java b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/DomainServerComponentTest.java
index 91ece92..269474b 100644
--- a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/DomainServerComponentTest.java
+++ b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/DomainServerComponentTest.java
@@ -43,8 +43,11 @@ public class DomainServerComponentTest extends AbstractServerComponentTest {
public static final ResourceType RESOURCE_TYPE =
new ResourceType("JBossAS7 Host Controller", PLUGIN_NAME, ResourceCategory.SERVER, null);
- // The key is the server's base dir.
- public static final String RESOURCE_KEY = new File(JBOSS_HOME, "domain").getPath();
+ // The key is the server host config file
+ // hostConfig: /tmp/jboss-as-6.0.0/domain/configuration/host.xml
+ public static final String RESOURCE_KEY = "hostConfig: "
+ + new File(JBOSS_HOME, "domain" + File.separator + "configuration" + File.separator + "host.xml")
+ .getAbsolutePath();
@Override
protected ResourceType getServerResourceType() {
diff --git a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/SecurityModuleOptionsTest.java b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/SecurityModuleOptionsTest.java
index d128144..182ef36 100644
--- a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/SecurityModuleOptionsTest.java
+++ b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/SecurityModuleOptionsTest.java
@@ -1,6 +1,6 @@
/*
* RHQ Management Platform
- * Copyright (C) 2005-2011 Red Hat, Inc.
+ * Copyright (C) 2005-2013 Red Hat, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -13,8 +13,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * along with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
package org.rhq.modules.plugins.jbossas7.itest.domain;
@@ -488,10 +488,8 @@ public class SecurityModuleOptionsTest extends AbstractJBossAS7PluginTest {
InventoryManager im = pluginContainer.getInventoryManager();
Resource platform = im.getPlatform();
//host controller
- ResourceType hostControllerType = new ResourceType("JBossAS7 Host Controller", PLUGIN_NAME,
- ResourceCategory.SERVER, null);
- Resource hostController = getResourceByTypeAndKey(platform, hostControllerType,
- "/tmp/jboss-as-6.0.0/domain");
+ Resource hostController = getResourceByTypeAndKey(platform, DomainServerComponentTest.RESOURCE_TYPE,
+ DomainServerComponentTest.RESOURCE_KEY);
//profile=full-ha
ResourceType profileType = new ResourceType("Profile", PLUGIN_NAME, ResourceCategory.SERVICE, null);
String key = PROFILE;
diff --git a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/StandaloneServerComponentTest.java b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/StandaloneServerComponentTest.java
index 8446345..32f92c7 100644
--- a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/StandaloneServerComponentTest.java
+++ b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/StandaloneServerComponentTest.java
@@ -51,8 +51,11 @@ public class StandaloneServerComponentTest extends AbstractServerComponentTest {
public static final ResourceType RESOURCE_TYPE =
new ResourceType("JBossAS7 Standalone Server", PLUGIN_NAME, ResourceCategory.SERVER, null);
- // The key is the server's base dir.
- public static final String RESOURCE_KEY = new File(JBOSS_HOME, "standalone").getPath();
+ // The key is the server host config file
+ // hostConfig: /tmp/jboss-as-6.0.0/standalone/configuration/standalone-full-ha.xml
+ public static final String RESOURCE_KEY = "hostConfig: "
+ + new File(JBOSS_HOME, "standalone" + File.separator + "configuration" + File.separator
+ + "standalone-full-ha.xml").getAbsolutePath();
private static final String RELOAD_OPERATION_NAME = "reload";
private static final String RESTART_OPERATION_NAME = "restart";
commit 567aee7f81c6aa0f7680d4f394cccb1974705320
Author: Larry O'Leary <loleary(a)redhat.com>
Date: Mon Jul 22 16:10:09 2013 -0500
BZ 981015: Fix test failures introduced by commit 01cd91b
- findLdapUserDetails was appending baseDN twice during fallback code
- FakeLdapContext contained some lazy escaping on the mock group entries
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/ldap/FakeLdapContext.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/ldap/FakeLdapContext.java
index dad31ce..2ae6265 100644
--- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/ldap/FakeLdapContext.java
+++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/ldap/FakeLdapContext.java
@@ -99,7 +99,6 @@ public class FakeLdapContext implements LdapContext {
try {
return new FakeNamingEnumeration<SearchResult>(ldapTestData.getSearchResults(attributes));
} catch (Exception e) {
- // TODO Auto-generated catch block
e.printStackTrace();
return null;
}
@@ -516,12 +515,12 @@ public class FakeLdapContext implements LdapContext {
attr = new BasicAttribute("member");
attr.add("cn=Robert Smith,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Cannon\\, Brett,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
- attr.add("cn=Charles H\\\\Samlin,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
+ attr.add("cn=Charles H\\Samlin,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Craig \\#1 Sellers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Beverly \\+1 Balanger,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Bethany \\<Stuart\\> Wallace,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Zachory S\\; Balanger,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
- attr.add("cn=Allen \\\"The Hammer\\\" Callen,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
+ attr.add("cn=Allen \"The Hammer\" Callen,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Sam Not \\= Smitherson,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=\\ Billy The Kiddough\\ ,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=System/Integration API,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
@@ -557,12 +556,12 @@ public class FakeLdapContext implements LdapContext {
attr = new BasicAttribute("member");
attr.add("cn=John Smith,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Dr. Greg Hause\\, MD,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
- attr.add("cn=Cindy\\\\Cynthia Groober,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
+ attr.add("cn=Cindy\\Cynthia Groober,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Biff \\# Rogers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Steven \\+2 Reed,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Lisa \\<The Great\\> Toller,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Homer J Simpsonite\\; III,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
- attr.add("cn=Jessica \\\"Crouching Tiger\\\" Mathers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
+ attr.add("cn=Jessica \"Crouching Tiger\" Mathers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Hope \\= Rein,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=\\ Sue Ferguson\\ ,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Phil/Susan Carlson,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
@@ -598,12 +597,12 @@ public class FakeLdapContext implements LdapContext {
attr = new BasicAttribute("member");
attr.add("cn=Sheri Smith,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Walsh\\, Brad,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
- attr.add("cn=Jim\\\\James Kirk,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
+ attr.add("cn=Jim\\James Kirk,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Sandra \\# Phillips,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=William Tell Overture \\+1,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Craig \\<Bison\\> Allen,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Walter T Fredrick\\; The Second,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
- attr.add("cn=Stanley \\\"Short\\\" Mein,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
+ attr.add("cn=Stanley \"Short\" Mein,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Noah \\= Sadler,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=\\ Stuart Smiley\\ ,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=System/Integration API 2,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
@@ -984,7 +983,7 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with backslash (\\) in 'cn' in the RHQ Admin Group");
attrs.put(attr);
- sr = new SearchResult("cn=Charles H\\\\Samlin,ou=users", null, null, attrs, true);
+ sr = new SearchResult("cn=Charles H\\Samlin,ou=users", null, null, attrs, true);
this.add(sr);
// dn: cn=Cindy\\Cynthia Groober,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
@@ -1033,7 +1032,7 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with backslash (\\) in 'cn' in the JBoss Admin Group");
attrs.put(attr);
- sr = new SearchResult("cn=Cindy\\\\Cynthia Groober,ou=users", null, null, attrs, true);
+ sr = new SearchResult("cn=Cindy\\Cynthia Groober,ou=users", null, null, attrs, true);
this.add(sr);
// dn: cn=Jim\\James Kirk,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
@@ -1082,7 +1081,7 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with backslash (\\) in 'cn' in the JBoss Monitor Group");
attrs.put(attr);
- sr = new SearchResult("cn=Jim\\\\James Kirk,ou=users", null, null, attrs, true);
+ sr = new SearchResult("cn=Jim\\James Kirk,ou=users", null, null, attrs, true);
this.add(sr);
// dn: cn=Craig \#1 Sellers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
@@ -1675,7 +1674,7 @@ public class FakeLdapContext implements LdapContext {
null, attrs, true);
this.add(sr);
- // dn: cn=Allen \"The Hammer\" Callen,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
+ // dn: cn=Allen "The Hammer" Callen,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
attrs = new BasicAttributes();
attr = new BasicAttribute("baseName");
@@ -1721,11 +1720,11 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with quote (\") in 'cn' in the RHQ Admin Group");
attrs.put(attr);
- sr = new SearchResult("cn=Allen \\\"The Hammer\\\" Callen,ou=users", "javax.naming.directory.DirContext",
+ sr = new SearchResult("cn=Allen \"The Hammer\" Callen,ou=users", "javax.naming.directory.DirContext",
null, attrs, true);
this.add(sr);
- // dn: cn=Jessica \"Crouching Tiger\" Mathers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
+ // dn: cn=Jessica "Crouching Tiger" Mathers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
attrs = new BasicAttributes();
attr = new BasicAttribute("baseName");
@@ -1771,11 +1770,11 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with quote (\") in 'cn' in the JBoss Admin Group");
attrs.put(attr);
- sr = new SearchResult("cn=Jessica \\\"Crouching Tiger\\\" Mathers,ou=users",
+ sr = new SearchResult("cn=Jessica \"Crouching Tiger\" Mathers,ou=users",
"javax.naming.directory.DirContext", null, attrs, true);
this.add(sr);
- // dn: cn=Stanley \"Short\" Mein,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
+ // dn: cn=Stanley "Short" Mein,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
attrs = new BasicAttributes();
attr = new BasicAttribute("baseName");
@@ -1821,7 +1820,7 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with quote (\") in 'cn' in the JBoss Monitor Group");
attrs.put(attr);
- sr = new SearchResult("cn=Stanley \\\"Short\\\" Mein,ou=users", null, null, attrs, true);
+ sr = new SearchResult("cn=Stanley \"Short\" Mein,ou=users", null, null, attrs, true);
this.add(sr);
// dn: cn=Sam Not \= Smitherson,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
@@ -2160,7 +2159,7 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with slash (/) in 'cn' in the RHQ Admin Group");
attrs.put(attr);
- sr = new SearchResult("cn=System/Integration API,ou=users", null, null, attrs, true);
+ sr = new SearchResult("cn=System\\/Integration API,ou=users", null, null, attrs, true);
this.add(sr);
// dn: cn=Phil/Susan Carlson,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
@@ -2209,7 +2208,7 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with slash (/) in 'cn' in the JBoss Admin Group");
attrs.put(attr);
- sr = new SearchResult("cn=Phil/Susan Carlson,ou=users", null, null, attrs, true);
+ sr = new SearchResult("cn=Phil\\/Susan Carlson,ou=users", null, null, attrs, true);
this.add(sr);
// dn: cn=System/Integration API 2,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
@@ -2254,7 +2253,7 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with slash (/) in 'cn' in the JBoss Monitor Group");
attrs.put(attr);
- sr = new SearchResult("cn=System/Integration API 2,ou=users", null, null, attrs, true);
+ sr = new SearchResult("cn=System\\/Integration API 2,ou=users", null, null, attrs, true);
this.add(sr);
// dn: cn=Lee -Fast- Croutche,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java
index a28c709..7473321 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java
@@ -350,7 +350,6 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal {
if (si.isRelative()) {
userDN += "," + baseDNs[x];
}
- userDN = userDN + "," + baseDNs[x];
}
userDetails.put("dn", userDN);
commit 8c693ee685d538a28a3c42ce813b10b49997f871
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Mon Jul 22 16:36:23 2013 -0400
get rid of the upgrade wording; it's confusing when performing an initial install.
diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java
index 61c8a9c..fb9bceb 100644
--- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java
+++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java
@@ -363,14 +363,14 @@ public abstract class AbstractInstall extends ControlCommand {
protected void startRHQServerForInstallation() throws IOException {
try {
- log.info("The RHQ Server must be started to complete its upgrade. Starting the RHQ server in preparation of running the server installer...");
+ log.info("The RHQ Server must be started to complete its installation. Starting the RHQ server in preparation of running the server installer...");
// when you unzip the distro, you are getting a fresh, unadulterated, out-of-box EAP installation, which by default listens
// to port 9999 for its native management subsystem. Make sure some other independent EAP server (or anything for that matter)
// isn't already listening to that port.
if (isPortInUse("127.0.0.1", 9999)) {
throw new IOException(
- "Something is already listening to port 9999 - shut it down before upgrading the server.");
+ "Something is already listening to port 9999 - shut it down before installing the server.");
}
Executor executor = new DefaultExecutor();
@@ -400,7 +400,7 @@ public abstract class AbstractInstall extends ControlCommand {
}
// Wait for the server to complete it's startup
- log.info("Waiting for the RHQ Server to start in preparation of running the server installer for upgrade...");
+ log.info("Waiting for the RHQ Server to start in preparation of running the server installer...");
commandLine = getCommandLine("rhq-installer", "--test");
Executor installerExecutor = new DefaultExecutor();
commit 2409ed2dcd705c58e5024182e95445431b25acf5
Author: John Sanda <jsanda(a)redhat.com>
Date: Mon Jul 22 15:16:49 2013 -0400
document the 4.8 storage patch script
diff --git a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh
index 33984d1..ae78240 100755
--- a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh
+++ b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh
@@ -1,7 +1,43 @@
#!/bin/bash
+#
+# BACKGROUND:
+# This patch script needs to be run against RHQ 4.8.0 installations prior to
+# upgrading to a newer release. You do not need to run this script if upgrading
+# from a version earlier than 4.8.0.
+#
+# PREREQUISITES:
+# 1) Shut down the RHQ storage node and server.
+#
+# 2) Edit <rhq-install-dir>/rhq-storage/conf/cassandra.yaml and set the
+# following property,
+#
+# start_rpc: true
+#
+# 3) Note the value of rpc_port in cassandra.yaml. By default it is 9160 which
+# is fine.
+#
+# RUNNING THE PATCH:
+# 1) cd <patch-dir>
+#
+# 2) ./rhq48-storage-patch.sh <rhq-480-server-dir> <storage-ip-address> <thrift-port> <jmx-port>
+#
+# 3) Carefully review the script output for any errors.
+#
+# 4) Edit cassandra.yaml again and reset start_rpc: false
+#
+# ADDITIONAL NOTES:
+# The <jmx-port> defaults to 7299. If you are uncertain of what value to use,
+# you can find it in the UI. Log into RHQ and go to Administration --> Storage Nodes.
+#
+# If you are uncertain of the value to use for the storage node IP address, you
+# can find the correct value in the storage nodes admin UI as well.
+#
+# EXAMPLE:
+# ./rhq48-storage-patch.sh /opt/rhq-4.8.0 127.0.0.1 9160 7299
+# Usage: ./rhq48-storage-patch.sh <rhq-480-server-dir> <storage-ip-address> <thrift-port> <jmx-port>
function usage() {
- echo "Usage: $0 <rhq-server-dir> <storage-ip-address> <cql-port> <jmx-port>"
+ echo "Usage: $0 <rhq-480-server-dir> <storage-ip-address> <thrift-port> <jmx-port>"
}
if [ $# -ne 4 ]; then
@@ -16,11 +52,11 @@ fi
RHQ_SERVER_DIR=$1
CQL_HOSTNAME=$2
-CQL_PORT=$3
+THRIFT_PORT=$3
JMX_PORT=$4
export CQLSH_HOST=$2
-export CQL_PORT=$3
+export CQLSH_PORT=$3
PATCH="apache-cassandra-1.2.4-patch-1.jar"
commit c91c8f23416db836308b2bf3871fdda87559297e
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Mon Jul 22 14:12:08 2013 -0500
Update the storage node manager API for alerts to support UI functionality.
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml
index a9ce322..58c4eda 100644
--- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml
+++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml
@@ -11,8 +11,22 @@
<difference>
<className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className>
<differenceType>7012</differenceType> <!-- method added to an interface -->
+ <method>org.rhq.core.domain.util.PageList findNotAcknowledgedStorageNodeAlerts(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method>
+ <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification>
+ </difference>
+
+ <difference>
+ <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className>
+ <differenceType>7012</differenceType> <!-- method added to an interface -->
<method>org.rhq.core.domain.util.PageList findAllStorageNodeAlerts(org.rhq.core.domain.auth.Subject)</method>
<justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification>
</difference>
+ <difference>
+ <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className>
+ <differenceType>7012</differenceType> <!-- method added to an interface -->
+ <method>org.rhq.core.domain.util.PageList findAllStorageNodeAlerts(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method>
+ <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification>
+ </difference>
+
</differences>
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
index c2a7b46..b32ab5b 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
@@ -512,12 +512,22 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
@Override
public PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject) {
- return findStorageNodeAlerts(subject, false);
+ return findStorageNodeAlerts(subject, false, null);
+ }
+
+ @Override
+ public PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject, StorageNode storageNode) {
+ return findStorageNodeAlerts(subject, false, storageNode);
}
@Override
public PageList<Alert> findAllStorageNodeAlerts(Subject subject) {
- return findStorageNodeAlerts(subject, true);
+ return findStorageNodeAlerts(subject, true, null);
+ }
+
+ @Override
+ public PageList<Alert> findAllStorageNodeAlerts(Subject subject, StorageNode storageNode) {
+ return findStorageNodeAlerts(subject, true, storageNode);
}
/**
@@ -527,8 +537,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
* @param allAlerts if [true] then return all alerts; if [false] then return only alerts that are not acknowledged
* @return alerts
*/
- private PageList<Alert> findStorageNodeAlerts(Subject subject, boolean allAlerts) {
- Integer[] resouceIdsWithAlertDefinitions = findResourcesWithAlertDefinitions();
+ private PageList<Alert> findStorageNodeAlerts(Subject subject, boolean allAlerts, StorageNode storageNode) {
+ Integer[] resouceIdsWithAlertDefinitions = findResourcesWithAlertDefinitions(storageNode);
PageList<Alert> alerts = new PageList<Alert>();
if( resouceIdsWithAlertDefinitions != null && resouceIdsWithAlertDefinitions.length != 0 ){
@@ -555,31 +565,35 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return alerts;
}
- /**
- * Return resource Ids for all resources and sub-resources of Storage Nodes that
- * have alert definitions. This will be used by the resource criteria to find
- * all alerts triggered for storage nodes.
- *
- * @return
- */
- private Integer[] findResourcesWithAlertDefinitions() {
- List<Integer> resourceIdsWithAlertDefinitions = new ArrayList<Integer>();
- List<StorageNode> test2 = getStorageNodes();
+ @Override
+ public Integer[] findResourcesWithAlertDefinitions() {
+ return this.findResourcesWithAlertDefinitions(null);
+ }
+
+ @Override
+ public Integer[] findResourcesWithAlertDefinitions(StorageNode storageNode) {
+ List<StorageNode> initialStorageNodes;
+ if (storageNode == null) {
+ initialStorageNodes = getStorageNodes();
+ } else {
+ initialStorageNodes = Arrays.asList(storageNode);
+ }
Queue<Resource> unvisitedResources = new LinkedList<Resource>();
- for (StorageNode node : test2) {
- if (node.getResource() != null) {
- unvisitedResources.add(node.getResource());
+ for (StorageNode initialStorageNode : initialStorageNodes) {
+ if (initialStorageNode.getResource() != null) {
+ unvisitedResources.add(initialStorageNode.getResource());
}
}
- while(!unvisitedResources.isEmpty()){
+ List<Integer> resourceIdsWithAlertDefinitions = new ArrayList<Integer>();
+ while (!unvisitedResources.isEmpty()) {
Resource resource = unvisitedResources.poll();
if (resource.getAlertDefinitions() != null) {
resourceIdsWithAlertDefinitions.add(resource.getId());
}
- for(Resource child: resource.getChildResources()){
+ for (Resource child : resource.getChildResources()) {
unvisitedResources.add(child);
}
}
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java
index 6fca820..69b16c4 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java
@@ -81,6 +81,15 @@ public interface StorageNodeManagerLocal {
PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject);
/**
+ * Fetches the list of Storage Node related alerts that have not yet been acknowledged for the
+ * specified storage node.
+ *
+ * @param subject subject
+ * @return storage nodes alerts not acknowledged
+ */
+ PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject, StorageNode storageNode);
+
+ /**
* Fetches all the Storage Node related alerts.
*
* @param subject subject
@@ -89,6 +98,39 @@ public interface StorageNodeManagerLocal {
PageList<Alert> findAllStorageNodeAlerts(Subject subject);
/**
+ * Fetches all the Storage Node related alerts for the specified storage node.
+ *
+ * @param subject subject
+ * @return all storage nodes alerts
+ */
+ PageList<Alert> findAllStorageNodeAlerts(Subject subject, StorageNode storageNode);
+
+
+ /**
+ * Find ids for all resources and sub-resources of Storage Nodes that
+ * have alert definitions. This can be used by the resource criteria queries to find
+ * all alerts triggered for storage nodes resources.
+ *
+ * @return resource ids
+ */
+ Integer[] findResourcesWithAlertDefinitions();
+
+ /**
+ * Find ids for all resources and sub-resources, of the specified storage node, that
+ * have alert definitions. This can be used by the resource criteria queries to find
+ * all alerts triggered for storage nodes resources.
+ *
+ * If storage node is null, it finds ids for all resources and sub-resources of Storage Nodes that
+ * have alert definitions. Please see {@link #findResourcesWithAlertDefinitions()} for more details.
+ *
+ * @param storageNode storage node
+ *
+ * @return resource ids
+ */
+ Integer[] findResourcesWithAlertDefinitions(StorageNode storageNode);
+
+
+ /**
* <p>Prepares the node for subsequent upgrade.</p>
* <p> CAUTION: this method will set the RHQ server to maintenance mode, RHQ storage flushes all the data to disk
* and backup of all the keyspaces is created</p>
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java
index 7be1b07..75ac02b 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java
@@ -73,10 +73,27 @@ public interface StorageNodeManagerRemote {
PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject);
/**
+ * Fetches the list of Storage Node related alerts that have not yet been acknowledged for the
+ * specified storage node.
+ *
+ * @param subject subject
+ * @return storage nodes alerts not acknowledged
+ */
+ PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject, StorageNode storageNode);
+
+ /**
* Fetches all the Storage Node related alerts.
*
* @param subject subject
* @return all storage nodes alerts
*/
PageList<Alert> findAllStorageNodeAlerts(Subject subject);
+
+ /**
+ * Fetches all the Storage Node related alerts for the specified storage node.
+ *
+ * @param subject subject
+ * @return all storage nodes alerts
+ */
+ PageList<Alert> findAllStorageNodeAlerts(Subject subject, StorageNode storageNode);
}
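A minimal caller-side sketch of the per-node overloads added in this commit. It assumes the caller already holds a StorageNodeManagerRemote reference, an authenticated Subject, and a StorageNode (for instance one returned by getStorageNodes()); the class name, the Alert import package, and the report() helper are illustrative, not part of the commit.

    import org.rhq.core.domain.alert.Alert;
    import org.rhq.core.domain.auth.Subject;
    import org.rhq.core.domain.cloud.StorageNode;
    import org.rhq.core.domain.util.PageList;
    import org.rhq.enterprise.server.cloud.StorageNodeManagerRemote;

    // Illustrative only: print alert counts for a single storage node.
    public class StorageNodeAlertReport {
        public static void report(StorageNodeManagerRemote manager, Subject subject, StorageNode node) {
            // The new overloads scope the queries to one node instead of all storage nodes.
            PageList<Alert> all = manager.findAllStorageNodeAlerts(subject, node);
            PageList<Alert> unacked = manager.findNotAcknowledgedStorageNodeAlerts(subject, node);
            System.out.println(node.getAddress() + ": " + all.size() + " alerts, "
                + unacked.size() + " not acknowledged");
        }
    }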
commit c6c9e50398ffb5fc6d297ceffc369975a56b3ef9
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Mon Jul 22 14:09:52 2013 -0400
Add Windows support for storage install options and resource config update. Introduce
rhq-storage-wrapper.env to hold the configurable values (somewhat mimicking
cassandra-jvm.properties) and apply the values as token replacements in
rhq-storage-wrapper.conf.
Note that cassandra-jvm.properties is still kept up to date on Windows and
can be used generically, as needed.
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
index 6547043..60667cc 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
@@ -91,7 +91,14 @@ public class Deployer {
applyConfigChanges(confDir, "cassandra.yaml", tokens);
applyConfigChanges(confDir, "log4j-server.properties", tokens);
applyChangesToCassandraJvmProps(confDir, deploymentOptions);
-// applyConfigChanges(confDir, "cassandra-env.sh", tokens);
+
+ // For Windows, update the service wrapper env. It may not be necessary to have updated cassandra-jvm.properties
+ // as well as this file, but for now we'll update both, leaving the former as a dependably set file.
+ if (File.separatorChar == '\\') {
+ applyChangesToWindowsServiceWrapper(deployDir);
+ }
+
+ // applyConfigChanges(confDir, "cassandra-env.sh", tokens);
}
private void applyConfigChanges(File confDir, String fileName, Map<String, String> tokens)
@@ -109,8 +116,8 @@ public class Deployer {
rhqFile.delete();
} catch (IOException e) {
log.error("An unexpected error occurred while apply configuration changes to " + filteredFile, e);
- throw new DeploymentException("An unexpected error occurred while apply configuration changes to " +
- filteredFile, e);
+ throw new DeploymentException("An unexpected error occurred while apply configuration changes to "
+ + filteredFile, e);
}
}
@@ -132,8 +139,8 @@ public class Deployer {
String javaVersion = System.getProperty("java.version");
// The check here is taken right from cassandra-env.sh
- if ((!isOpenJDK() || javaVersion.compareTo("1.6.0") > 0) ||
- (javaVersion.equals("1.6.0") && getJavaPatchVersion() > 23)) {
+ if ((!isOpenJDK() || javaVersion.compareTo("1.6.0") > 0)
+ || (javaVersion.equals("1.6.0") && getJavaPatchVersion() > 23)) {
properties.put("java_agent", "-javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar");
}
@@ -165,6 +172,29 @@ public class Deployer {
return Integer.parseInt(javaVersion.substring(startIndex + 1, javaVersion.length()));
}
+ public void applyChangesToWindowsServiceWrapper(File deployDir) throws DeploymentException {
+ File wrapperDir = new File(deployDir, "../bin/wrapper");
+ File wrapperEnvFile = new File(wrapperDir, "rhq-storage-wrapper.env");
+
+ try {
+ log.info("Applying configuration changes to " + wrapperEnvFile);
+
+ PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(wrapperEnvFile.getAbsolutePath());
+ Properties wrapperEnvProps = propertiesUpdater.loadExistingProperties();
+
+ wrapperEnvProps.setProperty("set.heap_min", "-Xms" + deploymentOptions.getHeapSize());
+ wrapperEnvProps.setProperty("set.heap_max", "-Xmx" + deploymentOptions.getHeapSize());
+ wrapperEnvProps.setProperty("set.heap_new", "-Xmn" + deploymentOptions.getHeapNewSize());
+ wrapperEnvProps.setProperty("set.thread_stack_size", "-Xss" + deploymentOptions.getStackSize());
+ wrapperEnvProps.setProperty("set.jmx_port", deploymentOptions.getJmxPort().toString());
+
+ propertiesUpdater.update(wrapperEnvProps);
+ } catch (IOException e) {
+ log.error("An error occurred while updating " + wrapperEnvFile, e);
+ throw new DeploymentException("An error occurred while updating " + wrapperEnvFile, e);
+ }
+ }
+
public void updateFilePerms() {
File deployDir = new File(deploymentOptions.getBasedir());
File binDir = new File(deployDir, "bin");
@@ -187,8 +217,8 @@ public class Deployer {
try {
authFile.delete();
- StreamUtil.copy(new StringReader(StringUtil.collectionToString(addresses, "\n")),
- new FileWriter(authFile), true);
+ StreamUtil.copy(new StringReader(StringUtil.collectionToString(addresses, "\n")), new FileWriter(authFile),
+ true);
} catch (IOException e) {
throw new RuntimeException("Failed to update " + authFile);
}
diff --git a/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.conf b/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.conf
index 14f2ff1..de83364 100644
--- a/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.conf
+++ b/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.conf
@@ -66,23 +66,25 @@ wrapper.app.parameter.3="-Dcassandra-foreground=yes"
# Additional JVM parameters (quotes ARE needed)
wrapper.java.additional.1="-ea"
wrapper.java.additional.2="-javaagent:"%RHQ_STORAGE_HOME%\lib\jamm-0.2.5.jar""
-wrapper.java.additional.3="-Xms1G"
-wrapper.java.additional.4="-Xmx1G"
-wrapper.java.additional.5="-XX:+HeapDumpOnOutOfMemoryError"
-wrapper.java.additional.6="-XX:+UseParNewGC"
-wrapper.java.additional.7="-XX:+UseConcMarkSweepGC"
-wrapper.java.additional.8="-XX:+CMSParallelRemarkEnabled"
-wrapper.java.additional.9="-XX:SurvivorRatio=8"
-wrapper.java.additional.10="-XX:MaxTenuringThreshold=1"
-wrapper.java.additional.11="-XX:CMSInitiatingOccupancyFraction=75"
-wrapper.java.additional.12="-XX:+UseCMSInitiatingOccupancyOnly"
-wrapper.java.additional.13="-Dcom.sun.management.jmxremote.port=7299"
-wrapper.java.additional.14="-Dcom.sun.management.jmxremote.ssl=false"
-wrapper.java.additional.15="-Dcom.sun.management.jmxremote.authenticate=false"
-wrapper.java.additional.16="-Dlog4j.configuration=log4j-server.properties"
-wrapper.java.additional.17="-Dlog4j.defaultInitOverride=true"
-
-# We want to make sure the Storage Node starts in the casandra bin directory
+wrapper.java.additional.3="%heap_min%"
+wrapper.java.additional.4="%heap_max%"
+wrapper.java.additional.5="%heap_new%"
+wrapper.java.additional.6="%heap_dump_on_OOMError%"
+wrapper.java.additional.7="%heap_dump_dir%"
+wrapper.java.additional.8="-XX:+UseConcMarkSweepGC"
+wrapper.java.additional.9="-XX:+CMSParallelRemarkEnabled"
+wrapper.java.additional.10="-XX:SurvivorRatio=8"
+wrapper.java.additional.11="-XX:MaxTenuringThreshold=1"
+wrapper.java.additional.12="-XX:CMSInitiatingOccupancyFraction=75"
+wrapper.java.additional.13="-XX:+UseCMSInitiatingOccupancyOnly"
+wrapper.java.additional.14="-XX:+UseParNewGC"
+wrapper.java.additional.15="-Dcom.sun.management.jmxremote.port=%jmx_port%"
+wrapper.java.additional.16="-Dcom.sun.management.jmxremote.ssl=false"
+wrapper.java.additional.17="-Dcom.sun.management.jmxremote.authenticate=false"
+wrapper.java.additional.18="-Dlog4j.configuration=log4j-server.properties"
+wrapper.java.additional.19="-Dlog4j.defaultInitOverride=true"
+
+# We want to make sure the Storage Node starts in the cassandra bin directory
wrapper.working.dir=%RHQ_STORAGE_HOME%/bin
#*****************************************************************************
diff --git a/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.env b/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.env
new file mode 100644
index 0000000..1441e0a
--- /dev/null
+++ b/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.env
@@ -0,0 +1,24 @@
+#*****************************************************************************
+# RHQ Storage Node Java Service Wrapper Environment Settings File
+#
+# This file specifies a set of environment variables that will be
+# applied to the Storage Node JVM.
+#
+# THIS FILE SHOULD NOT BE EDITED!
+#
+# This file represents the values managed as RHQ Storage Node resource
+# configuration values, or set by the installer.
+#
+#*****************************************************************************
+
+set.jmx_port=7299
+
+set.heap_min=-Xms512M
+set.heap_max=-Xmx512M
+set.heap_new=-Xmn128M
+
+set.thread_stack_size=-Xss180k
+
+set.heap_dump_on_OOMError=-XX:+HeapDumpOnOutOfMemoryError
+
+set.heap_dump_dir=
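Each set.X entry in the new env file above backs the matching %X% token in rhq-storage-wrapper.conf (for example set.jmx_port feeds -Dcom.sun.management.jmxremote.port=%jmx_port%). A standalone sketch, using only java.util.Properties, that reads the effective values back out; the file path is whatever copy of rhq-storage-wrapper.env is passed on the command line:

    import java.io.FileInputStream;
    import java.io.InputStream;
    import java.util.Properties;

    // Sketch only: dump the JVM settings currently held in rhq-storage-wrapper.env.
    public class WrapperEnvReader {
        public static void main(String[] args) throws Exception {
            Properties env = new Properties();
            InputStream in = new FileInputStream(args[0]); // e.g. bin/wrapper/rhq-storage-wrapper.env
            try {
                env.load(in);
            } finally {
                in.close();
            }
            System.out.println("jmx_port = " + env.getProperty("set.jmx_port"));
            System.out.println("heap_min = " + env.getProperty("set.heap_min"));
            System.out.println("heap_max = " + env.getProperty("set.heap_max"));
            System.out.println("heap_new = " + env.getProperty("set.heap_new"));
        }
    }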
diff --git a/modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-storage-wrapper.inc b/modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-storage-wrapper.inc
index ee0d448..6941358 100644
--- a/modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-storage-wrapper.inc
+++ b/modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-storage-wrapper.inc
@@ -4,11 +4,11 @@
#
# override and lower the initial memory profile
-wrapper.java.additional.18=-Xms128M
-wrapper.java.additional.19=-Xmx256M
+wrapper.java.additional.20=-Xms128M
+wrapper.java.additional.21=-Xmx256M
# enable remote debugging
-#wrapper.java.additional.20=-agentlib:jdwp=transport=dt_socket,address=8788,server=y,suspend=n
+#wrapper.java.additional.22=-agentlib:jdwp=transport=dt_socket,address=8788,server=y,suspend=n
# disable JVM startup timeout
wrapper.startup.timeout=0
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java
index 8d1771d..1667877 100644
--- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java
+++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java
@@ -19,10 +19,17 @@ import org.rhq.core.util.StringUtil;
public class StorageNodeConfigDelegate implements ConfigurationFacet {
private File jvmOptsFile;
+ private File wrapperEnvFile;
public StorageNodeConfigDelegate(File basedir) {
File confDir = new File(basedir, "conf");
jvmOptsFile = new File(confDir, "cassandra-jvm.properties");
+
+ // for windows, config props also get propagated to the wrapper env
+ if (isWindows()) {
+ File wrapperDir = new File(basedir, "../bin/wrapper");
+ wrapperEnvFile = new File(wrapperDir, "rhq-storage-wrapper.env");
+ }
}
@Override
@@ -56,6 +63,16 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet {
return config;
}
+ /**
+ * Ensure that the path uses only forward slash.
+ * @param path
+ * @return forward-slashed path, or null if path is null
+ */
+ private static String useForwardSlash(String path) {
+
+ return (null != path) ? path.replace('\\', '/') : null;
+ }
+
private String getHeapMinProp(Properties properties) {
String value = properties.getProperty("heap_min");
@@ -115,47 +132,14 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet {
@Override
public void updateResourceConfiguration(ConfigurationUpdateReport configurationUpdateReport) {
try {
- PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(jvmOptsFile.getAbsolutePath());
- Properties properties = propertiesUpdater.loadExistingProperties();
-
Configuration config = configurationUpdateReport.getConfiguration();
- String maxHeapSize = config.getSimpleValue("maxHeapSize");
- if (!StringUtil.isEmpty(maxHeapSize)) {
- validateHeapArg("maxHeapSize", maxHeapSize);
- // We want min and max heap to be the same
- properties.setProperty("heap_min", "-Xms" + maxHeapSize);
- properties.setProperty("heap_max", "-Xmx" + maxHeapSize);
- }
-
- String heapNewSize = config.getSimpleValue("heapNewSize");
- if (!StringUtil.isEmpty(heapNewSize)) {
- validateHeapArg("heapNewSize", heapNewSize);
- properties.setProperty("heap_new", "-Xmn" + heapNewSize);
- }
+ updateCassandraJvmProps(config);
- String threadStackSize = config.getSimpleValue("threadStackSize");
- if (!StringUtil.isEmpty(threadStackSize)) {
- validateStackArg(threadStackSize);
- properties.setProperty("thread_stack_size", "-Xss" + threadStackSize + "k");
+ if (isWindows()) {
+ updateWrapperEnv(config);
}
- PropertySimple heapDumpOnOMMError = config.getSimple("heapDumpOnOOMError");
- if (heapDumpOnOMMError != null) {
- if (heapDumpOnOMMError.getBooleanValue()) {
- properties.setProperty("heap_dump_on_OOMError", "-XX:+HeapDumpOnOutOfMemoryError");
- } else {
- properties.setProperty("heap_dump_on_OOMError", "");
- }
- }
-
- String heapDumpDir = config.getSimpleValue("heapDumpDir");
- if (!StringUtil.isEmpty(heapDumpDir)) {
- properties.setProperty("heap_dump_dir", heapDumpDir);
- }
-
- propertiesUpdater.update(properties);
-
configurationUpdateReport.setStatus(ConfigurationUpdateStatus.SUCCESS);
} catch (IllegalArgumentException e) {
configurationUpdateReport.setErrorMessage("No configuration update was applied: " + e.getMessage());
@@ -164,6 +148,88 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet {
}
}
+ private void updateCassandraJvmProps(Configuration config) throws IOException {
+ PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(jvmOptsFile.getAbsolutePath());
+ Properties properties = propertiesUpdater.loadExistingProperties();
+
+ String maxHeapSize = config.getSimpleValue("maxHeapSize");
+ if (!StringUtil.isEmpty(maxHeapSize)) {
+ validateHeapArg("maxHeapSize", maxHeapSize);
+ // We want min and max heap to be the same
+ properties.setProperty("heap_min", "-Xms" + maxHeapSize);
+ properties.setProperty("heap_max", "-Xmx" + maxHeapSize);
+ }
+
+ String heapNewSize = config.getSimpleValue("heapNewSize");
+ if (!StringUtil.isEmpty(heapNewSize)) {
+ validateHeapArg("heapNewSize", heapNewSize);
+ properties.setProperty("heap_new", "-Xmn" + heapNewSize);
+ }
+
+ String threadStackSize = config.getSimpleValue("threadStackSize");
+ if (!StringUtil.isEmpty(threadStackSize)) {
+ validateStackArg(threadStackSize);
+ properties.setProperty("thread_stack_size", "-Xss" + threadStackSize + "k");
+ }
+
+ PropertySimple heapDumpOnOMMError = config.getSimple("heapDumpOnOOMError");
+ if (heapDumpOnOMMError != null) {
+ if (heapDumpOnOMMError.getBooleanValue()) {
+ properties.setProperty("heap_dump_on_OOMError", "-XX:+HeapDumpOnOutOfMemoryError");
+ } else {
+ properties.setProperty("heap_dump_on_OOMError", "");
+ }
+ }
+
+ String heapDumpDir = useForwardSlash(config.getSimpleValue("heapDumpDir"));
+ if (!StringUtil.isEmpty(heapDumpDir)) {
+ properties.setProperty("heap_dump_dir", heapDumpDir);
+ }
+
+ propertiesUpdater.update(properties);
+ }
+
+ private void updateWrapperEnv(Configuration config) throws IOException {
+ PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(wrapperEnvFile.getAbsolutePath());
+ Properties properties = propertiesUpdater.loadExistingProperties();
+
+ String maxHeapSize = config.getSimpleValue("maxHeapSize");
+ if (!StringUtil.isEmpty(maxHeapSize)) {
+ validateHeapArg("maxHeapSize", maxHeapSize);
+ // We want min and max heap to be the same
+ properties.setProperty("set.heap_min", "-Xms" + maxHeapSize);
+ properties.setProperty("set.heap_max", "-Xmx" + maxHeapSize);
+ }
+
+ String heapNewSize = config.getSimpleValue("heapNewSize");
+ if (!StringUtil.isEmpty(heapNewSize)) {
+ validateHeapArg("heapNewSize", heapNewSize);
+ properties.setProperty("set.heap_new", "-Xmn" + heapNewSize);
+ }
+
+ String threadStackSize = config.getSimpleValue("threadStackSize");
+ if (!StringUtil.isEmpty(threadStackSize)) {
+ validateStackArg(threadStackSize);
+ properties.setProperty("set.thread_stack_size", "-Xss" + threadStackSize + "k");
+ }
+
+ PropertySimple heapDumpOnOMMError = config.getSimple("heapDumpOnOOMError");
+ if (heapDumpOnOMMError != null) {
+ if (heapDumpOnOMMError.getBooleanValue()) {
+ properties.setProperty("set.heap_dump_on_OOMError", "-XX:+HeapDumpOnOutOfMemoryError");
+ } else {
+ properties.setProperty("set.heap_dump_on_OOMError", "");
+ }
+ }
+
+ String heapDumpDir = useForwardSlash(config.getSimpleValue("heapDumpDir"));
+ if (!StringUtil.isEmpty(heapDumpDir)) {
+ properties.setProperty("set.heap_dump_dir", heapDumpDir);
+ }
+
+ propertiesUpdater.update(properties);
+ }
+
private void validateHeapArg(String name, String value) {
if (value.length() < 2) {
throw new IllegalArgumentException(value + " is not a legal value for the property [" + name + "]");
@@ -189,4 +255,8 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet {
throw new IllegalArgumentException(value + " is not a legal value for the property [threadStackSize]");
}
}
+
+ private boolean isWindows() {
+ return File.separatorChar == '\\';
+ }
}
commit 969ea38e7254d61903c699380bd066d6cad3e85e
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Mon Jul 22 14:06:39 2013 -0400
When recreating Windows services, make sure they get started appropriately.
diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java
index 8c885b0..61c8a9c 100644
--- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java
+++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java
@@ -36,12 +36,16 @@ import java.util.Properties;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.exec.DefaultExecuteResultHandler;
import org.apache.commons.exec.DefaultExecutor;
+import org.apache.commons.exec.ExecuteException;
import org.apache.commons.exec.Executor;
import org.apache.commons.exec.PumpStreamHandler;
+
import org.jboss.as.controller.client.ModelControllerClient;
+
import org.rhq.common.jbossas.client.controller.DeploymentJBossASClient;
import org.rhq.common.jbossas.client.controller.MCCHelper;
import org.rhq.server.control.ControlCommand;
+import org.rhq.server.control.RHQControlException;
/**
* Common code for commands that perform installs. Basically shared code for Install and Upgrade commands.
@@ -55,7 +59,7 @@ public abstract class AbstractInstall extends ControlCommand {
protected final String STORAGE_CONFIG_PROP = "rhqctl.install.storage-config";
- protected void installWindowsService(File workingDir, String batFile) throws Exception {
+ protected void installWindowsService(File workingDir, String batFile, boolean start) throws Exception {
Executor executor = new DefaultExecutor();
executor.setWorkingDirectory(workingDir);
executor.setStreamHandler(new PumpStreamHandler());
@@ -69,6 +73,11 @@ public abstract class AbstractInstall extends ControlCommand {
commandLine = getCommandLine(batFile, "install");
executor.execute(commandLine);
+
+ if (start) {
+ commandLine = getCommandLine(batFile, "start");
+ executor.execute(commandLine);
+ }
}
protected void validateCustomStorageDataDirectories(CommandLine commandLine, List<String> errors) {
@@ -100,6 +109,54 @@ public abstract class AbstractInstall extends ControlCommand {
}
}
+ protected void waitForProcessToStop(String pid) throws Exception {
+
+ if (isWindows() || pid == null) {
+ // For the moment we have no better way to just wait some time
+ Thread.sleep(10 * 1000L);
+ } else {
+ int tries = 5;
+ while (tries > 0) {
+ log.debug(".");
+ if (!isUnixPidRunning(pid)) {
+ break;
+ }
+ Thread.sleep(2 * 1000L);
+ tries--;
+ }
+ if (tries == 0) {
+ throw new RHQControlException("Process [" + pid
+ + "] did not finish yet. Terminate it manually and retry.");
+ }
+ }
+
+ }
+
+ protected boolean isUnixPidRunning(String pid) {
+
+ Executor executor = new DefaultExecutor();
+ executor.setWorkingDirectory(getBinDir());
+ executor.setStreamHandler(new PumpStreamHandler());
+ org.apache.commons.exec.CommandLine commandLine;
+
+ commandLine = new org.apache.commons.exec.CommandLine("/bin/kill").addArgument("-0").addArgument(pid);
+
+ try {
+ int code = executor.execute(commandLine);
+ if (code != 0) {
+ return false;
+ }
+ } catch (ExecuteException ee) {
+ if (ee.getExitValue() == 1) {
+ // return code 1 means process does not exist
+ return false;
+ }
+ } catch (IOException e) {
+ log.error("Checking for running process failed: " + e.getMessage());
+ }
+ return true;
+ }
+
protected void waitForRHQServerToInitialize() throws Exception {
try {
final long messageInterval = 30000L;
@@ -287,7 +344,6 @@ public abstract class AbstractInstall extends ControlCommand {
log.debug("Stopping RHQ server...");
-
Executor executor = new DefaultExecutor();
executor.setWorkingDirectory(serverBinDir);
executor.setStreamHandler(new PumpStreamHandler());
diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java
index bb6aa40..0808db2 100644
--- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java
+++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java
@@ -142,7 +142,7 @@ public class Install extends AbstractInstall {
if (!isStorageInstalled()) {
installStorageNode(getStorageBasedir(), commandLine);
} else if (isWindows()) {
- installWindowsService(getBinDir(), "rhq-storage");
+ installWindowsService(getBinDir(), "rhq-storage", true);
}
if (!isServerInstalled()) {
@@ -150,7 +150,7 @@ public class Install extends AbstractInstall {
runRHQServerInstaller();
waitForRHQServerToInitialize();
} else if (isWindows()) {
- installWindowsService(getBinDir(), "rhq-server");
+ installWindowsService(getBinDir(), "rhq-server", true);
}
if (!isAgentInstalled()) {
@@ -158,13 +158,15 @@ public class Install extends AbstractInstall {
File agentBasedir = getAgentBasedir();
installAgent(agentBasedir);
configureAgent(agentBasedir, commandLine);
- if (Boolean.parseBoolean(commandLine.getOptionValue(AGENT_AUTOSTART_OPTION, "true"))) {
+ boolean start = Boolean.parseBoolean(commandLine.getOptionValue(AGENT_AUTOSTART_OPTION, "true"));
+ if (start) {
startAgent(agentBasedir, true);
} else {
log.info("The agent was installed but was told not to start automatically.");
}
} else if (isWindows()) {
- installWindowsService(new File(getAgentBasedir(), "bin"), "rhq-agent-wrapper");
+ boolean start = Boolean.parseBoolean(commandLine.getOptionValue(AGENT_AUTOSTART_OPTION, "true"));
+ installWindowsService(new File(getAgentBasedir(), "bin"), "rhq-agent-wrapper", start);
}
} else {
@@ -173,7 +175,7 @@ public class Install extends AbstractInstall {
log.info("The RHQ storage node is already installed in " + new File(getBaseDir(), "storage"));
if (isWindows()) {
- installWindowsService(getBinDir(), "rhq-storage");
+ installWindowsService(getBinDir(), "rhq-storage", true);
} else {
log.info("Skipping storage node installation.");
}
@@ -200,7 +202,7 @@ public class Install extends AbstractInstall {
log.warn("The RHQ server is already installed.");
if (isWindows()) {
- installWindowsService(getBinDir(), "rhq-server");
+ installWindowsService(getBinDir(), "rhq-server", true);
} else {
log.info("Skipping server installation.");
}
@@ -217,8 +219,10 @@ public class Install extends AbstractInstall {
if (isAgentInstalled() && !commandLine.hasOption(STORAGE_OPTION)) {
log.info("The RHQ agent is already installed in [" + getAgentBasedir() + "]");
+ boolean start = Boolean
+ .parseBoolean(commandLine.getOptionValue(AGENT_AUTOSTART_OPTION, "true"));
if (isWindows()) {
- installWindowsService(new File(getAgentBasedir(), "bin"), "rhq-agent-wrapper");
+ installWindowsService(new File(getAgentBasedir(), "bin"), "rhq-agent-wrapper", start);
} else {
log.info("Skipping agent installation.");
}
commit e1461de493712c30e454fa51f946a72cadf2c257
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Mon Jul 22 07:47:00 2013 -0500
Fix merge rebase issue.
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
index 4744e96..3b0aa5b 100644
--- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
+++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
@@ -41,8 +41,8 @@ import org.mc4j.ems.connection.bean.attribute.EmsAttribute;
import org.mc4j.ems.connection.bean.operation.EmsOperation;
import org.rhq.core.domain.configuration.Configuration;
-import org.rhq.core.domain.configuration.Property;
import org.rhq.core.domain.configuration.ConfigurationUpdateStatus;
+import org.rhq.core.domain.configuration.Property;
import org.rhq.core.domain.configuration.PropertyList;
import org.rhq.core.domain.configuration.PropertyMap;
import org.rhq.core.domain.configuration.PropertySimple;
@@ -116,7 +116,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
if (!configurationUpdate.getStatus().equals(ConfigurationUpdateStatus.SUCCESS)) {
result.setErrorMessage(configurationUpdate.getErrorMessage());
-
+ }
+
return result;
}
commit 642421966cbea990df8cf0593e1fb82a4c1a98a8
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Mon Jul 22 07:40:53 2013 -0500
Attempt to improve the operation trigger for updates on the storage node configuration.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
index c77e229..c2a7b46 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
@@ -64,6 +64,7 @@ import org.rhq.core.domain.measurement.MeasurementUnits;
import org.rhq.core.domain.operation.OperationRequestStatus;
import org.rhq.core.domain.operation.ResourceOperationHistory;
import org.rhq.core.domain.operation.bean.GroupOperationSchedule;
+import org.rhq.core.domain.operation.bean.ResourceOperationSchedule;
import org.rhq.core.domain.resource.Resource;
import org.rhq.core.domain.resource.ResourceType;
import org.rhq.core.domain.resource.group.ResourceGroup;
@@ -101,8 +102,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
private static final String RHQ_STORAGE_JMX_PORT_PROPERTY = "jmxPort";
private static final String RHQ_STORAGE_ADDRESS_PROPERTY = "host";
- private static final int OPERATION_QUERY_TIMEOUT = 1000;
- private static final int MAX_ITERATIONS = 5;
+ private static final int OPERATION_QUERY_TIMEOUT = 20000;
+ private static final int MAX_ITERATIONS = 6;
private static final String UPDATE_CONFIGURATION_OPERATION = "updateConfiguration";
private static final String RESTART_OPERATION = "restart";
@@ -636,8 +637,16 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
//scheduling the operation
long operationStartTime = System.currentTimeMillis();
- operationManager.scheduleResourceOperation(subject, storageNodeResource.getId(), operationToRun, 0, 0, 0, 0,
- parameters, "Run by StorageNodeManagerBean");
+
+ ResourceOperationSchedule newSchedule = new ResourceOperationSchedule();
+ newSchedule.setJobTrigger(JobTrigger.createNowTrigger());
+ newSchedule.setResource(storageNodeResource);
+ newSchedule.setOperationName(operationToRun);
+ newSchedule.setDescription("Run by StorageNodeManagerBean");
+ newSchedule.setParameters(parameters);
+
+ operationManager.scheduleResourceOperation(subject, newSchedule);
+ entityManager.flush();
//waiting for the operation result then return it
int iteration = 0;
commit a7e279884e1a7ee6c0782c1d5e79a27dcff26b76
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Mon Jul 22 07:39:55 2013 -0500
Implement the storage node operation for updating jvm options.
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
index 006dd26..4744e96 100644
--- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
+++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
@@ -42,6 +42,7 @@ import org.mc4j.ems.connection.bean.operation.EmsOperation;
import org.rhq.core.domain.configuration.Configuration;
import org.rhq.core.domain.configuration.Property;
+import org.rhq.core.domain.configuration.ConfigurationUpdateStatus;
import org.rhq.core.domain.configuration.PropertyList;
import org.rhq.core.domain.configuration.PropertyMap;
import org.rhq.core.domain.configuration.PropertySimple;
@@ -102,7 +103,20 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
private OperationResult updateConfiguration(Configuration params) {
OperationResult result = new OperationResult("Configuration updated.");
- //TODO: implement updates to various sub-resources here
+
+ //update storage node jvm settings
+ Configuration config = new Configuration();
+ config.put(new PropertySimple("minHeapSize", params.getSimpleValue("heapSize")));
+ config.put(new PropertySimple("maxHeapSize", params.getSimpleValue("heapSize")));
+ config.put(new PropertySimple("heapNewSize", params.getSimpleValue("heapNewSize")));
+ config.put(new PropertySimple("threadStackSize", params.getSimpleValue("threadStackSize")));
+
+ ConfigurationUpdateReport configurationUpdate = new ConfigurationUpdateReport(config);
+ this.updateResourceConfiguration(configurationUpdate);
+
+ if (!configurationUpdate.getStatus().equals(ConfigurationUpdateStatus.SUCCESS)) {
+ result.setErrorMessage(configurationUpdate.getErrorMessage());
+
return result;
}
commit 26a7f7f476e03367d52e9e229f2e583c789e7bec
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Mon Jul 22 07:38:15 2013 -0500
Add two more properties to the list of configurable properties for the storage node. The new properties are heap new size and thread stack size.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java
index e2c64f9..32d8ab3 100644
--- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java
+++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java
@@ -33,6 +33,8 @@ public class StorageNodeConfigurationComposite implements Serializable {
private StorageNode storageNode;
private int jmxPort;
private String heapSize;
+ private String threadStackSize;
+ private String heapNewSize;
public StorageNodeConfigurationComposite() {
// GWT needs this
@@ -85,14 +87,44 @@ public class StorageNodeConfigurationComposite implements Serializable {
this.heapSize = heapSize;
}
+ /**
+ * @return the threadStackSize
+ */
+ public String getThreadStackSize() {
+ return threadStackSize;
+ }
+
+ /**
+ * @param threadStackSize the threadStackSize to set
+ */
+ public void setThreadStackSize(String threadStackSize) {
+ this.threadStackSize = threadStackSize;
+ }
+
+ /**
+ * @return the heapNewSize
+ */
+ public String getHeapNewSize() {
+ return heapNewSize;
+ }
+
+ /**
+ * @param heapNewSize the heapNewSize to set
+ */
+ public void setHeapNewSize(String heapNewSize) {
+ this.heapNewSize = heapNewSize;
+ }
+
/* (non-Javadoc)
* @see java.lang.Object#toString()
*/
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("storageNode.addresss=").append(storageNode.getAddress()).append(", ");
+ builder.append("jmxPort=").append(jmxPort).append(",");
builder.append("heapSize=").append(heapSize).append(", ");
- builder.append("jmxPort=").append(jmxPort).append("");
+ builder.append("heapNewSize=").append(heapSize).append(", ");
+ builder.append("threadStackSize=").append(threadStackSize).append("");
return builder.toString();
}
}
commit a0285ac83b5b673dab0582f9ff52baaa746fd8cd
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Mon Jul 22 07:37:10 2013 -0500
Update the set of calculated metrics for Cassandra to incorporate latest feedback. There are three metrics now: total system used space percentage, data file disk space used percentage, and free to data disk space ratio.
Also, update the alert definition for disk space to monitor all three metrics.
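For reference, a worked example of the three calculated metrics with made-up numbers (all in MB, matching how the Sigar usage values are consumed in the code below); the rounding mirrors the Math.round(raw * 100d) / 100d step in getDiskUsageMetric:

    // Hypothetical node: 102400 MB total disk, 61440 MB used overall, 40960 MB free,
    // and 20480 MB of Cassandra data files across the monitored partitions.
    public class DiskMetricExample {
        public static void main(String[] args) {
            double totalDiskSpace = 102400d, totalUsedDiskSpace = 61440d, totalFreeDiskSpace = 40960d;
            double dataSize = 20480d;
            System.out.println("Calculated.DataDiskUsedPercentage  = " + round2(dataSize / totalDiskSpace));           // 0.2
            System.out.println("Calculated.TotalDiskUsedPercentage = " + round2(totalUsedDiskSpace / totalDiskSpace)); // 0.6
            System.out.println("Calculated.FreeDiskToDataSizeRatio = " + round2(totalFreeDiskSpace / dataSize));       // 2.0
        }

        private static double round2(double raw) {
            return Math.round(raw * 100d) / 100d;
        }
    }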
diff --git a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java
index 8995ec0..37d10a8 100644
--- a/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java
+++ b/modules/enterprise/server/plugins/alertdef-rhq/src/main/java/org/rhq/enterprise/server/plugins/alertdef/AlertDefinitionServerPluginComponent.java
@@ -61,7 +61,9 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone
private final Log log = LogFactory.getLog(AlertDefinitionServerPluginComponent.class);
- private static final String PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.PartitionDiskUsedPercentage";
+ private static final String DATA_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.DataDiskUsedPercentage";
+ private static final String TOTAL_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.TotalDiskUsedPercentage";
+ private static final String FREE_DISK_TO_DATA_SIZE_RATIO_METRIC_NAME = "Calculated.FreeDiskToDataSizeRatio";
static private final List<InjectedTemplate> injectedTemplates;
static private final InjectedTemplate storageNodeHighHeapTemplate;
@@ -297,23 +299,42 @@ public class AlertDefinitionServerPluginComponent implements ServerPluginCompone
newTemplate.setRecoveryId(0);
newTemplate.setEnabled(true);
- AlertCondition ac = new AlertCondition();
- ac.setCategory(AlertConditionCategory.THRESHOLD);
- ac.setComparator(">");
- ac.setThreshold(0.5D);
+
+ AlertCondition dataDiskUsedAlertCondition = new AlertCondition();
+ dataDiskUsedAlertCondition.setCategory(AlertConditionCategory.THRESHOLD);
+ dataDiskUsedAlertCondition.setComparator(">");
+ dataDiskUsedAlertCondition.setThreshold(0.5D);
+
+ AlertCondition totalDiskUsedAlertCondition = new AlertCondition();
+ totalDiskUsedAlertCondition.setCategory(AlertConditionCategory.THRESHOLD);
+ totalDiskUsedAlertCondition.setComparator(">");
+ totalDiskUsedAlertCondition.setThreshold(0.75D);
+
+ AlertCondition freeSpaveDataRatioAlertCondition = new AlertCondition();
+ freeSpaveDataRatioAlertCondition.setCategory(AlertConditionCategory.THRESHOLD);
+ freeSpaveDataRatioAlertCondition.setComparator("<");
+ freeSpaveDataRatioAlertCondition.setThreshold(1.5D);
List<Integer> measurementDefinitionIds = new ArrayList<Integer>(1);
for (MeasurementDefinition d : resourceType.getMetricDefinitions()) {
- if (PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME.equals(d.getName())) {
+ if (DATA_DISK_USED_PERCENTAGE_METRIC_NAME.equals(d.getName())) {
measurementDefinitionIds.add(d.getId());
- ac.setMeasurementDefinition(d);
- ac.setName(d.getDisplayName());
+ dataDiskUsedAlertCondition.setMeasurementDefinition(d);
+ dataDiskUsedAlertCondition.setName(d.getDisplayName());
+ } else if (TOTAL_DISK_USED_PERCENTAGE_METRIC_NAME.equals(d.getName())) {
+ measurementDefinitionIds.add(d.getId());
+ totalDiskUsedAlertCondition.setMeasurementDefinition(d);
+ totalDiskUsedAlertCondition.setName(d.getDisplayName());
+ } else if (FREE_DISK_TO_DATA_SIZE_RATIO_METRIC_NAME.equals(d.getName())) {
+ measurementDefinitionIds.add(d.getId());
+ freeSpaveDataRatioAlertCondition.setMeasurementDefinition(d);
+ freeSpaveDataRatioAlertCondition.setName(d.getDisplayName());
}
}
- assert null != ac.getMeasurementDefinition() : "Did not find expected measurement definition "
- + PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME + " for "
- + resourceType;
- newTemplate.addCondition(ac);
+
+ newTemplate.addCondition(dataDiskUsedAlertCondition);
+ newTemplate.addCondition(totalDiskUsedAlertCondition);
+ newTemplate.addCondition(freeSpaveDataRatioAlertCondition);
AlertDampening dampener = new AlertDampening(AlertDampening.Category.PARTIAL_COUNT);
dampener.setPeriod(15);
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java
index e5a2283..76ce2b2 100644
--- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java
+++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/StorageServiceComponent.java
@@ -58,7 +58,9 @@ import org.rhq.plugins.jmx.JMXComponent;
public class StorageServiceComponent extends ComplexConfigurationResourceComponent {
private static final String OWNERSHIP_METRIC_NAME = "Ownership";
- private static final String PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.PartitionDiskUsedPercentage";
+ private static final String DATA_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.DataDiskUsedPercentage";
+ private static final String TOTAL_DISK_USED_PERCENTAGE_METRIC_NAME = "Calculated.TotalDiskUsedPercentage";
+ private static final String FREE_DISK_TO_DATA_SIZE_RATIO_METRIC_NAME = "Calculated.FreeDiskToDataSizeRatio";
private static final String DATA_FILE_LOCATIONS_NAME = "AllDataFileLocations";
private static final String LOAD_NAME = "Load";
@@ -156,6 +158,22 @@ public class StorageServiceComponent extends ComplexConfigurationResourceCompone
@Override
protected void getValues(MeasurementReport report, Set<MeasurementScheduleRequest> requests, EmsBean bean) {
super.getValues(report, requests, bean);
+
+ EmsAttribute loadAttribute = bean.getAttribute(LOAD_NAME);
+ Object loadValue = loadAttribute.refresh();
+
+ EmsAttribute dataFileLocationAttribute = bean.getAttribute(DATA_FILE_LOCATIONS_NAME);
+ Object dataFileLocationValue = dataFileLocationAttribute.refresh();
+
+ double load = 0;
+ if (loadValue != null && dataFileLocationValue != null && dataFileLocationValue instanceof String[]) {
+ //Please visit for details: https://issues.apache.org/jira/browse/CASSANDRA-2749
+ //The average usage of all partitions with the data will be reported.
+ //Cassandra selects the partition with most free space for SStable flush and compaction.
+ load = Double.parseDouble(loadValue.toString());
+ load = load / 1024d; //transform in MB
+ }
+
for (MeasurementScheduleRequest request : requests) {
if (OWNERSHIP_METRIC_NAME.equals(request.getName()) && host != null) {
EmsAttribute attribute = bean.getAttribute(OWNERSHIP_METRIC_NAME);
@@ -179,49 +197,53 @@ public class StorageServiceComponent extends ComplexConfigurationResourceCompone
report.addData(new MeasurementDataNumeric(request, value.doubleValue()));
}
break;
- } else if (PARTITION_DISK_USED_PERCENTAGE_METRIC_NAME.equals(request.getName())) {
-
-
- EmsAttribute loadAttribute = bean.getAttribute(LOAD_NAME);
- Object loadValue = loadAttribute.refresh();
-
- EmsAttribute dataFileLocationAttribute = bean.getAttribute(DATA_FILE_LOCATIONS_NAME);
- Object dataFileLocationValue = dataFileLocationAttribute.refresh();
-
- if (loadValue != null && dataFileLocationValue != null && dataFileLocationValue instanceof String[]) {
- //Please visit for details: https://issues.apache.org/jira/browse/CASSANDRA-2749
- //The average usage of all partitions with the data will be reported.
- //Cassandra selects the partition with most free space for SStable flush and compaction.
- double load = Double.parseDouble(loadValue.toString());
-
- report.addData(new MeasurementDataNumeric(request, getPartitionDiskUsedPercentage(load,
- (String[]) dataFileLocationValue)));
- }
+ } else if (DATA_DISK_USED_PERCENTAGE_METRIC_NAME.equals(request.getName())
+ || TOTAL_DISK_USED_PERCENTAGE_METRIC_NAME.equals(request.getName())
+ || FREE_DISK_TO_DATA_SIZE_RATIO_METRIC_NAME.equals(request.getName())) {
+ double metricValue = getDiskUsageMetric(request, load, (String[]) dataFileLocationValue);
+ report.addData(new MeasurementDataNumeric(request, metricValue));
}
}
}
- private double getPartitionDiskUsedPercentage(double dataSize, String[] paths) {
+ private double getDiskUsageMetric(MeasurementScheduleRequest request, double dataSize, String[] paths) {
List<String> visitedMountPoints = new ArrayList<String>();
long totalDiskSpace = 0;
+ long totalFreeDiskSpace = 0;
+ long totalUsedDiskSpace = 0;
for (String path : paths) {
try {
FileSystemInfo fileSystemInfo = this.getResourceContext().getSystemInformation().getFileSystem(path);
if (!visitedMountPoints.contains(fileSystemInfo.getMountPoint())) {
visitedMountPoints.add(fileSystemInfo.getMountPoint());
+
+ //contrary to the Sigar documentation, these values are reported in MB, not bytes
totalDiskSpace += fileSystemInfo.getFileSystemUsage().getTotal();
+ totalFreeDiskSpace += fileSystemInfo.getFileSystemUsage().getFree();
+ totalUsedDiskSpace += fileSystemInfo.getFileSystemUsage().getUsed();
}
} catch (Exception e) {
log.error("Unable to determine file system usage information for data file location " + path, e);
}
}
+ double metricValue = 0;
+
+
if (totalDiskSpace != 0) {
- double rawPercentage = dataSize / ((double) totalDiskSpace);
- return Math.round(rawPercentage * 100.0) / 100.0;
+ double rawPercentage = 0;
+ if (DATA_DISK_USED_PERCENTAGE_METRIC_NAME.equals(request.getName())) {
+ rawPercentage = dataSize / ((double) totalDiskSpace);
+ } else if (TOTAL_DISK_USED_PERCENTAGE_METRIC_NAME.equals(request.getName())) {
+ rawPercentage = ((double) totalUsedDiskSpace) / ((double) totalDiskSpace);
+ } else if (FREE_DISK_TO_DATA_SIZE_RATIO_METRIC_NAME.equals(request.getName())) {
+ rawPercentage = ((double) totalFreeDiskSpace) / (double) dataSize;
+ }
+
+ metricValue = Math.round(rawPercentage * 100d) / 100d;
}
- return 0;
+ return metricValue;
}
}
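For quick reference, the three calculated metrics above reduce to simple ratios over the Sigar file-system figures aggregated across the distinct mount points. A minimal sketch with illustrative helper names (not the plugin source; values assumed to be in MB, as noted in the code comment):

    double dataDiskUsedPercentage(double dataSizeMb, long totalDiskMb) {
        return round2(dataSizeMb / totalDiskMb);            // Calculated.DataDiskUsedPercentage
    }
    double totalDiskUsedPercentage(long usedDiskMb, long totalDiskMb) {
        return round2((double) usedDiskMb / totalDiskMb);   // Calculated.TotalDiskUsedPercentage
    }
    double freeDiskToDataSizeRatio(long freeDiskMb, double dataSizeMb) {
        return round2(freeDiskMb / dataSizeMb);             // Calculated.FreeDiskToDataSizeRatio
    }
    double round2(double raw) {
        return Math.round(raw * 100d) / 100d;               // two-decimal rounding, as in getDiskUsageMetric()
    }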
diff --git a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
index a1b3412..5bbebed 100644
--- a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
+++ b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
@@ -186,9 +186,11 @@
</parameters>
</operation>
+ <metric property="Calculated.DataDiskUsedPercentage" displayName="Data File Disk Used Percentage" dataType="measurement" units="percentage" displayType="summary" description="Percentage of disk space used by Cassandra data files. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/>
+ <metric property="Calculated.TotalDiskUsedPercentage" displayName="Total Disk Used Percentage" dataType="measurement" units="percentage" displayType="summary" description="Percentage of total disk space used. The metric acounts overall disk usage (including system files), not just disk space used by Cassandra. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/>
+ <metric property="Calculated.FreeDiskToDataSizeRatio" displayName="Free Disk to Data Size Ratio" dataType="measurement" displayType="summary" description="Ratio of (Amount of Free Disk)/(Data File Size). A value below 1 is not recommended since a compaction or repair process could double the amount of disk space used by data files. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/>
+
<metric property="CurrentGenerationNumber" dataType="trait" displayType="summary" description="Current generation number"/>
- <metric property="Calculated.PartitionDiskUsedPercentage" displayName="Data File Disk Used Percentage" dataType="measurement" units="percentage" displayType="summary" description="Percentage of disk space used by the data files. If multiple data locations are specified then this will report
- the average utilization accross all the partitions that contain data files."/>
<metric property="ExceptionCount" measurementType="trendsup" dataType="measurement" displayType="summary" description="Exception Count"/>
<metric property="Initialized" dataType="trait" displayType="summary" description="Initialized"/>
<metric property="Joined" dataType="trait" displayType="summary" description="Joined"/>
commit 82d0075ed860dcc2f2aed57191e4fa64806caa97
Author: Jirka Kremser <jkremser(a)redhat.com>
Date: Mon Jul 22 14:39:39 2013 +0200
Entities < and > in the plugin descriptor were interpreted as HTML in the ConfigurationEditor, so the content between them was not displayed.
diff --git a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
index e6bc1da..a1b3412 100644
--- a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
+++ b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
@@ -159,7 +159,7 @@
<operation name="takeSnapshot"
description="Takes a snapshot of all keyspaces. A snapshot first flushes all in-memory writes to disk and then creates a hard
link of each SSTable file for each keyspace. Note that a column family can have multiple
- SSTables on disk. By default snapshots are stored in the <cassandra_data_dir>/<keyspace_name>/<column_family_name>/snapshots
+ SSTables on disk. By default snapshots are stored in the [cassandra_data_dir]/[keyspace_name]/[column_family_name]/snapshots
directory. On Linux/UNIX systems cassandra_data_dir defaults to /var/lib/cassandra/data">
<parameters>
<c:simple-property name="snapshotName" required="false" type="string" displayName="Snapshot Name"
commit 1a520b6c33dd2bde09d58ea7e51d04c83622f481
Author: John Sanda <jsanda(a)redhat.com>
Date: Mon Jul 22 07:17:20 2013 -0400
comment out exporting test ear
It might be nice to enable exporting the test EAR with a system property, with the
export location also configurable.
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java
index 2fd8624..8010c2b 100644
--- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java
+++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java
@@ -413,7 +413,8 @@ public abstract class AbstractEJB3Test extends Arquillian {
//System.out.println("** The Deployment EAR: " + testEar.toString(true) + "\n");
// Save the test EAR to a zip file for inspection (set file explicitly)
- exportZip(testEar, new File("/Users/jsanda/tmp/test-ear.ear"));
+ //String tmpDir = System.getProperty("java.io.tmpdir");
+ //exportZip(testEar, new File(tmpDir, "test.ear"));
return testEar;
}
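A possible shape for the opt-in export suggested in the commit message; the system property names used here (rhq.test.ear.export, rhq.test.ear.export.dir) are hypothetical, not existing flags:

    // Hypothetical opt-in export; property names are illustrative only.
    if (Boolean.getBoolean("rhq.test.ear.export")) {
        String exportDir = System.getProperty("rhq.test.ear.export.dir",
            System.getProperty("java.io.tmpdir"));
        exportZip(testEar, new File(exportDir, "test.ear"));
    }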
commit df8669222aa3146b71b3252166af6bdd387806cd
Author: Heiko W. Rupp <hwr(a)redhat.com>
Date: Mon Jul 22 10:38:44 2013 +0200
Add more units.
diff --git a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Units.java b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Units.java
index 0ae4011..150b9e0 100644
--- a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Units.java
+++ b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/Units.java
@@ -1,6 +1,6 @@
/*
* JBoss, Home of Professional Open Source.
- * Copyright 2009, Red Hat, Inc. and/or its affiliates, and
+ * Copyright 2009-2013, Red Hat, Inc. and/or its affiliates, and
* individual contributors as indicated by the @author tags. See the
* copyright.txt file in the distribution for a full listing of
* individual contributors.
@@ -26,10 +26,18 @@ package org.rhq.helpers.pluginAnnotations.agent;
* Metric Units.
*
* @author Galder Zamarreño
+ * @author Heiko W. Rupp
+ * See also org.rhq.core.domain.measurement.MeasurementUnits
* @since 4.0
*/
+@SuppressWarnings("unused")
public enum Units {
- NONE, MILLISECONDS, SECONDS, PERCENTAGE;
+ NONE, PERCENTAGE,
+ BYTES, KILOBYTES, MEGABYTES, GIGABYTES, TERABYTES, PETABYTES,
+ BITS, KILOBITS, MEGABITS, GIGABITS, TERABITS, PETABITS,
+ EPOCH_MILLISECONDS, EPOCH_SECONDS,
+ JIFFYS, NANOSECONDS, MICROSECONDS, MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS,
+ CELSIUS, KELVIN, FAHRENHEIT;
@Override
public String toString() {
commit 14edae37060b25ebcfed621deef0281c72b1cea2
Author: John Sanda <jsanda(a)redhat.com>
Date: Sun Jul 21 21:22:43 2013 -0400
adding resource operation to update internode auth conf file
The operation updates the file on disk and then invokes the JMX operation to
have the authenticator reload the configuration so that the changes can be
picked up without having to restart the node.
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
index 380da65..006dd26 100644
--- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
+++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
@@ -26,7 +26,12 @@
package org.rhq.plugins.storage;
import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.StringReader;
+import java.util.HashSet;
import java.util.List;
+import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -36,6 +41,7 @@ import org.mc4j.ems.connection.bean.attribute.EmsAttribute;
import org.mc4j.ems.connection.bean.operation.EmsOperation;
import org.rhq.core.domain.configuration.Configuration;
+import org.rhq.core.domain.configuration.Property;
import org.rhq.core.domain.configuration.PropertyList;
import org.rhq.core.domain.configuration.PropertyMap;
import org.rhq.core.domain.configuration.PropertySimple;
@@ -43,7 +49,10 @@ import org.rhq.core.pluginapi.configuration.ConfigurationFacet;
import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport;
import org.rhq.core.pluginapi.operation.OperationFacet;
import org.rhq.core.pluginapi.operation.OperationResult;
+import org.rhq.core.util.StringUtil;
import org.rhq.core.util.exception.ThrowableUtil;
+import org.rhq.core.util.file.FileUtil;
+import org.rhq.core.util.stream.StreamUtil;
import org.rhq.plugins.cassandra.CassandraNodeComponent;
import org.rhq.plugins.cassandra.util.KeyspaceService;
@@ -84,6 +93,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
return readRepair();
} else if (name.equals("updateConfiguration")) {
return updateConfiguration(parameters);
+ } else if (name.equals("updateKnownNodes")) {
+ return updateKnownNodes(parameters);
} else {
return super.invokeOperation(name, parameters);
}
@@ -95,6 +106,73 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
return result;
}
+ private OperationResult updateKnownNodes(Configuration params) {
+ OperationResult result = new OperationResult();
+
+ PropertyList propertyList = params.getList("ipAddresses");
+ Set<String> ipAddresses = new HashSet<String>();
+
+ for (Property property : propertyList.getList()) {
+ PropertySimple propertySimple = (PropertySimple) property;
+ ipAddresses.add(propertySimple.getStringValue());
+ }
+
+ log.info("Updating known nodes to " + ipAddresses);
+
+ File confDir = new File(getBasedir(), "conf");
+ File authFile = new File(confDir, "rhq-storage-auth.conf");
+ File authBackupFile = new File(confDir, "." + authFile.getName() + ".bak");
+
+ if (authBackupFile.exists()) {
+ if (log.isDebugEnabled()) {
+ log.debug(authBackupFile + " already exists. Deleting it now in preparation for creating a new backup " +
+ "for " + authFile.getName());
+ }
+ if (!authBackupFile.delete()) {
+ String msg = "Failed to delete backup file " + authBackupFile + ". The operation will abort " +
+ "since " + authFile + " cannot reliably be backed up before making changes. Please delete " +
+ authBackupFile + " manually and reschedule the operation once the file has been removed.";
+ log.error(msg);
+ result.setErrorMessage(msg);
+
+ return result;
+ }
+ }
+
+ try {
+ FileUtil.copyFile(authFile, authBackupFile);
+ } catch (IOException e) {
+ String msg = "Failed to backup " + authFile + " prior to making updates. The operation will abort due " +
+ "to unexpected error";
+ log.error(msg, e);
+ result.setErrorMessage(msg + ": " + ThrowableUtil.getRootMessage(e));
+ return result;
+ }
+
+ try {
+ StreamUtil.copy(new StringReader(StringUtil.collectionToString(ipAddresses, "\n")),
+ new FileWriter(authFile), true);
+ } catch (IOException e) {
+ log.error("An error occurred while updating " + authFile, e);
+ try {
+ FileUtil.copyFile(authBackupFile, authFile);
+ } catch (IOException e1) {
+ log.error("Failed to revert backup of " + authFile, e1);
+ }
+ result.setErrorMessage("There was an unexpected error while updating " + authFile + ". Make sure that " +
+ "it matches " + authBackupFile + " and then reschedule the operation.");
+ return result;
+ }
+
+ EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator");
+ EmsOperation emsOperation = authBean.getOperation("reloadConfiguration");
+ emsOperation.invoke();
+
+ result.setSimpleResult("Successfully updated the set of known nodes.");
+
+ return result;
+ }
+
private OperationResult nodeAdded(Configuration params) {
boolean runRepair = params.getSimple("runRepair").getBooleanValue();
boolean updateSeedsList = params.getSimple("updateSeedsList").getBooleanValue();
diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
index 6ed31b7..8156d02 100644
--- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
+++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
@@ -93,6 +93,14 @@
</results>
</operation>
+ <operation name="updateKnownNodes">
+ <parameters>
+ <c:list-property name="ipAddresses">
+ <c:simple-property name="ipAddress"/>
+ </c:list-property>
+ </parameters>
+ </operation>
+
<operation name="prepareForUpgrade" description="Prepares the storage node for upgrade (this operation consists of following steps: 1) turning off the RPC server, 2) turning off the gossiper, 3) taking the snapshot (backuping the data), 4) invoking the drain operation">
<parameters>
<c:simple-property name="snapshotName" required="false" type="string" displayName="Snapshot Name"
commit 38777a49e6b5c5f92c54c3f41511f5f691d2a548
Author: John Sanda <jsanda(a)redhat.com>
Date: Sun Jul 21 21:21:45 2013 -0400
update cassandra deployer itest
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties
index a90d23c..774a831 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-jvm.properties
@@ -17,9 +17,9 @@ heap_dump_dir=""
thread_stack_size=-Xss180k
-java_agent=""
# Enable jamm when running on Java 6 patch version 23 or higher.
#java_agent="-javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar"
+java_agent=-javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar
# GC tuning options
#
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml
index 15d08f2..b9e490a 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml
@@ -323,7 +323,7 @@ listen_address: localhost
# Internode authentication backend, implementing IInternodeAuthenticator;
# used to allow/disallow connections from peer nodes.
-# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
+internode_authenticator: org.rhq.cassandra.auth.RhqInternodeAuthenticator
# Whether to start the native transport server.
# Currently, only the thrift server is started by default because the native
commit 1750170844f6a68b11256ba001fe6cffb53439f6
Author: John Sanda <jsanda(a)redhat.com>
Date: Sun Jul 21 21:14:48 2013 -0400
temporarily disable the quartz job that is kicked off when a new node is added
I need to disable the job while working on the internode authentication stuff.
If the maintenance operations run on the nodes, the cluster can actually get
into a bad state because the nodes' authentication conf files have not been
updated, which means the new node is not actually part of the cluster.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
index 3f1ec69..c77e229 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
@@ -170,7 +170,7 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
entityManager.persist(storageNode);
- scheduleQuartzJob(storageNodes.size());
+// scheduleQuartzJob(storageNodes.size());
}
}
}
commit fe6da9b5511a3b4c97c2421079db04020c33c27f
Author: John Sanda <jsanda(a)redhat.com>
Date: Sun Jul 21 21:11:43 2013 -0400
pre-configure internode auth conf file
From testing I have done thus far it appears that a storage node should have
its own IP address included in the internode authentication config file. The
storage installer updates the auth file to include the node's address as well
as the seeds.
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
index edf1430..c8bb2ef 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
@@ -34,6 +34,7 @@ import java.io.FileWriter;
import java.io.IOException;
import java.io.StringReader;
import java.io.StringWriter;
+import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
@@ -109,6 +110,13 @@ public class CassandraClusterManager {
List<StorageNode> nodes = new ArrayList<StorageNode>(deploymentOptions.getNumNodes());
String seeds = collectionToString(calculateLocalIPAddresses(deploymentOptions.getNumNodes()));
+ Set<InetAddress> ipAddresses = null;
+
+ try {
+ ipAddresses = getClusterIPAddresses();
+ } catch (IOException e) {
+ throw new RuntimeException("Failed to get cluster IP addresses", e);
+ }
for (int i = 0; i < deploymentOptions.getNumNodes(); ++i) {
File basedir = new File(deploymentOptions.getClusterDir(), "node" + i);
@@ -141,7 +149,7 @@ public class CassandraClusterManager {
storageNode.setCqlPort(nodeOptions.getNativeTransportPort());
nodes.add(storageNode);
- updateStorageAuthConf(basedir);
+ deployer.updateStorageAuthConf(ipAddresses);
installedNodeDirs.add(basedir);
} catch (Exception e) {
@@ -193,6 +201,15 @@ public class CassandraClusterManager {
return i <= seedsArray.length ? seedsArray[i - 1] : ("127.0.0." + i);
}
+ private Set<InetAddress> getClusterIPAddresses() throws IOException {
+ Set<InetAddress> ipAddresses = new HashSet<InetAddress>();
+ for (String address : calculateLocalIPAddresses(deploymentOptions.getNumNodes())) {
+ ipAddresses.add(InetAddress.getByName(address));
+ }
+
+ return ipAddresses;
+ }
+
private List<StorageNode> calculateNodes() {
List<StorageNode> nodes = new ArrayList<StorageNode>(deploymentOptions.getNumNodes());
for (int i = 0; i < deploymentOptions.getNumNodes(); ++i) {
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
index b01ebe9..6547043 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
@@ -30,13 +30,18 @@ import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
+import java.io.StringReader;
+import java.net.InetAddress;
+import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
+import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.rhq.core.util.PropertiesFileUpdate;
+import org.rhq.core.util.StringUtil;
import org.rhq.core.util.TokenReplacingReader;
import org.rhq.core.util.ZipUtil;
import org.rhq.core.util.stream.StreamUtil;
@@ -171,4 +176,22 @@ public class Deployer {
}
}
+ public void updateStorageAuthConf(Set<InetAddress> ipAddresses) {
+ File confDir = new File(deploymentOptions.getBasedir(), "conf");
+ File authFile = new File(confDir, "rhq-storage-auth.conf");
+
+ Set<String> addresses = new HashSet<String>(ipAddresses.size());
+ for (InetAddress ipAddress : ipAddresses) {
+ addresses.add(ipAddress.getHostAddress());
+ }
+
+ try {
+ authFile.delete();
+ StreamUtil.copy(new StringReader(StringUtil.collectionToString(addresses, "\n")),
+ new FileWriter(authFile), true);
+ } catch (IOException e) {
+ throw new RuntimeException("Failed to update " + authFile);
+ }
+ }
+
}
diff --git a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java
index 5c5ac4e..113d66b 100644
--- a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java
+++ b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java
@@ -34,10 +34,12 @@ import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
+import java.util.Set;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
@@ -386,6 +388,7 @@ public class StorageInstaller {
deployer.unzipDistro();
deployer.applyConfigChanges();
deployer.updateFilePerms();
+ deployer.updateStorageAuthConf(getAddresses(hostname, seeds));
log.info("Finished installing RHQ Storage Node.");
@@ -499,6 +502,19 @@ public class StorageInstaller {
return dir;
}
+ private Set<InetAddress> getAddresses(String hostname, String seeds) throws IOException {
+ Set<InetAddress> addresses = new HashSet<InetAddress>();
+ addresses.add(InetAddress.getByName(hostname));
+
+ if (!StringUtil.isEmpty(seeds)) {
+ for (String seed : seeds.split(",")) {
+ addresses.add(InetAddress.getByName(seed));
+ }
+ }
+
+ return addresses;
+ }
+
private PropertiesFileUpdate getServerProperties() {
String sysprop = System.getProperty("rhq.server.properties-file");
if (sysprop == null) {
commit b63c38cb0c19cd062d14c5e025164021f1e946cc
Author: John Sanda <jsanda(a)redhat.com>
Date: Sat Jul 20 09:53:10 2013 -0400
turn on internode authentication
This enables internode authentication in cassandra.yaml, and the cassandra-auth
module is now packaged with our Cassandra distro. The authentication config file is
automatically updated for integration tests. There is still work to be done for
multi-node dev-container (and production) deployments.
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml
index 2bde394..c12c567 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml
@@ -113,6 +113,11 @@
<artifactId>snappy-java</artifactId>
<version>${cassandra.snappy.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.rhq</groupId>
+ <artifactId>rhq-cassandra-auth</artifactId>
+ <version>${project.version}</version>
+ </dependency>
</dependencies>
<executions>
<execution>
@@ -166,12 +171,15 @@
<delete file="${cassandra.dir}/lib/snappy-java-1.0.4.1.jar"/>
<copy file="${settings.localRepository}/org/xerial/snappy/snappy-java/${cassandra.snappy.version}/snappy-java-${cassandra.snappy.version}.jar"
todir="${cassandra.dir}/lib"/>
+ <copy file="${settings.localRepository}/org/rhq/rhq-cassandra-auth/${project.version}/rhq-cassandra-auth-${project.version}.jar"
+ todir="${cassandra.dir}/lib"/>
<move file="${project.build.outputDirectory}/cassandra/conf" todir="${cassandra.dir}"/>
<delete file="${cassandra.dir}/bin/cassandra"/>
<move file="${project.build.outputDirectory}/cassandra/bin/cassandra" todir="${cassandra.dir}/bin"/>
<delete dir="${project.build.outputDirectory}/cassandra"/>
<delete dir="${cassandra.dir}/javadoc"/>
<delete file="${cassandra.dir}/conf/cassandra-env.sh"/>
+ <touch file="${cassandra.dir}/conf/rhq-storage-auth.conf"/>
<zip basedir="${cassandra.dir}" destfile="${cassandra.distro.zip}"/>
<delete dir="${cassandra.dir}"/>
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties
index 612c65e..1faee9d 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-jvm.properties
@@ -17,9 +17,9 @@ heap_dump_dir=""
thread_stack_size="-Xss180k"
-java_agent=""
# Enable jamm when running on Java 6 patch version 23 or higher.
#java_agent="-javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar"
+java_agent=
# GC tuning options
#
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml
index 298db9d..da09e92 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml
@@ -323,7 +323,7 @@ listen_address: ${rhq.cassandra.listen.address}
# Internode authentication backend, implementing IInternodeAuthenticator;
# used to allow/disallow connections from peer nodes.
-# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
+internode_authenticator: org.rhq.cassandra.auth.RhqInternodeAuthenticator
# Whether to start the native transport server.
# Currently, only the thrift server is started by default because the native
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
index 338ef3a..edf1430 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
@@ -30,7 +30,9 @@ import static org.rhq.core.util.StringUtil.collectionToString;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileReader;
+import java.io.FileWriter;
import java.io.IOException;
+import java.io.StringReader;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Arrays;
@@ -50,6 +52,7 @@ import org.rhq.core.system.ProcessExecution;
import org.rhq.core.system.ProcessExecutionResults;
import org.rhq.core.system.SystemInfo;
import org.rhq.core.system.SystemInfoFactory;
+import org.rhq.core.util.StringUtil;
import org.rhq.core.util.file.FileUtil;
import org.rhq.core.util.stream.StreamUtil;
@@ -138,6 +141,8 @@ public class CassandraClusterManager {
storageNode.setCqlPort(nodeOptions.getNativeTransportPort());
nodes.add(storageNode);
+ updateStorageAuthConf(basedir);
+
installedNodeDirs.add(basedir);
} catch (Exception e) {
log.error("Failed to install node at " + basedir);
@@ -152,6 +157,21 @@ public class CassandraClusterManager {
return nodes;
}
+ private void updateStorageAuthConf(File basedir) {
+ File confDir = new File(basedir, "conf");
+ File authFile = new File(confDir, "rhq-storage-auth.conf");
+ authFile.delete();
+
+ Set<String> addresses = calculateLocalIPAddresses(deploymentOptions.getNumNodes());
+
+ try {
+ StreamUtil.copy(new StringReader(StringUtil.collectionToString(addresses, "\n")),
+ new FileWriter(authFile), true);
+ } catch (IOException e) {
+ throw new RuntimeException("Failed to update " + authFile);
+ }
+ }
+
private Set<String> calculateLocalIPAddresses(int numNodes) {
Set<String> addresses = new HashSet<String>();
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
index b222c82..b01ebe9 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
@@ -160,11 +160,6 @@ public class Deployer {
return Integer.parseInt(javaVersion.substring(startIndex + 1, javaVersion.length()));
}
- private boolean isLaterThanJava1_6() {
- String javaVersion = System.getProperty("java.version");
- return javaVersion.compareTo("1.6.0") > 0;
- }
-
public void updateFilePerms() {
File deployDir = new File(deploymentOptions.getBasedir());
File binDir = new File(deployDir, "bin");
commit 890a378fc11b04f3e7af64babfbfd1a7e306b7cd
Author: John Sanda <jsanda(a)redhat.com>
Date: Sat Jul 20 07:56:14 2013 -0400
initial commit for cassandra-auth module
This is the first cut at our IInternodeAuthenticator. It loads IP addresses
from a config file. There is a JMX operation to reload the config file that can
be used after the config file is updated.
diff --git a/modules/common/cassandra-auth/pom.xml b/modules/common/cassandra-auth/pom.xml
new file mode 100644
index 0000000..c53c752
--- /dev/null
+++ b/modules/common/cassandra-auth/pom.xml
@@ -0,0 +1,27 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.rhq</groupId>
+ <artifactId>rhq-common-parent</artifactId>
+ <version>4.9.0-SNAPSHOT</version>
+ </parent>
+
+ <artifactId>rhq-cassandra-auth</artifactId>
+ <name>RHQ Cassandra Authentication</name>
+
+ <properties>
+ <moduleName>org.rhq.${project.artifactId}</moduleName>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.cassandra</groupId>
+ <artifactId>cassandra-all</artifactId>
+ <version>${cassandra.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ </dependencies>
+</project>
diff --git a/modules/common/cassandra-auth/src/main/java/org/rhq/cassandra/auth/RhqInternodeAuthenticator.java b/modules/common/cassandra-auth/src/main/java/org/rhq/cassandra/auth/RhqInternodeAuthenticator.java
new file mode 100644
index 0000000..56980f1
--- /dev/null
+++ b/modules/common/cassandra-auth/src/main/java/org/rhq/cassandra/auth/RhqInternodeAuthenticator.java
@@ -0,0 +1,78 @@
+package org.rhq.cassandra.auth;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.net.InetAddress;
+import java.net.URISyntaxException;
+import java.util.HashSet;
+import java.util.Set;
+
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
+import org.apache.cassandra.auth.IInternodeAuthenticator;
+import org.apache.cassandra.exceptions.ConfigurationException;
+
+/**
+ * @author John Sanda
+ */
+public class RhqInternodeAuthenticator implements IInternodeAuthenticator, RhqInternodeAuthenticatorMBean {
+
+ private final String MBEAN_NAME = "org.rhq.cassandra.auth:type=" + RhqInternodeAuthenticator.class.getSimpleName();
+
+ private final String CONF_FILE = "rhq-storage-auth.conf";
+
+ private File authConfFile;
+
+ private Set<InetAddress> addresses = new HashSet<InetAddress>();
+
+ public RhqInternodeAuthenticator() {
+ try {
+ authConfFile = new File(getClass().getResource("/" + CONF_FILE).toURI());
+ if (!authConfFile.exists()) {
+ throw new RuntimeException(authConfFile + " does not exist");
+ }
+
+ reloadConfiguration();
+ } catch (URISyntaxException e) {
+ throw new RuntimeException("Failed to load " + CONF_FILE, e);
+ }
+
+ try {
+ MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ ObjectName nameObj = new ObjectName(MBEAN_NAME);
+ mbs.registerMBean(this, nameObj);
+ } catch (Exception e) {
+ throw new RuntimeException("Failed to register MBean " + MBEAN_NAME, e);
+ }
+ }
+
+ @Override
+ public boolean authenticate(InetAddress address, int port) {
+ return addresses.contains(address);
+ }
+
+ @Override
+ public void reloadConfiguration() {
+ try {
+ addresses.clear();
+
+ BufferedReader reader = new BufferedReader(new FileReader(authConfFile));
+ String line = reader.readLine();
+
+ while (line != null) {
+ addresses.add(InetAddress.getByName(line));
+ line = reader.readLine();
+ }
+ } catch (IOException e) {
+ throw new RuntimeException("Failed to load addresses from " + authConfFile, e);
+ }
+ }
+
+ @Override
+ public void validateConfiguration() throws ConfigurationException {
+ }
+}
diff --git a/modules/common/cassandra-auth/src/main/java/org/rhq/cassandra/auth/RhqInternodeAuthenticatorMBean.java b/modules/common/cassandra-auth/src/main/java/org/rhq/cassandra/auth/RhqInternodeAuthenticatorMBean.java
new file mode 100644
index 0000000..5e20389
--- /dev/null
+++ b/modules/common/cassandra-auth/src/main/java/org/rhq/cassandra/auth/RhqInternodeAuthenticatorMBean.java
@@ -0,0 +1,10 @@
+package org.rhq.cassandra.auth;
+
+/**
+ * @author John Sanda
+ */
+public interface RhqInternodeAuthenticatorMBean {
+
+ public void reloadConfiguration();
+
+}
diff --git a/modules/common/pom.xml b/modules/common/pom.xml
index bcbf862..7d12500 100644
--- a/modules/common/pom.xml
+++ b/modules/common/pom.xml
@@ -30,6 +30,7 @@
<module>ant-bundle</module>
<module>drift</module>
<module>jboss-as-dmr-client</module>
+ <module>cassandra-auth</module>
<module>cassandra-util</module>
<module>cassandra-jmx</module>
<module>cassandra-schema</module>
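Besides the plugin operation, the reload hook can be driven directly over JMX once rhq-storage-auth.conf has been edited. A minimal client sketch; the host and port are placeholders, while the object name matches the MBEAN_NAME registered above:

    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    public class ReloadAuthConf {
        public static void main(String[] args) throws Exception {
            // Replace host/port with the storage node's JMX endpoint.
            JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:7299/jmxrmi");
            JMXConnector connector = JMXConnectorFactory.connect(url);
            try {
                MBeanServerConnection connection = connector.getMBeanServerConnection();
                ObjectName name = new ObjectName("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator");
                connection.invoke(name, "reloadConfiguration", new Object[0], new String[0]);
            } finally {
                connector.close();
            }
        }
    }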
commit 8cf740355dfab9832b904984aaba4e52bf759951
Author: John Sanda <jsanda(a)redhat.com>
Date: Fri Jul 19 22:18:23 2013 -0400
add logic to enable jamm java agent, which mirrors logic in cassandra-env.sh
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
index 1e31e14..b222c82 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
@@ -125,6 +125,13 @@ public class Deployer {
properties.setProperty("thread_stack_size", "-Xss" + deploymentOptions.getStackSize());
properties.setProperty("jmx_port", deploymentOptions.getJmxPort().toString());
+ String javaVersion = System.getProperty("java.version");
+ // The check here is taken right from cassandra-env.sh
+ if ((!isOpenJDK() || javaVersion.compareTo("1.6.0") > 0) ||
+ (javaVersion.equals("1.6.0") && getJavaPatchVersion() > 23)) {
+ properties.put("java_agent", "-javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar");
+ }
+
propertiesUpdater.update(properties);
} catch (IOException e) {
log.error("An error occurred while updating " + jvmPropsFile, e);
@@ -132,6 +139,32 @@ public class Deployer {
}
}
+ private boolean isOpenJDK() {
+ String javaVMName = System.getProperty("java.vm.name");
+ return javaVMName.startsWith("OpenJDK");
+ }
+
+ private boolean isJava1_6() {
+ String javaVersion = System.getProperty("java.version");
+ return javaVersion.startsWith("1.6.0");
+ }
+
+ private int getJavaPatchVersion() {
+ String javaVersion = System.getProperty("java.version");
+ int startIndex = javaVersion.indexOf('_');
+
+ if (startIndex == -1) {
+ return 0;
+ }
+
+ return Integer.parseInt(javaVersion.substring(startIndex + 1, javaVersion.length()));
+ }
+
+ private boolean isLaterThanJava1_6() {
+ String javaVersion = System.getProperty("java.version");
+ return javaVersion.compareTo("1.6.0") > 0;
+ }
+
public void updateFilePerms() {
File deployDir = new File(deploymentOptions.getBasedir());
File binDir = new File(deployDir, "bin");
commit 0517e453381aec1d30b6aaa62bec63a322dfa350
Author: Larry O'Leary <loleary(a)redhat.com>
Date: Fri Jul 19 19:05:25 2013 -0500
Updated TestLdapSettings to include changes/fixes introduced from the following BZs:
Bug 707047 - LDAP Group Member search doesn't escape special characters
Bug 981015 - LDAP auth fails if user's DN contains a backslash
Also includes a fix where the user auth test would fail to switch back to the directory bind account, resulting in ACL failures in the following group lookup steps.
diff --git a/etc/dev-utils/TestLdapSettings.java b/etc/dev-utils/TestLdapSettings.java
index 4df79dc..2e29b3d 100644
--- a/etc/dev-utils/TestLdapSettings.java
+++ b/etc/dev-utils/TestLdapSettings.java
@@ -10,12 +10,14 @@ import java.awt.event.WindowAdapter;
import java.awt.event.WindowEvent;
import java.io.PrintWriter;
import java.io.StringWriter;
+import java.io.UnsupportedEncodingException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
+import javax.naming.CompositeName;
import javax.naming.Context;
import javax.naming.NamingEnumeration;
import javax.naming.directory.Attribute;
@@ -257,7 +259,17 @@ public class TestLdapSettings extends JFrame {
SearchResult si = (SearchResult) answer.next();
// Construct the UserDN
- userDN = si.getName() + "," + baseDNs[x];
+ userDN = null;
+
+ try {
+ userDN = si.getNameInNamespace();
+ } catch (UnsupportedOperationException use) {
+ userDN = new CompositeName(si.getName()).get(0);
+ if (si.isRelative()) {
+ userDN += "," + baseDNs[x];
+ }
+ }
+
msg = "STEP-2:PASS: The test user '"
+ testUserName
+ "' was succesfully located, and the following userDN will be used in authorization check:\n";
@@ -288,6 +300,21 @@ public class TestLdapSettings extends JFrame {
log(msg);
proceed=false;
}
+ try {
+ ctx.addToEnvironment(Context.SECURITY_PRINCIPAL, bindUserName);
+ ctx.addToEnvironment(Context.SECURITY_CREDENTIALS, bindPassword);
+ ctx.addToEnvironment(Context.SECURITY_AUTHENTICATION, "simple");
+ ctx.reconnect(null);
+ } catch (Exception ex) {
+ msg = "STEP-2:WARN: There was an error when switching back to the bind user '"
+ + bindUserName + "'\n";
+ msg += ex.getMessage();
+ if(enableVerboseDebugging.isSelected()){
+ msg = appendStacktraceToMsg(msg, ex);
+ }
+ log(msg);
+ }
+
}
// with authentication completed, now check authorization.
// validate filter components to list all available groups
@@ -433,7 +460,7 @@ public class TestLdapSettings extends JFrame {
Set<Map<String, String>> ret = new HashSet<Map<String, String>>();
String filter = String.format("(&(%s)(%s=%s))",
groupSearchFilter, groupMemberFilter,
- userDN);
+ LDAPStringUtil.encodeForFilter(userDN));
msg = "STEP-4:TESTING: about to do ldap search with filter \n'"
+ filter
+ "'\n to locate groups that test user IS authorized to access.";
@@ -638,3 +665,74 @@ public class TestLdapSettings extends JFrame {
return constraints;
}
}
+
+class LDAPStringUtil {
+
+ /**
+ * <p>Encode a string so that it can be used in an LDAP search filter.</p>
+ *
+ * <p>The following table shows the characters that are encoded and their
+ * encoded version.</p>
+ *
+ * <table>
+ * <tr><th align="center">Character</th><th>Encoded As</th></tr>
+ * <tr><td align="center">*</td><td>\2a</td></tr>
+ * <tr><td align="center">(</td><td>\28</td></tr>
+ * <tr><td align="center">)</td><td>\29</td></tr>
+ * <tr><td align="center">\</td><td>\5c</td></tr>
+ * <tr><td align="center"><code>null</code></td><td>\00</td></tr>
+ * </table>
+ *
+ * <p>In addition to encoding the above characters, any non-ASCII character
+ * (any character with a hex value greater then <code>0x7f</code>) is also
+ * encoded and rewritten as a UTF-8 character or sequence of characters in
+ * hex notation.</p>
+ *
+ * @param filterString a string that is to be encoded
+ * @return the encoded version of <code>filterString</code> suitable for use
+ * in an LDAP search filter
+ * @see <a href="http://tools.ietf.org/html/rfc4515">RFC 4515</a>
+ */
+ public static String encodeForFilter(final String filterString) {
+ if (filterString != null && filterString.length() > 0) {
+ StringBuilder encString = new StringBuilder(filterString.length());
+ for (int i = 0; i < filterString.length(); i++) {
+ char ch = filterString.charAt(i);
+ switch (ch) {
+ case '*': // encode a wildcard * character
+ encString.append("\\2a");
+ break;
+ case '(': // encode a open parenthesis ( character
+ encString.append("\\28");
+ break;
+ case ')': // encode a close parenthesis ) character
+ encString.append("\\29");
+ break;
+ case '\\': // encode a backslash \ character
+ encString.append("\\5c");
+ break;
+ case '\u0000': // encode a null character
+ encString.append("\\00");
+ break;
+ default:
+ if (ch <= 0x7f) { // an ASCII character
+ encString.append(ch);
+ } else if (ch >= 0x80) { // encode to UTF-8
+ try {
+ byte[] utf8bytes = String.valueOf(ch).getBytes("UTF8");
+ for (byte b : utf8bytes) {
+ encString.append(String.format("\\%02x", b));
+ }
+ } catch (UnsupportedEncodingException e) {
+ // ignore
+ }
+ }
+ }
+ }
+ return encString.toString();
+ }
+ return filterString;
+ }
+
+}
+
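A quick illustration of the new escaping, following the table in the javadoc above (illustrative usage, not part of the commit):

    String encoded = LDAPStringUtil.encodeForFilter("cn=Smith (admin)*");
    // encoded is "cn=Smith \28admin\29\2a" -- '(' -> \28, ')' -> \29, '*' -> \2a
    // A backslash in a user DN (Bug 981015) is likewise emitted as \5c, so the
    // resulting group search filter stays syntactically valid.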
commit 5d3b55ed04d5da4eab12704c8391872cd93421ad
Author: John Sanda <jsanda(a)redhat.com>
Date: Fri Jul 19 15:35:34 2013 -0400
forgot to include new test resources in previous commit
diff --git a/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/cassandra-env.sh b/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/cassandra-env.sh
new file mode 100644
index 0000000..cd415b3
--- /dev/null
+++ b/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/cassandra-env.sh
@@ -0,0 +1,247 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+calculate_heap_sizes()
+{
+ case "`uname`" in
+ Linux)
+ system_memory_in_mb=`free -m | awk '/Mem:/ {print $2}'`
+ system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo`
+ ;;
+ FreeBSD)
+ system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'`
+ system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
+ system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
+ ;;
+ SunOS)
+ system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'`
+ system_cpu_cores=`psrinfo | wc -l`
+ ;;
+ Darwin)
+ system_memory_in_bytes=`sysctl hw.memsize | awk '{print $2}'`
+ system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
+ system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
+ ;;
+ *)
+ # assume reasonable defaults for e.g. a modern desktop or
+ # cheap server
+ system_memory_in_mb="2048"
+ system_cpu_cores="2"
+ ;;
+ esac
+
+ # some systems like the raspberry pi don't report cores, use at least 1
+ if [ "$system_cpu_cores" -lt "1" ]
+ then
+ system_cpu_cores="1"
+ fi
+
+ # set max heap size based on the following
+ # max(min(1/2 ram, 1024MB), min(1/4 ram, 8GB))
+ # calculate 1/2 ram and cap to 1024MB
+ # calculate 1/4 ram and cap to 8192MB
+ # pick the max
+ half_system_memory_in_mb=`expr $system_memory_in_mb / 2`
+ quarter_system_memory_in_mb=`expr $half_system_memory_in_mb / 2`
+ if [ "$half_system_memory_in_mb" -gt "1024" ]
+ then
+ half_system_memory_in_mb="1024"
+ fi
+ if [ "$quarter_system_memory_in_mb" -gt "8192" ]
+ then
+ quarter_system_memory_in_mb="8192"
+ fi
+ if [ "$half_system_memory_in_mb" -gt "$quarter_system_memory_in_mb" ]
+ then
+ max_heap_size_in_mb="$half_system_memory_in_mb"
+ else
+ max_heap_size_in_mb="$quarter_system_memory_in_mb"
+ fi
+ MAX_HEAP_SIZE="${max_heap_size_in_mb}M"
+
+ # Young gen: min(max_sensible_per_modern_cpu_core * num_cores, 1/4 * heap size)
+ max_sensible_yg_per_core_in_mb="100"
+ max_sensible_yg_in_mb=`expr $max_sensible_yg_per_core_in_mb "*" $system_cpu_cores`
+
+ desired_yg_in_mb=`expr $max_heap_size_in_mb / 4`
+
+ if [ "$desired_yg_in_mb" -gt "$max_sensible_yg_in_mb" ]
+ then
+ HEAP_NEWSIZE="${max_sensible_yg_in_mb}M"
+ else
+ HEAP_NEWSIZE="${desired_yg_in_mb}M"
+ fi
+}
+
+# Determine the sort of JVM we'll be running on.
+
+java_ver_output=`"${JAVA:-java}" -version 2>&1`
+
+jvmver=`echo "$java_ver_output" | awk -F'"' 'NR==1 {print $2}'`
+JVM_VERSION=${jvmver%_*}
+JVM_PATCH_VERSION=${jvmver#*_}
+
+jvm=`echo "$java_ver_output" | awk 'NR==2 {print $1}'`
+case "$jvm" in
+ OpenJDK)
+ JVM_VENDOR=OpenJDK
+ # this will be "64-Bit" or "32-Bit"
+ JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $2}'`
+ ;;
+ "Java(TM)")
+ JVM_VENDOR=Oracle
+ # this will be "64-Bit" or "32-Bit"
+ JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $3}'`
+ ;;
+ *)
+ # Help fill in other JVM values
+ JVM_VENDOR=other
+ JVM_ARCH=unknown
+ ;;
+esac
+
+
+# Override these to set the amount of memory to allocate to the JVM at
+# start-up. For production use you may wish to adjust this for your
+# environment. MAX_HEAP_SIZE is the total amount of memory dedicated
+# to the Java heap; HEAP_NEWSIZE refers to the size of the young
+# generation. Both MAX_HEAP_SIZE and HEAP_NEWSIZE should be either set
+# or not (if you set one, set the other).
+#
+# The main trade-off for the young generation is that the larger it
+# is, the longer GC pause times will be. The shorter it is, the more
+# expensive GC will be (usually).
+#
+# The example HEAP_NEWSIZE assumes a modern 8-core+ machine for decent pause
+# times. If in doubt, and if you do not particularly want to tweak, go with
+# 100 MB per physical CPU core.
+
+#MAX_HEAP_SIZE="4G"
+#HEAP_NEWSIZE="800M"
+
+if [ "x$MAX_HEAP_SIZE" = "x" ] && [ "x$HEAP_NEWSIZE" = "x" ]; then
+ calculate_heap_sizes
+else
+ if [ "x$MAX_HEAP_SIZE" = "x" ] || [ "x$HEAP_NEWSIZE" = "x" ]; then
+ echo "please set or unset MAX_HEAP_SIZE and HEAP_NEWSIZE in pairs (see cassandra-env.sh)"
+ exit 1
+ fi
+fi
+
+# Specifies the default port over which Cassandra will be available for
+# JMX connections.
+JMX_PORT="7399"
+
+
+# Here we create the arguments that will get passed to the jvm when
+# starting cassandra.
+
+# enable assertions. disabling this in production will give a modest
+# performance benefit (around 5%).
+JVM_OPTS="$JVM_OPTS -ea"
+
+# add the jamm javaagent
+if [ "$JVM_VENDOR" != "OpenJDK" -o "$JVM_VERSION" \> "1.6.0" ] \
+ || [ "$JVM_VERSION" = "1.6.0" -a "$JVM_PATCH_VERSION" -ge 23 ]
+then
+ JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar"
+fi
+
+# enable thread priorities, primarily so we can give periodic tasks
+# a lower priority to avoid interfering with client workload
+JVM_OPTS="$JVM_OPTS -XX:+UseThreadPriorities"
+# allows lowering thread priority without being root. see
+# http://tech.stolsvik.com/2010/01/linux-java-thread-priorities-workaround....
+JVM_OPTS="$JVM_OPTS -XX:ThreadPriorityPolicy=42"
+
+# min and max heap sizes should be set to the same value to avoid
+# stop-the-world GC pauses during resize, and so that we can lock the
+# heap in memory on startup to prevent any of it from being swapped
+# out.
+JVM_OPTS="$JVM_OPTS -Xms512M"
+JVM_OPTS="$JVM_OPTS -Xmx512M"
+JVM_OPTS="$JVM_OPTS -Xmn128M"
+JVM_OPTS="$JVM_OPTS -XX:+HeapDumpOnOutOfMemoryError"
+
+# set jvm HeapDumpPath with CASSANDRA_HEAPDUMP_DIR
+if [ "x$CASSANDRA_HEAPDUMP_DIR" != "x" ]; then
+ JVM_OPTS="$JVM_OPTS -XX:HeapDumpPath=$CASSANDRA_HEAPDUMP_DIR/cassandra-`date +%s`-pid$$.hprof"
+fi
+
+
+startswith() { [ "${1#$2}" != "$1" ]; }
+
+if [ "`uname`" = "Linux" ] ; then
+ # reduce the per-thread stack size to minimize the impact of Thrift
+ # thread-per-client. (Best practice is for client connections to
+ # be pooled anyway.) Only do so on Linux where it is known to be
+ # supported.
+ # u34 and greater need 180k
+ JVM_OPTS="$JVM_OPTS -Xss180k"
+fi
+echo "xss = $JVM_OPTS"
+
+# GC tuning options
+JVM_OPTS="$JVM_OPTS -XX:+UseParNewGC"
+JVM_OPTS="$JVM_OPTS -XX:+UseConcMarkSweepGC"
+JVM_OPTS="$JVM_OPTS -XX:+CMSParallelRemarkEnabled"
+JVM_OPTS="$JVM_OPTS -XX:SurvivorRatio=8"
+JVM_OPTS="$JVM_OPTS -XX:MaxTenuringThreshold=1"
+JVM_OPTS="$JVM_OPTS -XX:CMSInitiatingOccupancyFraction=75"
+JVM_OPTS="$JVM_OPTS -XX:+UseCMSInitiatingOccupancyOnly"
+JVM_OPTS="$JVM_OPTS -XX:+UseTLAB"
+# note: bash evals '1.7.x' as > '1.7' so this is really a >= 1.7 jvm check
+if [ "$JVM_VERSION" \> "1.7" ] ; then
+ JVM_OPTS="$JVM_OPTS -XX:+UseCondCardMark"
+fi
+
+# GC logging options -- uncomment to enable
+# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDetails"
+# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDateStamps"
+# JVM_OPTS="$JVM_OPTS -XX:+PrintHeapAtGC"
+# JVM_OPTS="$JVM_OPTS -XX:+PrintTenuringDistribution"
+# JVM_OPTS="$JVM_OPTS -XX:+PrintGCApplicationStoppedTime"
+# JVM_OPTS="$JVM_OPTS -XX:+PrintPromotionFailure"
+# JVM_OPTS="$JVM_OPTS -XX:PrintFLSStatistics=1"
+# JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc-`date +%s`.log"
+# If you are using JDK 6u34 7u2 or later you can enable GC log rotation
+# don't stick the date in the log name if rotation is on.
+# JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc.log"
+# JVM_OPTS="$JVM_OPTS -XX:+UseGCLogFileRotation"
+# JVM_OPTS="$JVM_OPTS -XX:NumberOfGCLogFiles=10"
+# JVM_OPTS="$JVM_OPTS -XX:GCLogFileSize=10M"
+
+# uncomment to have Cassandra JVM listen for remote debuggers/profilers on port 1414
+# JVM_OPTS="$JVM_OPTS -Xdebug -Xnoagent -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1414"
+
+# Prefer binding to IPv4 network interfaces (when net.ipv6.bindv6only=1). See
+# http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6342561 (short version:
+# comment out this entry to enable IPv6 support).
+JVM_OPTS="$JVM_OPTS -Djava.net.preferIPv4Stack=true"
+
+# jmx: metrics and administration interface
+#
+# add this if you're having trouble connecting:
+# JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=<public name>"
+#
+# see
+# https://blogs.oracle.com/jmxetc/entry/troubleshooting_connection_problems...
+# for more on configuring JMX through firewalls, etc. (Short version:
+# get it working with no firewall first.)
+JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT"
+JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=false"
+JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=false"
+JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS"
diff --git a/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/cassandra.yaml b/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/cassandra.yaml
new file mode 100644
index 0000000..8c9c68c
--- /dev/null
+++ b/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/cassandra.yaml
@@ -0,0 +1,690 @@
+# Cassandra storage config YAML
+
+# NOTE:
+# See http://wiki.apache.org/cassandra/StorageConfiguration for
+# full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: rhq
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+#
+# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
+# and will use the initial_token as described below.
+#
+# Specifying initial_token will override this setting.
+#
+# If you already have a cluster with 1 token per node, and wish to migrate to
+# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
+num_tokens: 256
+
+# If you haven't specified num_tokens, or have set it to the default of 1 then
+# you should always specify InitialToken when setting up a production
+# cluster for the first time, and often when adding capacity later.
+# The principle is that each node should be given an equal slice of
+# the token ring; see http://wiki.apache.org/cassandra/Operations
+# for more details.
+#
+# If blank, Cassandra will request a token bisecting the range of
+# the heaviest-loaded existing node. If there is no load information
+# available, such as is the case with a new cluster, it will pick
+# a random token, which will lead to hot spots.
+#initial_token:
+
+# See http://wiki.apache.org/cassandra/HintedHandoff
+hinted_handoff_enabled: true
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, new hints for it will not be
+# created until it has been seen alive and gone down again.
+max_hint_window_in_ms: 10800000 # 3 hours
+# throttle in KB's per second, per delivery thread
+hinted_handoff_throttle_in_kb: 1024
+# Number of threads with which to deliver hints;
+# Consider increasing this number when you have multi-dc deployments, since
+# cross-dc handoff tends to be slower
+max_hints_delivery_threads: 2
+
+# The following setting populates the page cache on memtable flush and compaction
+# WARNING: Enable this setting only when the whole node's data fits in memory.
+# Defaults to: false
+# populate_io_cache_on_flush: false
+
+# Authentication backend, implementing IAuthenticator; used to identify users
+# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
+# PasswordAuthenticator}.
+#
+# - AllowAllAuthenticator performs no checks - set it to disable authentication.
+# - PasswordAuthenticator relies on username/password pairs to authenticate
+# users. It keeps usernames and hashed passwords in system_auth.credentials table.
+# Please increase system_auth keyspace replication factor if you use this authenticator.
+authenticator: org.apache.cassandra.auth.PasswordAuthenticator
+
+# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
+# CassandraAuthorizer}.
+#
+# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
+# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
+# increase system_auth keyspace replication factor if you use this authorizer.
+authorizer: org.apache.cassandra.auth.CassandraAuthorizer
+
+# Validity period for permissions cache (fetching permissions can be an
+# expensive operation depending on the authorizer, CassandraAuthorizer is
+# one example). Defaults to 2000, set to 0 to disable.
+# Will be disabled automatically for AllowAllAuthorizer.
+permissions_validity_in_ms: 2000
+
+# The partitioner is responsible for distributing rows (by key) across
+# nodes in the cluster. Any IPartitioner may be used, including your
+# own as long as it is on the classpath. Out of the box, Cassandra
+# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner
+# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}.
+#
+# - RandomPartitioner distributes rows across the cluster evenly by md5.
+# This is the default prior to 1.2 and is retained for compatibility.
+# - Murmur3Partitioner is similar to RandomPartitioner but uses Murmur3_128
+# Hash Function instead of md5. When in doubt, this is the best option.
+# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows
+# scanning rows in key order, but the ordering can generate hot spots
+# for sequential insertion workloads.
+# - OrderPreservingPartitioner is an obsolete form of BOP that stores
+# keys in a less-efficient format and only works with keys that are
+# UTF8-encoded Strings.
+# - CollatingOPP collates according to EN,US rules rather than lexical byte
+# ordering. Use this as an example if you need custom collation.
+#
+# See http://wiki.apache.org/cassandra/Operations for more on
+# partitioners and token selection.
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+
+# directories where Cassandra should store data on disk.
+data_file_directories:
+ - target/rhq48/rhq-storage/data
+
+# commit log
+commitlog_directory: target/rhq48/rhq-storage/commit_log
+
+# policy for data disk failures:
+# stop: shut down gossip and Thrift, leaving the node effectively dead, but
+# still inspectable via JMX.
+# best_effort: stop using the failed disk and respond to requests based on
+# remaining available sstables. This means you WILL see obsolete
+# data at CL.ONE!
+# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
+disk_failure_policy: stop
+
+# Maximum size of the key cache in memory.
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must store the whole values of
+# its rows, so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
+key_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the key cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and saving is relatively
+# cheap in terms of I/O for the key cache. Row cache saving is much more
+# expensive and has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Maximum size of the row cache in memory.
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should
+# save the row cache. Caches are saved to saved_caches_directory as specified
+# in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and saving is relatively
+# cheap in terms of I/O for the key cache. Row cache saving is much more
+# expensive and has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save
+# Disabled by default, meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# The provider for the row cache to use.
+#
+# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider
+#
+# SerializingCacheProvider serialises the contents of the row and stores
+# it in native memory, i.e., off the JVM Heap. Serialized rows take
+# significantly less memory than "live" rows in the JVM, so you can cache
+# more rows in a given memory footprint. And storing the cache off-heap
+# means you can use smaller heap sizes, reducing the impact of GC pauses.
+#
+# It is also valid to specify the fully-qualified class name to a class
+# that implements org.apache.cassandra.cache.IRowCacheProvider.
+#
+# Defaults to SerializingCacheProvider
+row_cache_provider: SerializingCacheProvider
+
+# saved caches
+saved_caches_directory: target/rhq48/rhq-storage/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch."
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait up to
+# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
+# performing the sync.
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 50
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32 MB, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+commitlog_segment_size_in_mb: 32
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
+seed_provider:
+ # Addresses of hosts that are deemed contact points.
+ # Cassandra nodes use this list of hosts to find each other and learn
+ # the topology of the ring. You must change this if you are running
+ # multiple nodes!
+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+ parameters:
+ # seeds is actually a comma-delimited list of addresses.
+ # Ex: "<ip1>,<ip2>,<ip3>"
+ - seeds: "127.0.0.1"
+
+# emergency pressure valve: each time heap usage after a full (CMS)
+# garbage collection is above this fraction of the max, Cassandra will
+# flush the largest memtables.
+#
+# Set to 1.0 to disable. Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+#
+# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
+# it is most effective under light to moderate load, or read-heavy
+# workloads; under truly massive write load, it will often be too
+# little, too late.
+flush_largest_memtables_at: 0.75
+
+# emergency pressure valve #2: the first time heap usage after a full
+# (CMS) garbage collection is above this fraction of the max,
+# Cassandra will reduce cache maximum _capacity_ to the given fraction
+# of the current _size_. Should usually be set substantially above
+# flush_largest_memtables_at, since that will have less long-term
+# impact on the system.
+#
+# Set to 1.0 to disable. Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+reduce_cache_sizes_at: 0.85
+reduce_cache_capacity_to: 0.6
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+concurrent_reads: 32
+concurrent_writes: 32
+
+# Total memory to use for memtables. Cassandra will flush the largest
+# memtable when this much memory is used.
+# If omitted, Cassandra will set it to 1/3 of the heap.
+# memtable_total_space_in_mb: 2048
+
+# Total space to use for commitlogs. Since commitlog segments are
+# mmapped, and hence use up address space, the default size is 32
+# on 32-bit JVMs, and 1024 on 64-bit JVMs.
+#
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Cassandra will flush every dirty CF in the oldest
+# segment and remove it. So a small total commitlog space will tend
+# to cause more flush activity on less-active columnfamilies.
+# commitlog_total_space_in_mb: 4096
+
+# This sets the number of memtable flush writer threads. These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked. If you have a large heap and many data directories,
+# you can increase this value for better flush performance.
+# By default this will be set to the number of data directories defined.
+#memtable_flush_writers: 1
+
+# the number of full memtables to allow pending flush, that is,
+# waiting for a writer thread. At a minimum, this should be set to
+# the maximum number of secondary indexes created on a single CF.
+memtable_flush_queue_size: 4
+
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSDs; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+storage_port: 7100
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+ssl_storage_port: 7101
+
+# Address to bind to and tell other Cassandra nodes to connect to. You
+# _must_ change this if you want multiple nodes to be able to
+# communicate!
+#
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing *if* the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting this to 0.0.0.0 is always wrong.
+listen_address: 127.0.0.1
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+# Internode authentication backend, implementing IInternodeAuthenticator;
+# used to allow/disallow connections from peer nodes.
+# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
+
+# Whether to start the native transport server.
+# Currently, only the thrift server is started by default because the native
+# transport is considered beta.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+start_native_transport: true
+# port for the CQL native transport to listen for clients on
+native_transport_port: 9142
+# The minimum and maximum threads for handling requests when the native
+# transport is used. Their meaning is similar to that of
+# rpc_min_threads and rpc_max_threads, though the defaults differ slightly and
+# are the ones below:
+# native_transport_min_threads: 16
+native_transport_max_threads: 128
+
+
+# Whether to start the thrift rpc server.
+start_rpc: false
+# The address to bind the Thrift RPC service to -- clients connect
+# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
+# you want Thrift to listen on all interfaces.
+#
+# Leaving this blank has the same effect as it does for ListenAddress
+# (i.e. it will be based on the configured hostname of the node).
+rpc_address: 127.0.0.1
+# port for Thrift to listen for clients on
+rpc_port: 9160
+
+# enable or disable keepalive on rpc connections
+rpc_keepalive: true
+
+# Cassandra provides three out-of-the-box options for the RPC Server:
+#
+# sync -> One thread per thrift connection. For a very large number of clients, memory
+# will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size
+# per thread, and that will correspond to your use of virtual memory (but physical memory
+# may be limited depending on use of stack space).
+#
+# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
+# asynchronously using a small number of threads that does not vary with the number
+# of thrift clients (and thus scales well to many clients). The rpc requests are still
+# synchronous (one thread per active request).
+#
+# The default is sync because on Windows hsha is about 30% slower. On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+#
+# Alternatively, you can provide your own RPC server by supplying the fully-qualified class name
+# of an o.a.c.t.TServerFactory that can create an instance of it.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max_thread to set request pool size limits.
+#
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when not set, it is defined by net.ipv4.tcp_wmem
+# See:
+# /proc/sys/net/core/wmem_max
+# /proc/sys/net/core/rmem_max
+# /proc/sys/net/ipv4/tcp_wmem
+# /proc/sys/net/ipv4/tcp_rmem
+# and: man tcp
+# internode_send_buff_size_in_bytes:
+# internode_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum field length).
+thrift_framed_transport_size_in_mb: 15
+
+# The max length of a thrift message, including all fields and
+# internal thrift overhead.
+thrift_max_message_length_in_mb: 16
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# Keyspace data. Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# Add column indexes to a row after its contents reach this size.
+# Increase if your column values are large, or if you have a very large
+# number of columns. The competing causes are: Cassandra has to
+# deserialize this much of the row to read a single column, so you want
+# it to be small - at least if you do many partial-row reads - but all
+# the index data is read for each access, so you don't want to generate
+# that wastefully either.
+column_index_size_in_kb: 64
+
+# Size limit for rows being compacted in memory. Larger rows will spill
+# over to disk and use a slower two-pass compaction process. A message
+# will be logged specifying the row key.
+in_memory_compaction_limit_in_mb: 64
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long-running compaction. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the number of cores.
+# Uncomment to make compaction mono-threaded, the pre-0.8 default.
+#concurrent_compactors: 1
+
+# Multi-threaded compaction. When enabled, each compaction will use
+# up to one thread per core, plus one thread per sstable being merged.
+# This is usually only useful for SSD-based hardware: otherwise,
+# your concern is usually to get compaction to do LESS i/o (see:
+# compaction_throughput_mb_per_sec), not more.
+multithreaded_compaction: false
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# Track cached row keys during compaction, and re-cache their new
+# positions in the compacted sstable. Disable if you use really large
+# key caches.
+compaction_preheat_key_cache: true
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 200 Mbps or 25 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 200
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 20000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 20000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 20000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because we need to flush all CFs
+# to make sure we can clear out anything in the commitlog that could
+# cause truncated data to reappear.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 20000
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts. If disabled, Cassandra will assume the request
+# was forwarded to the replica instantly by the coordinator.
+#
+# Warning: before enabling this property make sure that ntp is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
+
+# Enable socket timeout for streaming operation.
+# When a timeout occurs during streaming, streaming is retried from the start
+# of the current file. This *can* involve re-streaming a significant amount of
+# data, so you should avoid setting the value too low.
+# Default value is 0, which means streams never time out.
+# streaming_socket_timeout_in_ms: 0
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch. The snitch has two functions:
+# - it teaches Cassandra enough about your network topology to route
+# requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+# correlated failures. It does this by grouping machines into
+# "datacenters" and "racks." Cassandra will do its best not to have
+# more than one replica on the same "rack" (which may not actually
+# be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
+# Out of the box, Cassandra provides
+# - SimpleSnitch:
+# Treats Strategy order as proximity. This improves cache locality
+# when disabling read repair, which can further improve throughput.
+# Only appropriate for single-datacenter deployments.
+# - PropertyFileSnitch:
+# Proximity is determined by rack and data center, which are
+# explicitly configured in cassandra-topology.properties.
+# - GossipingPropertyFileSnitch
+# The rack and datacenter for the local node are defined in
+# cassandra-rackdc.properties and propagated to other nodes via gossip. If
+# cassandra-topology.properties exists, it is used as a fallback, allowing
+# migration from the PropertyFileSnitch.
+# - RackInferringSnitch:
+# Proximity is determined by rack and data center, which are
+# assumed to correspond to the 3rd and 2nd octet of each node's
+# IP address, respectively. Unless this happens to match your
+# deployment conventions (as it did Facebook's), this is best used
+# as an example of writing a custom Snitch class.
+# - Ec2Snitch:
+# Appropriate for EC2 deployments in a single Region. Loads Region
+# and Availability Zone information from the EC2 API. The Region is
+# treated as the Datacenter, and the Availability Zone as the rack.
+# Only private IPs are used, so this will not work across multiple
+# Regions.
+# - Ec2MultiRegionSnitch:
+# Uses public IPs as broadcast_address to allow cross-region
+# connectivity. (Thus, you should set seed addresses to the public
+# IP as well.) You will need to open the storage_port or
+# ssl_storage_port on the public IP firewall. (For intra-Region
+# traffic, Cassandra will switch to the private IP after
+# establishing a connection.)
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it. This is
+# expressed as a double which represents a percentage. Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+# NoScheduler - Has no options
+# RoundRobin
+# - throttle_limit -- The throttle_limit is the number of in-flight
+# requests per client. Requests beyond
+# that limit are queued up until
+# running requests can complete.
+# The value of 80 here is twice the number of
+# concurrent_reads + concurrent_writes.
+# - default_weight -- default_weight is optional and allows for
+# overriding the default which is 1.
+# - weights -- Weights are optional and will default to 1 or the
+# overridden default_weight. The weight translates into how
+# many requests are handled during each turn of the
+# RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+# throttle_limit: 80
+# default_weight: 5
+# weights:
+# Keyspace1: 1
+# Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# index_interval controls the sampling of entries from the primary
+# row index in terms of space versus time. The larger the interval,
+# the smaller and less effective the sampling will be. In technical
+# terms, the interval corresponds to the number of index entries that
+# are skipped between taking each sample. All the sampled entries
+# must fit in memory. Generally, a value between 128 and 512 here
+# coupled with a large key cache size on CFs results in the best trade
+# offs. This value is not often changed; however, if you have many
+# very small rows (many to an OS page), then increasing this will
+# often lower memory usage without an impact on performance.
+index_interval: 128
+
+# Enable or disable inter-node encryption
+# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
+# users generate their own keys) and TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
+# suite for authentication, key exchange and encryption of the actual data transfers.
+# NOTE: No custom encryption options are enabled at the moment
+# The available internode options are: all, none, dc, rack
+#
+# If set to dc cassandra will encrypt the traffic between the DCs
+# If set to rack cassandra will encrypt the traffic between the racks
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore. For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/J...
+#
+server_encryption_options:
+ internode_encryption: none
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ truststore: conf/.truststore
+ truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
+ # require_client_auth: false
+
+# enable or disable client/server encryption.
+client_encryption_options:
+ enabled: false
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ # require_client_auth: false
+ # Set truststore and truststore_password if require_client_auth is true
+ # truststore: conf/.truststore
+ # truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be: all - all traffic is compressed
+# dc - traffic between different datacenters is compressed
+# none - nothing is compressed.
+internode_compression: all
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+inter_dc_tcp_nodelay: true
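
For orientation, the installer reads a couple of the values above when wiring the server to the storage node: the StorageInstaller changes further down load this file with SnakeYAML and pull out listen_address, and the test expects rhq.cassandra.seeds to combine that address with the JMX and native transport ports ("127.0.0.1|7299|9142"). A minimal sketch of that lookup, assuming SnakeYAML on the classpath and a relative conf/ path; the class name is invented for illustration:

import java.io.FileInputStream;
import java.io.InputStream;
import java.util.Map;

import org.yaml.snakeyaml.Yaml;

public class StorageYamlSketch {
    public static void main(String[] args) throws Exception {
        InputStream in = new FileInputStream("conf/cassandra.yaml");
        try {
            @SuppressWarnings("unchecked")
            Map<String, Object> config = (Map<String, Object>) new Yaml().load(in);
            // listen_address is what ends up in the rhq.cassandra.seeds entry
            String listenAddress = (String) config.get("listen_address");
            // native_transport_port (9142 above) is the CQL port part of that entry
            Object nativePort = config.get("native_transport_port");
            System.out.println(listenAddress + "|" + nativePort);
        } finally {
            in.close();
        }
    }
}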
diff --git a/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/log4j-server.properties b/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/log4j-server.properties
new file mode 100644
index 0000000..c2e6dab
--- /dev/null
+++ b/modules/common/cassandra-installer/src/test/resources/rhq48/storage/conf/log4j-server.properties
@@ -0,0 +1,45 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# for production, you should probably set pattern to %c instead of %l.
+# (%l is slower.)
+
+# output messages into a rolling log file as well as stdout
+log4j.rootLogger=INFO,stdout,R
+
+# stdout
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
+
+# rolling log file
+log4j.appender.R=org.apache.log4j.RollingFileAppender
+log4j.appender.R.maxFileSize=20MB
+log4j.appender.R.maxBackupIndex=50
+log4j.appender.R.layout=org.apache.log4j.PatternLayout
+log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n
+# Edit the next line to point to your logs directory
+log4j.appender.R.File=target/rhq48/rhq-storage/logs/rhq-storage.log
+log4j.appender.R.Threshold=INFO
+
+# Application logging options
+#log4j.logger.org.apache.cassandra=DEBUG
+#log4j.logger.org.apache.cassandra.db=DEBUG
+#log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG
+
+# Adding this to avoid thrift logging disconnect errors.
+log4j.logger.org.apache.thrift.server.TNonblockingServer=ERROR
+
commit 656cc6066250f46ff51e16230c0f7c8263f55435
Author: John Sanda <jsanda(a)redhat.com>
Date: Fri Jul 19 13:43:46 2013 -0400
[BZ 983226] fixing upgrade regression introduced by use of cassandra-jvm.properties
This commit removes cassandra-env.sh from our Cassandra distro since we are no
longer using that script. The storage installer upgrade has been cleaned up
some so that it will get the jmx port from cassandra-env.sh for 4.8 installs
and then update cassandra-jvm.properties.
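In other words, for an RHQ 4.8 install the installer now reads the port from the JMX_PORT="..." line in the old conf/cassandra-env.sh and carries it over as jmx_port in the new conf/cassandra-jvm.properties. A rough sketch of that flow using only the JDK (the real code below uses RHQ's PropertiesFileUpdate; the class name and relative paths here are illustrative):

import java.io.*;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class JmxPortMigrationSketch {
    public static void main(String[] args) throws IOException {
        // 1. Pull the port out of the RHQ 4.8 cassandra-env.sh, e.g. JMX_PORT="7299"
        Pattern pattern = Pattern.compile("^JMX_PORT=\"(\\d+)\"");
        int jmxPort = -1;
        BufferedReader reader = new BufferedReader(new FileReader("conf/cassandra-env.sh"));
        try {
            for (String line; (line = reader.readLine()) != null;) {
                Matcher m = pattern.matcher(line);
                if (m.find()) {
                    jmxPort = Integer.parseInt(m.group(1));
                    break;
                }
            }
        } finally {
            reader.close();
        }

        // 2. Write it into the new cassandra-jvm.properties under the jmx_port key
        Properties jvmProps = new Properties();
        FileInputStream in = new FileInputStream("conf/cassandra-jvm.properties");
        try {
            jvmProps.load(in);
        } finally {
            in.close();
        }
        jvmProps.setProperty("jmx_port", Integer.toString(jmxPort));
        FileOutputStream out = new FileOutputStream("conf/cassandra-jvm.properties");
        try {
            jvmProps.store(out, "migrated from cassandra-env.sh");
        } finally {
            out.close();
        }
    }
}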
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml
index 42f6c8b..2bde394 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml
@@ -171,6 +171,7 @@
<move file="${project.build.outputDirectory}/cassandra/bin/cassandra" todir="${cassandra.dir}/bin"/>
<delete dir="${project.build.outputDirectory}/cassandra"/>
<delete dir="${cassandra.dir}/javadoc"/>
+ <delete file="${cassandra.dir}/conf/cassandra-env.sh"/>
<zip basedir="${cassandra.dir}" destfile="${cassandra.distro.zip}"/>
<delete dir="${cassandra.dir}"/>
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/bin/cassandra b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/bin/cassandra
index 742d9c0..ddbc099 100755
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/bin/cassandra
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/bin/cassandra
@@ -106,8 +106,7 @@ if [ -z "$CASSANDRA_CONF" -o -z "$CLASSPATH" ]; then
exit 1
fi
-if [ -f "$CASSANDRA_CONF/cassandra-env.sh" ]; then
- #. "$CASSANDRA_CONF/cassandra-env.sh"
+if [ -f "$CASSANDRA_CONF/cassandra-jvm.properties" ]; then
. "$CASSANDRA_CONF/cassandra-jvm.properties"
fi
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra-env.sh b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra-env.sh
deleted file mode 100644
index 99b3128..0000000
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra-env.sh
+++ /dev/null
@@ -1,247 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-calculate_heap_sizes()
-{
- case "`uname`" in
- Linux)
- system_memory_in_mb=`free -m | awk '/Mem:/ {print $2}'`
- system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo`
- ;;
- FreeBSD)
- system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'`
- system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
- system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
- ;;
- SunOS)
- system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'`
- system_cpu_cores=`psrinfo | wc -l`
- ;;
- Darwin)
- system_memory_in_bytes=`sysctl hw.memsize | awk '{print $2}'`
- system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
- system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
- ;;
- *)
- # assume reasonable defaults for e.g. a modern desktop or
- # cheap server
- system_memory_in_mb="2048"
- system_cpu_cores="2"
- ;;
- esac
-
- # some systems like the raspberry pi don't report cores, use at least 1
- if [ "$system_cpu_cores" -lt "1" ]
- then
- system_cpu_cores="1"
- fi
-
- # set max heap size based on the following
- # max(min(1/2 ram, 1024MB), min(1/4 ram, 8GB))
- # calculate 1/2 ram and cap to 1024MB
- # calculate 1/4 ram and cap to 8192MB
- # pick the max
- half_system_memory_in_mb=`expr $system_memory_in_mb / 2`
- quarter_system_memory_in_mb=`expr $half_system_memory_in_mb / 2`
- if [ "$half_system_memory_in_mb" -gt "1024" ]
- then
- half_system_memory_in_mb="1024"
- fi
- if [ "$quarter_system_memory_in_mb" -gt "8192" ]
- then
- quarter_system_memory_in_mb="8192"
- fi
- if [ "$half_system_memory_in_mb" -gt "$quarter_system_memory_in_mb" ]
- then
- max_heap_size_in_mb="$half_system_memory_in_mb"
- else
- max_heap_size_in_mb="$quarter_system_memory_in_mb"
- fi
- MAX_HEAP_SIZE="\${max_heap_size_in_mb}M"
-
- # Young gen: min(max_sensible_per_modern_cpu_core * num_cores, 1/4 * heap size)
- max_sensible_yg_per_core_in_mb="100"
- max_sensible_yg_in_mb=`expr $max_sensible_yg_per_core_in_mb "*" $system_cpu_cores`
-
- desired_yg_in_mb=`expr $max_heap_size_in_mb / 4`
-
- if [ "$desired_yg_in_mb" -gt "$max_sensible_yg_in_mb" ]
- then
- HEAP_NEWSIZE="\${max_sensible_yg_in_mb}M"
- else
- HEAP_NEWSIZE="\${desired_yg_in_mb}M"
- fi
-}
-
-# Determine the sort of JVM we'll be running on.
-
-java_ver_output=`"\${JAVA:-java}" -version 2>&1`
-
-jvmver=`echo "$java_ver_output" | awk -F'"' 'NR==1 {print $2}'`
-JVM_VERSION=\${jvmver%_*}
-JVM_PATCH_VERSION=\${jvmver#*_}
-
-jvm=`echo "$java_ver_output" | awk 'NR==2 {print $1}'`
-case "$jvm" in
- OpenJDK)
- JVM_VENDOR=OpenJDK
- # this will be "64-Bit" or "32-Bit"
- JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $2}'`
- ;;
- "Java(TM)")
- JVM_VENDOR=Oracle
- # this will be "64-Bit" or "32-Bit"
- JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $3}'`
- ;;
- *)
- # Help fill in other JVM values
- JVM_VENDOR=other
- JVM_ARCH=unknown
- ;;
-esac
-
-
-# Override these to set the amount of memory to allocate to the JVM at
-# start-up. For production use you may wish to adjust this for your
-# environment. MAX_HEAP_SIZE is the total amount of memory dedicated
-# to the Java heap; HEAP_NEWSIZE refers to the size of the young
-# generation. Both MAX_HEAP_SIZE and HEAP_NEWSIZE should be either set
-# or not (if you set one, set the other).
-#
-# The main trade-off for the young generation is that the larger it
-# is, the longer GC pause times will be. The shorter it is, the more
-# expensive GC will be (usually).
-#
-# The example HEAP_NEWSIZE assumes a modern 8-core+ machine for decent pause
-# times. If in doubt, and if you do not particularly want to tweak, go with
-# 100 MB per physical CPU core.
-
-#MAX_HEAP_SIZE="4G"
-#HEAP_NEWSIZE="800M"
-
-if [ "x$MAX_HEAP_SIZE" = "x" ] && [ "x$HEAP_NEWSIZE" = "x" ]; then
- calculate_heap_sizes
-else
- if [ "x$MAX_HEAP_SIZE" = "x" ] || [ "x$HEAP_NEWSIZE" = "x" ]; then
- echo "please set or unset MAX_HEAP_SIZE and HEAP_NEWSIZE in pairs (see cassandra-env.sh)"
- exit 1
- fi
-fi
-
-# Specifies the default port over which Cassandra will be available for
-# JMX connections.
-JMX_PORT="${rhq.cassandra.jmx.port}"
-
-
-# Here we create the arguments that will get passed to the jvm when
-# starting cassandra.
-
-# enable assertions. disabling this in production will give a modest
-# performance benefit (around 5%).
-JVM_OPTS="$JVM_OPTS -ea"
-
-# add the jamm javaagent
-if [ "$JVM_VENDOR" != "OpenJDK" -o "$JVM_VERSION" \\> "1.6.0" ] \\
- || [ "$JVM_VERSION" = "1.6.0" -a "$JVM_PATCH_VERSION" -ge 23 ]
-then
- JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar"
-fi
-
-# enable thread priorities, primarily so we can give periodic tasks
-# a lower priority to avoid interfering with client workload
-JVM_OPTS="$JVM_OPTS -XX:+UseThreadPriorities"
-# allows lowering thread priority without being root. see
-# http://tech.stolsvik.com/2010/01/linux-java-thread-priorities-workaround....
-JVM_OPTS="$JVM_OPTS -XX:ThreadPriorityPolicy=42"
-
-# min and max heap sizes should be set to the same value to avoid
-# stop-the-world GC pauses during resize, and so that we can lock the
-# heap in memory on startup to prevent any of it from being swapped
-# out.
-JVM_OPTS="$JVM_OPTS -Xms${rhq.cassandra.max.heap.size}"
-JVM_OPTS="$JVM_OPTS -Xmx${rhq.cassandra.max.heap.size}"
-JVM_OPTS="$JVM_OPTS -Xmn${rhq.cassandra.heap.new.size}"
-JVM_OPTS="$JVM_OPTS -XX:+HeapDumpOnOutOfMemoryError"
-
-# set jvm HeapDumpPath with CASSANDRA_HEAPDUMP_DIR
-if [ "x$CASSANDRA_HEAPDUMP_DIR" != "x" ]; then
- JVM_OPTS="$JVM_OPTS -XX:HeapDumpPath=$CASSANDRA_HEAPDUMP_DIR/cassandra-`date +%s`-pid$$.hprof"
-fi
-
-
-startswith() { [ "\${1#$2}" != "$1" ]; }
-
-if [ "`uname`" = "Linux" ] ; then
- # reduce the per-thread stack size to minimize the impact of Thrift
- # thread-per-client. (Best practice is for client connections to
- # be pooled anyway.) Only do so on Linux where it is known to be
- # supported.
- # u34 and greater need 180k
- JVM_OPTS="$JVM_OPTS -Xss${rhq.cassandra.stack.size}"
-fi
-echo "xss = $JVM_OPTS"
-
-# GC tuning options
-JVM_OPTS="$JVM_OPTS -XX:+UseParNewGC"
-JVM_OPTS="$JVM_OPTS -XX:+UseConcMarkSweepGC"
-JVM_OPTS="$JVM_OPTS -XX:+CMSParallelRemarkEnabled"
-JVM_OPTS="$JVM_OPTS -XX:SurvivorRatio=8"
-JVM_OPTS="$JVM_OPTS -XX:MaxTenuringThreshold=1"
-JVM_OPTS="$JVM_OPTS -XX:CMSInitiatingOccupancyFraction=75"
-JVM_OPTS="$JVM_OPTS -XX:+UseCMSInitiatingOccupancyOnly"
-JVM_OPTS="$JVM_OPTS -XX:+UseTLAB"
-# note: bash evals '1.7.x' as > '1.7' so this is really a >= 1.7 jvm check
-if [ "$JVM_VERSION" \\> "1.7" ] ; then
- JVM_OPTS="$JVM_OPTS -XX:+UseCondCardMark"
-fi
-
-# GC logging options -- uncomment to enable
-# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDetails"
-# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDateStamps"
-# JVM_OPTS="$JVM_OPTS -XX:+PrintHeapAtGC"
-# JVM_OPTS="$JVM_OPTS -XX:+PrintTenuringDistribution"
-# JVM_OPTS="$JVM_OPTS -XX:+PrintGCApplicationStoppedTime"
-# JVM_OPTS="$JVM_OPTS -XX:+PrintPromotionFailure"
-# JVM_OPTS="$JVM_OPTS -XX:PrintFLSStatistics=1"
-# JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc-`date +%s`.log"
-# If you are using JDK 6u34 7u2 or later you can enable GC log rotation
-# don't stick the date in the log name if rotation is on.
-# JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc.log"
-# JVM_OPTS="$JVM_OPTS -XX:+UseGCLogFileRotation"
-# JVM_OPTS="$JVM_OPTS -XX:NumberOfGCLogFiles=10"
-# JVM_OPTS="$JVM_OPTS -XX:GCLogFileSize=10M"
-
-# uncomment to have Cassandra JVM listen for remote debuggers/profilers on port 1414
-# JVM_OPTS="$JVM_OPTS -Xdebug -Xnoagent -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1414"
-
-# Prefer binding to IPv4 network intefaces (when net.ipv6.bindv6only=1). See
-# http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6342561 (short version:
-# comment out this entry to enable IPv6 support).
-JVM_OPTS="$JVM_OPTS -Djava.net.preferIPv4Stack=true"
-
-# jmx: metrics and administration interface
-#
-# add this if you're having trouble connecting:
-# JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=<public name>"
-#
-# see
-# https://blogs.oracle.com/jmxetc/entry/troubleshooting_connection_problems...
-# for more on configuring JMX through firewalls, etc. (Short version:
-# get it working with no firewall first.)
-JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT"
-JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=false"
-JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=false"
-JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS"
diff --git a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java
index 63282e4..5c5ac4e 100644
--- a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java
+++ b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java
@@ -25,8 +25,10 @@
package org.rhq.storage.installer;
+import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
+import java.io.FileReader;
import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
@@ -250,15 +252,32 @@ public class StorageInstaller {
File oldConfDir = new File(existingStorageDir, "conf");
File newConfDir = new File(storageBasedir, "conf");
+ File cassandraEnvFile = new File(oldConfDir, "cassandra-env.sh");
+
String cassandraYaml = "cassandra.yaml";
String cassandraJvmProps = "cassandra-jvm.properties";
File cassandraJvmPropsFile = new File(newConfDir, cassandraJvmProps);
String log4j = "log4j-server.properties";
replaceFile(new File(oldConfDir, cassandraYaml), new File(newConfDir, cassandraYaml));
- replaceFile(new File(oldConfDir, cassandraJvmProps), cassandraJvmPropsFile);
replaceFile(new File(oldConfDir, log4j), new File(newConfDir, log4j));
+ if (cassandraEnvFile.exists()) {
+ // Then this is an RHQ 4.8 install
+ jmxPort = parseJmxPortFromCassandrEnv(cassandraEnvFile);
+ Properties jvmProps = new Properties();
+ jvmProps.load(new FileInputStream(cassandraJvmPropsFile));
+ PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(
+ cassandraJvmPropsFile.getAbsolutePath());
+ jvmProps.setProperty("jmx_port", Integer.toString(jmxPort));
+
+ propertiesUpdater.update(jvmProps);
+
+ } else {
+ jmxPort = parseJmxPort(cassandraJvmPropsFile);
+ replaceFile(new File(oldConfDir, cassandraJvmProps), cassandraJvmPropsFile);
+ }
+
log.info("Finished installing RHQ Storage Node.");
log.info("Updating rhq-server.properties...");
@@ -268,8 +287,6 @@ public class StorageInstaller {
Map<String, Object> config = (Map<String, Object>) yaml.load(new FileInputStream(yamlFile));
hostname = (String) config.get("listen_address");
-
- jmxPort = parseJmxPort(cassandraJvmPropsFile);
} else {
if (cmdLine.hasOption("dir")) {
File basedir = new File(cmdLine.getOptionValue("dir"));
@@ -661,6 +678,60 @@ public class StorageInstaller {
}
}
+ private int parseJmxPortFromCassandrEnv(File cassandraEnvFile) {
+ Integer port = null;
+ if (isWindows()) {
+ // TODO
+ return defaultJmxPort;
+ } else {
+ BufferedReader reader = null;
+ try {
+ reader = new BufferedReader(new FileReader(cassandraEnvFile));
+ String line = reader.readLine();
+
+ while (line != null) {
+ if (line.startsWith("JMX_PORT")) {
+ int startIndex = "JMX_PORT=\"".length();
+ int endIndex = line.lastIndexOf("\"");
+
+ if (startIndex == -1 || endIndex == -1) {
+ log.error("Failed to parse the JMX port. Make sure that you have the JMX port defined on its "
+ + "own line as follows, JMX_PORT=\"<jmx-port>\"");
+ throw new RuntimeException("Cannot determine JMX port");
+ }
+ try {
+ port = Integer.parseInt(line.substring(startIndex, endIndex));
+ } catch (NumberFormatException e) {
+ log.error("The JMX port must be an integer. [" + port + "] is an invalid value");
+ throw new RuntimeException("The JMX port has an invalid value");
+ }
+ return port;
+ }
+ line = reader.readLine();
+ }
+ log.error("Failed to parse the JMX port. Make sure that you have the JMX port defined on its "
+ + "own line as follows, JMX_PORT=\"<jmx-port>\"");
+ throw new RuntimeException("Cannot determine JMX port");
+ } catch (IOException e) {
+ log.error("Failed to parse JMX port. There was an unexpected IO error", e);
+ throw new RuntimeException("Failed to parse JMX port due to IO error: " + e.getMessage());
+ } finally {
+ try {
+ if (reader != null) {
+ reader.close();
+ }
+ } catch (IOException e) {
+ if (log.isDebugEnabled()) {
+ log.debug("An error occurred closing the " + BufferedReader.class.getName() + " used to "
+ + "parse the JMX port", e);
+ } else {
+ log.warn("There was error closing the reader used to parse the JMX port: " + e.getMessage());
+ }
+ }
+ }
+ }
+ }
+
private int parseJmxPort(File cassandraJvmOptsFile) {
Integer port = null;
if (isWindows()) {
diff --git a/modules/common/cassandra-installer/src/test/java/org/rhq/storage/installer/StorageInstallerTest.java b/modules/common/cassandra-installer/src/test/java/org/rhq/storage/installer/StorageInstallerTest.java
index 3fd13c7..a7921c0 100644
--- a/modules/common/cassandra-installer/src/test/java/org/rhq/storage/installer/StorageInstallerTest.java
+++ b/modules/common/cassandra-installer/src/test/java/org/rhq/storage/installer/StorageInstallerTest.java
@@ -2,11 +2,13 @@ package org.rhq.storage.installer;
import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
import static org.testng.Assert.fail;
import java.io.File;
import java.io.FileInputStream;
+import java.io.FileOutputStream;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.Properties;
@@ -20,41 +22,53 @@ import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import org.rhq.cassandra.CassandraClusterManager;
+import org.rhq.core.util.MessageDigestGenerator;
import org.rhq.core.util.file.FileUtil;
+import org.rhq.core.util.stream.StreamUtil;
/**
* @author John Sanda
*/
public class StorageInstallerTest {
+ private MessageDigestGenerator digestGenerator;
+
private File basedir;
+ private File serverDir;
+
private File storageDir;
private StorageInstaller installer;
@BeforeMethod
public void initDirs(Method test) throws Exception {
+ digestGenerator = new MessageDigestGenerator(MessageDigestGenerator.SHA_256);
+
File dir = new File(getClass().getResource(".").toURI());
basedir = new File(dir, getClass().getSimpleName() + "/" + test.getName());
FileUtil.purge(basedir, true);
basedir.mkdirs();
- System.setProperty("rhq.server.basedir", basedir.getAbsolutePath());
+ serverDir = new File(basedir, "rhq-server");
- File serverPropsFile = new File(basedir, "rhq-server.properties");
+ System.setProperty("rhq.server.basedir", serverDir.getAbsolutePath());
+
+ File serverPropsFile = new File(serverDir, "rhq-server.properties");
FileUtils.touch(serverPropsFile);
System.setProperty("rhq.server.properties-file", serverPropsFile.getAbsolutePath());
- storageDir = new File(basedir, "rhq-storage");
+ storageDir = new File(serverDir, "rhq-storage");
installer = new StorageInstaller();
}
@AfterMethod
public void shutdownStorageNode() throws Exception {
- CassandraClusterManager ccm = new CassandraClusterManager();
- ccm.killNode(storageDir);
+ if (FileUtils.getFile(storageDir, "bin", "cassandra.pid").exists()) {
+ CassandraClusterManager ccm = new CassandraClusterManager();
+ ccm.killNode(storageDir);
+ }
}
@Test
@@ -97,6 +111,65 @@ public class StorageInstallerTest {
assertTrue(savedCachesDir.exists(), "Expected to find saved_caches directory at " + savedCachesDir);
}
+ @Test
+ public void upgradeFromRHQ48Install() throws Exception {
+ File rhq48ServerDir = new File(basedir, "rhq48-server");
+ File rhq48StorageDir = new File(rhq48ServerDir, "rhq-storage");
+ File rhq48StorageConfDir = new File(rhq48StorageDir, "conf");
+
+ File oldCassandraYamlFile = new File(rhq48StorageConfDir, "cassandra.yaml");
+ File oldCassandraEnvFile = new File(rhq48StorageConfDir, "cassandra-env.sh");
+ File oldLog4JFile = new File(rhq48StorageConfDir, "log4j-server.properties");
+
+ rhq48StorageConfDir.mkdirs();
+ StreamUtil.copy(getClass().getResourceAsStream("/rhq48/storage/conf/cassandra.yaml"),
+ new FileOutputStream(oldCassandraYamlFile), true);
+ StreamUtil.copy(getClass().getResourceAsStream("/rhq48/storage/conf/cassandra-env.sh"),
+ new FileOutputStream(oldCassandraEnvFile));
+ StreamUtil.copy(getClass().getResourceAsStream("/rhq48/storage/conf/log4j-server.properties"),
+ new FileOutputStream(oldLog4JFile));
+
+ CommandLineParser parser = new PosixParser();
+
+ String[] args = {
+ "--upgrade", rhq48ServerDir.getAbsolutePath(),
+ "--dir", storageDir.getAbsolutePath()
+ };
+
+ CommandLine cmdLine = parser.parse(installer.getOptions(), args);
+ int status = installer.run(cmdLine);
+
+ assertEquals(status, 0, "Expected to get back a status code of 0 for a successful upgrade");
+ assertNodeIsRunning();
+
+ File binDir = new File(storageDir, "bin");
+ assertTrue(binDir.exists(), "Expected to find bin directory at " + binDir);
+
+ File libDir = new File(storageDir, "lib");
+ assertTrue(libDir.exists(), "Expected to find lib directory at " + libDir);
+
+ File confDir = new File(storageDir, "conf");
+ assertTrue(confDir.exists(), "Expected to find conf directory at " + confDir);
+
+ File newCassandraYamlFile = new File(confDir, "cassandra.yaml");
+ assertEquals(sha256(oldCassandraYamlFile), sha256(newCassandraYamlFile), newCassandraYamlFile +
+ " does not match the original version");
+
+ File newLog4JFile = new File(confDir, "log4j-server.properties");
+ assertEquals(sha256(oldLog4JFile), sha256(newLog4JFile), newLog4JFile + " does not match the original version");
+
+ assertFalse(new File(confDir, "cassandra-env.sh").exists(), "cassandra-env.sh should not be used after RHQ 4.8.0");
+
+ File cassandraJvmPropsFile = new File(confDir, "cassandra-jvm.properties");
+ Properties properties = new Properties();
+ properties.load(new FileInputStream(cassandraJvmPropsFile));
+
+ // If this check fails, make sure that the expected value matches the value in
+ // src/test/resources/rhq48/storage/conf/cassandra-env.sh
+ assertEquals(properties.getProperty("jmx_port"), "7399", "Failed to update the JMX port in " +
+ cassandraJvmPropsFile);
+ }
+
private void assertNodeIsRunning() {
try {
installer.verifyNodeIsUp("127.0.0.1", 7299, 3, 1000);
@@ -106,7 +179,7 @@ public class StorageInstallerTest {
}
private void assertRhqServerPropsUpdated() {
- File serverPropsFile = new File(basedir, "rhq-server.properties");
+ File serverPropsFile = new File(serverDir, "rhq-server.properties");
Properties properties = new Properties();
try {
@@ -120,4 +193,12 @@ public class StorageInstallerTest {
assertEquals(seeds, "127.0.0.1|7299|9142");
}
+ private String sha256(File file) {
+ try {
+ return digestGenerator.calcDigestString(file);
+ } catch (IOException e) {
+ throw new RuntimeException("Failed to calculate SHA-256 hash for " + file.getPath(), e);
+ }
+ }
+
}
commit 8ad6e86b9ba704eb468646fd7fffcf5dd082e2c4
Author: Heiko W. Rupp <hwr(a)redhat.com>
Date: Fri Jul 19 18:13:31 2013 +0200
Add annotation and processing for configuration properties. Also shuffle some classes around.
diff --git a/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/ConfigProperty.java b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/ConfigProperty.java
new file mode 100644
index 0000000..4b5a3ab
--- /dev/null
+++ b/modules/helpers/pluginAnnotations/src/main/java/org/rhq/helpers/pluginAnnotations/agent/ConfigProperty.java
@@ -0,0 +1,49 @@
+/*
+ * RHQ Management Platform
+ * Copyright (C) 2005-2013 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+package org.rhq.helpers.pluginAnnotations.agent;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * A configuration property for resource or plugin config.
+ * Currently only simple properties are supported.
+ * @author Heiko W. Rupp
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target( { ElementType.FIELD})
+public @interface ConfigProperty {
+
+ public Scope scope() default Scope.PLUGIN;
+ String property() default "";
+ String displayName() default "";
+ String description() default "";
+ boolean readOnly() default false;
+ String defaultValue() default "";
+ RhqType rhqType() default RhqType.VOID;
+
+
+ public enum Scope {
+ PLUGIN,
+ RESOURCE;
+ }
+}
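
To make the new annotation concrete, a field-level usage might look like the following; the class and field names are invented, and only attributes declared in ConfigProperty above are used (the RHQ type is left to the processor, which derives it from the field type via RhqType.findType()):

import org.rhq.helpers.pluginAnnotations.agent.ConfigProperty;

public class ExampleComponent {

    // Plugin-scoped (the default) -> collected into Props.getPluginConfig()
    @ConfigProperty(displayName = "Host", description = "Host the plugin connects to",
        defaultValue = "localhost")
    String host;

    // Resource-scoped, with an explicit property name -> collected into Props.getResourceConfig()
    @ConfigProperty(property = "port", scope = ConfigProperty.Scope.RESOURCE, readOnly = true)
    int port;
}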
diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/AnnotationProcessor.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/AnnotationProcessor.java
index 3c90b7b..16ec437 100644
--- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/AnnotationProcessor.java
+++ b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/AnnotationProcessor.java
@@ -19,15 +19,23 @@
package org.rhq.helpers.pluginGen;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
import java.util.List;
+import org.rhq.helpers.pluginAnnotations.agent.ConfigProperty;
+import org.rhq.helpers.pluginAnnotations.agent.Metric;
+import org.rhq.helpers.pluginAnnotations.agent.Operation;
+import org.rhq.helpers.pluginAnnotations.agent.Parameter;
+import org.rhq.helpers.pluginAnnotations.agent.RhqType;
+
/**
* Processor that scans a directory for annotated classes and generates metrics etc. from them.
* @author Heiko W. Rupp
*/
public class AnnotationProcessor {
- private List<Class> classList;
private final DirectoryClassLoader classLoader;
public AnnotationProcessor(String baseDirectory) {
@@ -36,9 +44,119 @@ public class AnnotationProcessor {
}
public void populate(Props props) {
- classList = classLoader.findClasses();
+ List<Class> classList = classLoader.findClasses();
+
+ populateMetrics(props, classList);
+ populateOperations(props, classList);
+ populateConfigurations(props, classList);
+ }
+
+ public void populateMetrics(Props props, List<Class> classes) {
+ for (Class<?> clazz : classes) {
+ for (Field field : clazz.getDeclaredFields()) {
+ Metric metricAnnot = field.getAnnotation(Metric.class);
+ addMetric(props, metricAnnot, field.getName());
+ }
+
+ for (Method method : clazz.getDeclaredMethods()) {
+ Metric metricAnnot = method.getAnnotation(Metric.class);
+ addMetric(props, metricAnnot, method.getName());
+ }
+ }
+ }
+
+ public void populateOperations(Props props, List<Class> classes) {
+ for (Class<?> clazz : classes) {
+ for (Method method : clazz.getDeclaredMethods()) {
+ Operation operationAnnot = method.getAnnotation(Operation.class);
+ if (operationAnnot != null) {
+ String property = operationAnnot.name();
+ if (property.isEmpty()) {
+ property = method.getName();
+ }
+ Props.OperationProps op = new Props.OperationProps(property);
+ op.setDisplayName(operationAnnot.displayName());
+ op.setDescription(operationAnnot.description());
+ RhqType type = RhqType.findType(method.getReturnType());
+ if (type != RhqType.VOID) {
+ Props.SimpleProperty simpleProperty = new Props.SimpleProperty(type.getRhqName());
+ op.setResult(simpleProperty);
+ }
- props.populateMetrics(classList);
- props.populateOperations(classList);
+ Class[] types = method.getParameterTypes();
+ int i=0;
+ for (Annotation[] annotations : method.getParameterAnnotations() ) {
+ for (Annotation annotation : annotations) {
+ if (annotation instanceof Parameter) {
+ Parameter parameter = (Parameter) annotation;
+ Props.SimpleProperty simpleProperty = new Props.SimpleProperty(parameter.name());
+ simpleProperty.setDescription(parameter.description());
+ Class typeClass = types[i];
+ RhqType rhqType = RhqType.findType(typeClass);
+ if (parameter.type()!=RhqType.VOID){
+ rhqType = parameter.type();
+ }
+ simpleProperty.setType(rhqType.getRhqName());
+ op.getParams().add(simpleProperty);
+ }
+ }
+ i++;
+ }
+ props.getOperations().add(op);
+ }
+
+ }
+ }
+ }
+
+ public void populateConfigurations(Props props, List<Class> classes) {
+ for (Class<?> clazz : classes) {
+ for (Field field : clazz.getDeclaredFields()) {
+ ConfigProperty configProperty = field.getAnnotation(ConfigProperty.class);
+ if (configProperty!=null) {
+ String name = configProperty.property();
+ if(name.isEmpty()) {
+ name = field.getName();
+ }
+ Props.SimpleProperty property = new Props.SimpleProperty(name);
+ property.setDescription(configProperty.description());
+ property.setDisplayName(configProperty.displayName());
+ Class type = field.getType();
+ RhqType rhqType = RhqType.findType(type);
+ if (configProperty.rhqType()!=RhqType.VOID) {
+ rhqType = configProperty.rhqType();
+ }
+ property.setType(rhqType.getRhqName());
+
+ switch (configProperty.scope()){
+ case PLUGIN:
+ props.getPluginConfig().add(property);
+ break;
+ case RESOURCE:
+ props.getResourceConfig().add(property);
+ break;
+ default:
+ throw new IllegalStateException("Unknown scope: " +configProperty.scope().name());
+ }
+ }
+ }
+ }
}
+
+ private void addMetric(Props props, Metric metricAnnot, String name) {
+ if (metricAnnot != null) {
+ String property = metricAnnot.property();
+ if (property.isEmpty()) {
+ property = name;
+ }
+ Props.MetricProps metric = new Props.MetricProps(property);
+ metric.setDisplayName(metricAnnot.displayName());
+ metric.setDisplayType(metricAnnot.displayType());
+ metric.setDataType(metricAnnot.dataType());
+ metric.setDescription(metricAnnot.description());
+ metric.setUnits(metricAnnot.units());
+ props.getMetrics().add(metric);
+ }
+ }
+
}
diff --git a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java
index 95be574..c88fbc0 100644
--- a/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java
+++ b/modules/helpers/pluginGen/src/main/java/org/rhq/helpers/pluginGen/Props.java
@@ -18,20 +18,12 @@
*/
package org.rhq.helpers.pluginGen;
-import java.lang.annotation.Annotation;
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
import java.util.HashSet;
import java.util.LinkedHashSet;
-import java.util.List;
import java.util.Set;
import org.rhq.helpers.pluginAnnotations.agent.DataType;
import org.rhq.helpers.pluginAnnotations.agent.DisplayType;
-import org.rhq.helpers.pluginAnnotations.agent.Metric;
-import org.rhq.helpers.pluginAnnotations.agent.Operation;
-import org.rhq.helpers.pluginAnnotations.agent.Parameter;
-import org.rhq.helpers.pluginAnnotations.agent.RhqType;
import org.rhq.helpers.pluginAnnotations.agent.Units;
/**
@@ -94,7 +86,8 @@ public class Props {
/** Embedded children */
private Set<Props> children = new HashSet<Props>();
- private Set<SimpleProperty> simpleProps = new LinkedHashSet<SimpleProperty>();
+ private Set<SimpleProperty> pluginConfig = new LinkedHashSet<SimpleProperty>();
+ private Set<SimpleProperty> resourceConfig = new LinkedHashSet<SimpleProperty>();
private Set<Template> templates = new HashSet<Template>();
@@ -291,12 +284,12 @@ public class Props {
this.rhqVersion = rhqVersion;
}
- public Set<SimpleProperty> getSimpleProps() {
- return simpleProps;
+ public Set<SimpleProperty> getPluginConfig() {
+ return pluginConfig;
}
- public void setSimpleProps(Set<SimpleProperty> simpleProps) {
- this.simpleProps = simpleProps;
+ public void setPluginConfig(Set<SimpleProperty> pluginConfig) {
+ this.pluginConfig = pluginConfig;
}
public Set<Template> getTemplates() {
@@ -307,7 +300,15 @@ public class Props {
this.templates = templates;
}
- public Set<MetricProps> getMetrics() {
+ public Set<SimpleProperty> getResourceConfig() {
+ return resourceConfig;
+ }
+
+ public void setResourceConfig(Set<SimpleProperty> resourceConfig) {
+ this.resourceConfig = resourceConfig;
+ }
+
+ public Set<MetricProps> getMetrics() {
return metrics;
}
@@ -363,79 +364,6 @@ public class Props {
this.scanForAnnotations = scanForAnnotations;
}
- public void populateMetrics(List<Class> classes) {
- for (Class<?> clazz : classes) {
- for (Field field : clazz.getDeclaredFields()) {
- Metric metricAnnot = field.getAnnotation(Metric.class);
- addMetric(metricAnnot, field.getName());
- }
-
- for (Method method : clazz.getDeclaredMethods()) {
- Metric metricAnnot = method.getAnnotation(Metric.class);
- addMetric(metricAnnot, method.getName());
- }
- }
- }
-
- public void populateOperations(List<Class> classes) {
- for (Class<?> clazz : classes) {
- for (Method method : clazz.getDeclaredMethods()) {
- Operation operationAnnot = method.getAnnotation(Operation.class);
- if (operationAnnot != null) {
- String property = operationAnnot.name();
- if (property.isEmpty()) {
- property = method.getName();
- }
- OperationProps op = new OperationProps(property);
- op.setDisplayName(operationAnnot.displayName());
- op.setDescription(operationAnnot.description());
- RhqType type = RhqType.findType(method.getReturnType());
- if (type != RhqType.VOID) {
- SimpleProperty simpleProperty = new SimpleProperty(type.getRhqName());
- op.setResult(simpleProperty);
- }
-
- Class[] types = method.getParameterTypes();
- int i=0;
- for (Annotation[] annotations : method.getParameterAnnotations() ) {
- for (Annotation annotation : annotations) {
- if (annotation instanceof Parameter) {
- Parameter parameter = (Parameter) annotation;
- SimpleProperty simpleProperty = new SimpleProperty(parameter.name());
- simpleProperty.setDescription(parameter.description());
- Class typeClass = types[i];
- RhqType rhqType = RhqType.findType(typeClass);
- if (parameter.type()!=RhqType.VOID){
- rhqType = parameter.type();
- }
- simpleProperty.setType(rhqType.getRhqName());
- op.getParams().add(simpleProperty);
- }
- }
- i++;
- }
- operations.add(op);
- }
-
- }
- }
- }
-
- private void addMetric(Metric metricAnnot, String name) {
- if (metricAnnot != null) {
- String property = metricAnnot.property();
- if (property.isEmpty()) {
- property = name;
- }
- MetricProps metric = new MetricProps(property);
- metric.setDisplayName(metricAnnot.displayName());
- metric.setDisplayType(metricAnnot.displayType());
- metric.setDataType(metricAnnot.dataType());
- metric.setDescription(metricAnnot.description());
- metric.setUnits(metricAnnot.units());
- metrics.add(metric);
- }
- }
@Override
public String toString() {
@@ -466,7 +394,7 @@ public class Props {
sb.append(", dependsOnJmxPlugin=").append(dependsOnJmxPlugin);
sb.append(", rhqVersion='").append(rhqVersion).append('\'');
sb.append(", children=").append(children);
- sb.append(", simpleProps=").append(simpleProps);
+ sb.append(", simpleProps=").append(pluginConfig);
sb.append(", templates=").append(templates);
sb.append(", runsInsides=").append(runsInsides);
sb.append('}');
diff --git a/modules/helpers/pluginGen/src/main/resources/descriptorMain.ftl b/modules/helpers/pluginGen/src/main/resources/descriptorMain.ftl
index 5579806..ce35ebe 100644
--- a/modules/helpers/pluginGen/src/main/resources/descriptorMain.ftl
+++ b/modules/helpers/pluginGen/src/main/resources/descriptorMain.ftl
@@ -45,9 +45,9 @@ name="${props.name}"
</runs-inside>
</#if>
- <#if props.simpleProps?has_content>
+ <#if props.pluginConfig?has_content>
<plugin-configuration>
- <#list props.simpleProps as simpleProps>
+ <#list props.pluginConfig as simpleProps>
<c:simple-property name="${simpleProps.name}" description="${simpleProps.description}" <#if simpleProps.type??>type="${simpleProps.type}"</#if> <#if simpleProps.readOnly>readOnly="true"</#if>/>
</#list>
<!-- The template section is only for manual resource additions, and default parameters and the ones presented to the user. -->
@@ -106,4 +106,12 @@ name="${props.name}"
<!-- TODO supply your configuration parameters -->
<c:simple-property name="dummy"/>
</resource-configuration>
- </#if>
\ No newline at end of file
+ </#if>
+
+<#if props.resourceConfig?has_content>
+ <resource-configuration>
+ <#list props.resourceConfig as simpleProps>
+ <c:simple-property name="${simpleProps.name}" description="${simpleProps.description}" <#if simpleProps.type??>type="${simpleProps.type}"</#if> <#if simpleProps.readOnly>readOnly="true"</#if>/>
+ </#list>
+ </resource-configuration>
+</#if>
diff --git a/modules/helpers/pluginGen/src/test/java/org/rhq/helpers/pluginGen/test/FooBean.java b/modules/helpers/pluginGen/src/test/java/org/rhq/helpers/pluginGen/test/FooBean.java
index fbc571b..5027dbb 100644
--- a/modules/helpers/pluginGen/src/test/java/org/rhq/helpers/pluginGen/test/FooBean.java
+++ b/modules/helpers/pluginGen/src/test/java/org/rhq/helpers/pluginGen/test/FooBean.java
@@ -19,6 +19,7 @@
package org.rhq.helpers.pluginGen.test;
+import org.rhq.helpers.pluginAnnotations.agent.ConfigProperty;
import org.rhq.helpers.pluginAnnotations.agent.DataType;
import org.rhq.helpers.pluginAnnotations.agent.DisplayType;
import org.rhq.helpers.pluginAnnotations.agent.MeasurementType;
@@ -26,6 +27,7 @@ import org.rhq.helpers.pluginAnnotations.agent.Metric;
import org.rhq.helpers.pluginAnnotations.agent.Operation;
import org.rhq.helpers.pluginAnnotations.agent.Parameter;
import org.rhq.helpers.pluginAnnotations.agent.RhqType;
+import org.rhq.helpers.pluginAnnotations.agent.Units;
/**
* Just a sample
@@ -34,7 +36,8 @@ import org.rhq.helpers.pluginAnnotations.agent.RhqType;
public class FooBean {
- @Metric(description = "How often was this bean invoked", displayType = DisplayType.SUMMARY, measurementType = MeasurementType.DYNAMIC)
+ @Metric(description = "How often was this bean invoked", displayType = DisplayType.SUMMARY, measurementType = MeasurementType.DYNAMIC,
+ units = Units.SECONDS)
int invocationCount;
@Metric(description = "Just a foo", dataType = DataType.TRAIT)
@@ -51,5 +54,11 @@ public class FooBean {
invocationCount -= by;
}
+ @ConfigProperty(scope = ConfigProperty.Scope.PLUGIN, displayName="The Password",
+ readOnly = false, property="thePassword",description = "A password", rhqType = RhqType.PASSWORD)
+ String password;
+
+ @ConfigProperty(scope = ConfigProperty.Scope.RESOURCE)
+ int defaultSteps;
}
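For orientation, a minimal sketch of how the refactored processor might be driven from the generator; the base directory path is illustrative only, and it assumes Props has a default constructor (not shown in the diff):

    // Scan compiled classes under a directory and fill the Props model with the
    // metrics, operations and plugin/resource configuration discovered via annotations.
    Props props = new Props();
    AnnotationProcessor processor = new AnnotationProcessor("target/classes"); // hypothetical path
    processor.populate(props);

    // The populated sets then feed the FreeMarker descriptor templates.
    System.out.println("metrics=" + props.getMetrics().size()
        + ", pluginConfig=" + props.getPluginConfig().size()
        + ", resourceConfig=" + props.getResourceConfig().size()
        + ", operations=" + props.getOperations().size());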
commit 039a42a2e696bb1bb0b170bec87ed95fe13921e2
Author: Jirka Kremser <jkremser(a)redhat.com>
Date: Fri Jul 19 16:40:48 2013 +0200
API Checks - Changing StorageNode.QUERY_FIND_BY_ADDRESS. The constants for native queries shouldn't be part of the public API.
diff --git a/modules/core/domain/intentional-api-changes-since-4.8.0.xml b/modules/core/domain/intentional-api-changes-since-4.8.0.xml
index f21a45f..a8a77b4 100644
--- a/modules/core/domain/intentional-api-changes-since-4.8.0.xml
+++ b/modules/core/domain/intentional-api-changes-since-4.8.0.xml
@@ -1,3 +1,11 @@
<?xml version="1.0"?>
<differences>
+ <difference>
+ <className>org/rhq/core/domain/cloud/StorageNode</className>
+ <differenceType>6003</differenceType><!-- Value of compile-time constant has changed -->
+ <field>QUERY_FIND_BY_ADDRESS</field>
+ <justification>
+ Changing StorageNode.QUERY_FIND_BY_ADDRESS. The constants for native queries shouldn't be part of the public API.
+ </justification>
+ </difference>
</differences>
commit 78eb557ae8f799b628769d76ccece61b6cb452a4
Author: Jirka Kremser <jkremser(a)redhat.com>
Date: Fri Jul 19 16:17:53 2013 +0200
[BZ 959587] - Alert definition should display units when entering a value, e.g. 'seconds' or 'megabytes' - Adding a "Base Units" field to the popup form. The tooltip lists all allowed units from the same MeasurementUnits family.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementUnits.java b/modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementUnits.java
index 31811ab..d66f23b 100644
--- a/modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementUnits.java
+++ b/modules/core/domain/src/main/java/org/rhq/core/domain/measurement/MeasurementUnits.java
@@ -22,6 +22,9 @@
*/
package org.rhq.core.domain.measurement;
+import java.util.ArrayList;
+import java.util.List;
+
import org.rhq.core.domain.measurement.util.MeasurementConversionException;
/**
@@ -117,6 +120,16 @@ public enum MeasurementUnits {
return null;
}
+
+ public List<MeasurementUnits> getFamilyUnits() {
+ List<MeasurementUnits> returnList = new ArrayList<MeasurementUnits>();
+ for (MeasurementUnits units : MeasurementUnits.values()) {
+ if (units.family == family) {
+ returnList.add(units);
+ }
+ }
+ return returnList;
+ }
public boolean isComparableTo(MeasurementUnits other) {
return family == other.family;
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/ConditionEditor.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/ConditionEditor.java
index 870140d..f4fbcad 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/ConditionEditor.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/definitions/ConditionEditor.java
@@ -44,6 +44,8 @@ import com.smartgwt.client.widgets.form.fields.SelectItem;
import com.smartgwt.client.widgets.form.fields.SpacerItem;
import com.smartgwt.client.widgets.form.fields.StaticTextItem;
import com.smartgwt.client.widgets.form.fields.TextItem;
+import com.smartgwt.client.widgets.form.fields.events.ChangedEvent;
+import com.smartgwt.client.widgets.form.fields.events.ChangedHandler;
import com.smartgwt.client.widgets.layout.HLayout;
import com.smartgwt.client.widgets.toolbar.ToolStrip;
@@ -613,6 +615,7 @@ public class ConditionEditor extends EnhancedVLayout {
}
absoluteValue.setShowIfCondition(ifFunc);
formItems.add(absoluteValue);
+ formItems.add(buildBaseUnitsItem(metricDropDownMenu, ifFunc, editMode));
} else {
String noMetricsStr = MSG.view_alert_definition_condition_editor_metric_nometrics();
StaticTextItem noMetrics = buildHelpTextItem(THRESHOLD_NO_METRICS_ITEMNAME, noMetricsStr, ifFunc);
@@ -663,6 +666,7 @@ public class ConditionEditor extends EnhancedVLayout {
formItems.add(absoluteLowValue);
formItems.add(absoluteHighValue);
+ formItems.add(buildBaseUnitsItem(metricDropDownMenu, ifFunc, editMode));
} else {
String noMetricsStr = MSG.view_alert_definition_condition_editor_metric_nometrics();
StaticTextItem noMetrics = buildHelpTextItem(RANGE_NO_METRICS_ITEMNAME, noMetricsStr, ifFunc);
@@ -1220,6 +1224,40 @@ public class ConditionEditor extends EnhancedVLayout {
return comparatorSelection;
}
+ private StaticTextItem buildBaseUnitsItem(final SelectItem metricDropDownMenu, FormItemIfFunction ifFunc,
+ boolean editMode) {
+ String baseUnits = MSG.view_alert_definition_condition_editor_common_baseUnits();
+ final StaticTextItem baseUnitsItem = new StaticTextItem("baseUnits", baseUnits);
+ baseUnitsItem.setHoverWidth(200);
+ baseUnitsItem.setShowIfCondition(ifFunc);
+
+ metricDropDownMenu.addChangedHandler(new ChangedHandler() {
+ public void onChanged(ChangedEvent event) {
+ MeasurementDefinition measDef = getMeasurementDefinition(form.getValueAsString(metricDropDownMenu
+ .getName()));
+ baseUnitsItem.setValue(measDef.getUnits() == MeasurementUnits.NONE ? MSG
+ .view_alert_definition_condition_editor_common_baseUnits_none()
+ : measDef.getUnits() == MeasurementUnits.MILLISECONDS ? MeasurementUnits.SECONDS : measDef
+ .getUnits());
+ List<MeasurementUnits> availableUnits = measDef.getUnits().getFamilyUnits();
+ baseUnitsItem.setTooltip(MSG.view_alert_definition_condition_editor_common_baseUnits_availableUnits()
+ + (availableUnits.isEmpty() || availableUnits.get(0) == MeasurementUnits.NONE ? MSG
+ .view_alert_definition_condition_editor_common_baseUnits_none() : availableUnits));
+ }
+ });
+ // initialize the field with proper value
+ MeasurementUnits units = editMode ? existingCondition.getMeasurementDefinition().getUnits()
+ : ConditionEditor.this.resourceType.getMetricDefinitions().iterator().next().getUnits();
+ baseUnitsItem.setValue(units == MeasurementUnits.NONE ? MSG
+ .view_alert_definition_condition_editor_common_baseUnits_none()
+ : units == MeasurementUnits.MILLISECONDS ? MeasurementUnits.SECONDS : units);
+ List<MeasurementUnits> availableUnits = units.getFamilyUnits();
+ baseUnitsItem.setTooltip(MSG.view_alert_definition_condition_editor_common_baseUnits_availableUnits()
+ + (availableUnits.isEmpty() || availableUnits.get(0) == MeasurementUnits.NONE ? MSG
+ .view_alert_definition_condition_editor_common_baseUnits_none() : availableUnits));
+ return baseUnitsItem;
+ }
+
private StaticTextItem buildHelpTextItem(String itemName, String helpText, FormItemIfFunction ifFunc) {
StaticTextItem help = new StaticTextItem(itemName);
help.setShowTitle(false);
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties
index ed6b130..19d3fa5 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties
@@ -919,6 +919,9 @@ view_alert_definition_condition_editor_availabilityDuration_tooltip_duration = T
view_alert_definition_condition_editor_availability_tooltip = Specify the availability state change that will trigger the condition.
view_alert_definition_condition_editor_availability_value = Availability
view_alert_definition_condition_editor_common_avg = Average
+view_alert_definition_condition_editor_common_baseUnits = Base Units
+view_alert_definition_condition_editor_common_baseUnits_availableUnits = Available units:
+view_alert_definition_condition_editor_common_baseUnits_none = None
view_alert_definition_condition_editor_common_max = Maximum
view_alert_definition_condition_editor_common_min = Minimum
view_alert_definition_condition_editor_common_regex = Regular Expression
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties
index 1950003..f71f907 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties
@@ -933,6 +933,9 @@ view_alert_definition_condition_editor_availabilityDuration_tooltip_duration = P
view_alert_definition_condition_editor_availability_tooltip = Specifikujte stav dostupnosti, který poté splní podmínku.
view_alert_definition_condition_editor_availability_value = Dostupnost
view_alert_definition_condition_editor_common_avg = Průměr
+view_alert_definition_condition_editor_common_baseUnits = Základní jednotky
+view_alert_definition_condition_editor_common_baseUnits_availableUnits = Dostupné jednotky:
+view_alert_definition_condition_editor_common_baseUnits_none = Źádné
view_alert_definition_condition_editor_common_max = Maximum
view_alert_definition_condition_editor_common_min = Minimum
view_alert_definition_condition_editor_common_regex = Regulární výraz
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties
index bd8e0b4..524dcc0 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties
@@ -844,6 +844,9 @@ view_alert_definition_condition_editor_avilability_option_up = Wird verfügbar
view_alert_definition_condition_editor_avilability_tooltip = Geben Sie die Änderung der Verfügbarkeit an, die die Bedingung auslösen soll.
view_alert_definition_condition_editor_avilability_value = Verfügbarkeit
view_alert_definition_condition_editor_common_avg = Durchschnitt
+##view_alert_definition_condition_editor_common_baseUnits = Base Units
+##view_alert_definition_condition_editor_common_baseUnits_availableUnits = Available units:
+##view_alert_definition_condition_editor_common_baseUnits_none = None
view_alert_definition_condition_editor_common_max = Maximum
view_alert_definition_condition_editor_common_min = Minimum
view_alert_definition_condition_editor_common_regex = Regulärer Ausdruck
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties
index 1c0005b..cb0c35f 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties
@@ -908,6 +908,9 @@ view_alert_definition_condition_editor_availabilityDuration_tooltip_duration =
view_alert_definition_condition_editor_availability_tooltip = 条件のトリガーとなるアベイラビリティ状態の変化を指定します
view_alert_definition_condition_editor_availability_value = アベイラビリティ
view_alert_definition_condition_editor_common_avg = 平均
+##view_alert_definition_condition_editor_common_baseUnits = Base Units
+##view_alert_definition_condition_editor_common_baseUnits_availableUnits = Available units:
+##view_alert_definition_condition_editor_common_baseUnits_none = None
view_alert_definition_condition_editor_common_max = 最大
view_alert_definition_condition_editor_common_min = 最小
view_alert_definition_condition_editor_common_regex = 正規表現
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties
index 5118a2d..32dc73d 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties
@@ -797,6 +797,9 @@ view_alert_definition_condition_editor_availabilityDuration_tooltip_duration =
view_alert_definition_condition_editor_availability_tooltip = 조건 트리거하는 가용성 상태의 변화를 지정합니다.
view_alert_definition_condition_editor_availability_value = 가용성
view_alert_definition_condition_editor_common_avg = 평균
+##view_alert_definition_condition_editor_common_baseUnits = Base Units
+##view_alert_definition_condition_editor_common_baseUnits_availableUnits = Available units:
+##view_alert_definition_condition_editor_common_baseUnits_none = None
view_alert_definition_condition_editor_common_max = 최대
view_alert_definition_condition_editor_common_min = 최소
view_alert_definition_condition_editor_common_regex = 정규 표현식
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties
index bfd95e8..f4419e1 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties
@@ -916,6 +916,9 @@ view_alert_common_tab_recovery = Recupera\u00E7\u00E3o
view_alert_definition_condition_editor_availability_tooltip = Especifica a mudan\u00E7a de estado na disponibilidade do recurso que ir\u00E1 disparar a condi\u00E7\u00E3o.
view_alert_definition_condition_editor_availability_value = Disponibilidade
view_alert_definition_condition_editor_common_avg = M\u00E9dio
+##view_alert_definition_condition_editor_common_baseUnits = Base Units
+##view_alert_definition_condition_editor_common_baseUnits_availableUnits = Available units:
+##view_alert_definition_condition_editor_common_baseUnits_none = None
view_alert_definition_condition_editor_common_max = M\u00E1ximo
view_alert_definition_condition_editor_common_min = M\u00EDnimo
view_alert_definition_condition_editor_common_regex = Express\u00E3o Regular
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties
index e03e1a9..d75b76e 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties
@@ -881,6 +881,9 @@
#view_alert_definition_condition_editor_availability_tooltip = Specify the availability state change that will trigger the condition.
#view_alert_definition_condition_editor_availability_value = Availability
#view_alert_definition_condition_editor_common_avg = Average
+##view_alert_definition_condition_editor_common_baseUnits = Base Units
+##view_alert_definition_condition_editor_common_baseUnits_availableUnits = Available units:
+##view_alert_definition_condition_editor_common_baseUnits_none = None
#view_alert_definition_condition_editor_common_max = Maximum
#view_alert_definition_condition_editor_common_min = Minimum
#view_alert_definition_condition_editor_delete_confirm = Delete the selected alert condition(s)?
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties
index 82ddc2a..a9a24df 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties
@@ -902,6 +902,9 @@ view_alert_common_tab_recovery = \u8fd8\u539f
view_alert_definition_condition_editor_availability_tooltip = Specify the availability state change that will trigger the condition.
view_alert_definition_condition_editor_availability_value = \u53ef\u7528\u6027
view_alert_definition_condition_editor_common_avg = \u5e73\u5747
+##view_alert_definition_condition_editor_common_baseUnits = Base Units
+##view_alert_definition_condition_editor_common_baseUnits_availableUnits = Available units:
+##view_alert_definition_condition_editor_common_baseUnits_none = None
view_alert_definition_condition_editor_common_max = \u6700\u5927
view_alert_definition_condition_editor_common_min = \u6700\u5c0f
view_alert_definition_condition_editor_common_regex = \u6b63\u5219\u8868\u8fbe\u5f0f
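A minimal sketch of the new getFamilyUnits() helper that backs the tooltip; it simply collects every MeasurementUnits constant sharing the enum value's family, so every member is comparable to every other:

    import java.util.List;
    import org.rhq.core.domain.measurement.MeasurementUnits;

    // All units comparable to MILLISECONDS, i.e. the time family.
    List<MeasurementUnits> timeFamily = MeasurementUnits.MILLISECONDS.getFamilyUnits();
    for (MeasurementUnits u : timeFamily) {
        // isComparableTo is true exactly for units of the same family.
        assert MeasurementUnits.MILLISECONDS.isComparableTo(u);
    }
    System.out.println("Available units: " + timeFamily);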
commit 1f6cf05d57599ded2d196241284b4b5167886c80
Author: Jirka Kremser <jkremser(a)redhat.com>
Date: Fri Jul 19 12:43:35 2013 +0200
[BZ 980091] - link in the alert to the parent on which the alert was created - Adding the link to AlertDetailsView if the alert is defined by a parent definition. A parent definition can be a group alert definition, an autogroup alert definition, or a template definition.
diff --git a/modules/enterprise/gui/coregui/.externalToolBuilders/org.eclipse.wst.jsdt.core.javascriptValidator.launch b/modules/enterprise/gui/coregui/.externalToolBuilders/org.eclipse.wst.jsdt.core.javascriptValidator.launch
new file mode 100644
index 0000000..627021f
--- /dev/null
+++ b/modules/enterprise/gui/coregui/.externalToolBuilders/org.eclipse.wst.jsdt.core.javascriptValidator.launch
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<launchConfiguration type="org.eclipse.ant.AntBuilderLaunchConfigurationType">
+<booleanAttribute key="org.eclipse.ui.externaltools.ATTR_BUILDER_ENABLED" value="false"/>
+<stringAttribute key="org.eclipse.ui.externaltools.ATTR_DISABLED_BUILDER" value="org.eclipse.wst.jsdt.core.javascriptValidator"/>
+<mapAttribute key="org.eclipse.ui.externaltools.ATTR_TOOL_ARGUMENTS"/>
+<booleanAttribute key="org.eclipse.ui.externaltools.ATTR_TRIGGERS_CONFIGURED" value="true"/>
+</launchConfiguration>
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDataSource.java
index 17dd6d6..d66baea 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDataSource.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDataSource.java
@@ -57,6 +57,7 @@ import org.rhq.core.domain.util.PageList;
import org.rhq.enterprise.gui.coregui.client.CoreGUI;
import org.rhq.enterprise.gui.coregui.client.ImageManager;
import org.rhq.enterprise.gui.coregui.client.LinkManager;
+import org.rhq.enterprise.gui.coregui.client.admin.templates.AlertDefinitionTemplateTypeView;
import org.rhq.enterprise.gui.coregui.client.components.form.DateFilterItem;
import org.rhq.enterprise.gui.coregui.client.components.table.TimestampCellFormatter;
import org.rhq.enterprise.gui.coregui.client.gwt.AlertGWTServiceAsync;
@@ -74,6 +75,8 @@ import org.rhq.enterprise.gui.coregui.client.util.RPCDataSource;
* @author John Mazzitelli
*/
public class AlertDataSource extends RPCDataSource<Alert, AlertCriteria> {
+
+ private static final String FIELD_PARENT = "parent"; // may be template or group alert def parent
public static final String PRIORITY_ICON_HIGH = ImageManager.getAlertIcon(AlertPriority.HIGH);
public static final String PRIORITY_ICON_MEDIUM = ImageManager.getAlertIcon(AlertPriority.MEDIUM);
@@ -359,6 +362,7 @@ public class AlertDataSource extends RPCDataSource<Alert, AlertCriteria> {
}
criteria.addFilterEntityContext(entityContext);
criteria.fetchConditionLogs(true);
+// criteria.fetchGroupAlertDefinition(true);
return criteria;
}
@@ -410,6 +414,22 @@ public class AlertDataSource extends RPCDataSource<Alert, AlertCriteria> {
record.setAttribute(AncestryUtil.RESOURCE_ANCESTRY, resource.getAncestry());
record.setAttribute(AncestryUtil.RESOURCE_TYPE_ID, resource.getResourceType().getId());
+ AlertDefinition groupAlertDefinition = alertDefinition.getGroupAlertDefinition();
+ Integer parentId = alertDefinition.getParentId();
+ if (groupAlertDefinition != null && groupAlertDefinition.getGroup() != null) {
+ boolean isAutogroup = groupAlertDefinition.getGroup().getAutoGroupParentResource() != null;
+ record.setAttribute(FIELD_PARENT, (isAutogroup ? "#Resource/AutoGroup/" : "#ResourceGroup/")
+ + groupAlertDefinition.getGroup().getId() + "/Alerts/Definitions/" + groupAlertDefinition.getId());
+ record.setLinkText(MSG.view_alert_definition_for_group());
+ } else if (parentId != null && parentId.intValue() != 0) {
+ record.setAttribute(
+ FIELD_PARENT,
+ LinkManager.getAdminTemplatesEditLink(AlertDefinitionTemplateTypeView.VIEW_ID.getName(), resource
+ .getResourceType().getId())
+ + "/" + parentId);
+ record.setLinkText(MSG.view_alert_definition_for_type());
+ }
+
Set<AlertConditionLog> conditionLogs = from.getConditionLogs();
String conditionText;
String conditionValue;
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDetailsView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDetailsView.java
index 68a0953..ed98d94 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDetailsView.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/alert/AlertDetailsView.java
@@ -40,8 +40,12 @@ import com.smartgwt.client.widgets.tab.Tab;
import com.smartgwt.client.widgets.tab.TabSet;
import org.rhq.core.domain.alert.Alert;
+import org.rhq.core.domain.alert.AlertDefinition;
import org.rhq.core.domain.alert.notification.ResultState;
import org.rhq.core.domain.criteria.AlertCriteria;
+import org.rhq.core.domain.criteria.AlertDefinitionCriteria;
+import org.rhq.core.domain.criteria.ResourceGroupCriteria;
+import org.rhq.core.domain.resource.group.ResourceGroup;
import org.rhq.core.domain.util.PageList;
import org.rhq.enterprise.gui.coregui.client.BookmarkableView;
import org.rhq.enterprise.gui.coregui.client.CoreGUI;
@@ -83,7 +87,14 @@ public class AlertDetailsView extends EnhancedVLayout implements BookmarkableVie
@Override
public void onSuccess(PageList<Alert> result) {
Alert alert = result.get(0);
- show(alert);
+ Integer parentId = alert.getAlertDefinition().getParentId();
+ AlertDefinition groupAlertDefinition = alert.getAlertDefinition().getGroupAlertDefinition();
+ if (groupAlertDefinition != null || (parentId != null && parentId.intValue() != 0)) {
+ fetchDefinitionWithGroupAndTemplate(alert);
+ } else {
+ show(alert);
+ }
+
}
@Override
@@ -92,6 +103,24 @@ public class AlertDetailsView extends EnhancedVLayout implements BookmarkableVie
}
});
}
+
+ private void fetchDefinitionWithGroupAndTemplate(final Alert alert) {
+ AlertDefinitionCriteria criteria = new AlertDefinitionCriteria();
+ criteria.addFilterAlertId(alert.getId());
+ criteria.fetchGroupAlertDefinition(true);
+ criteria.fetchResourceType(true);
+ GWTServiceLookup.getAlertDefinitionService().findAlertDefinitionsByCriteria(criteria, new AsyncCallback<PageList<AlertDefinition>>() {
+ public void onSuccess(PageList<AlertDefinition> result) {
+ alert.getAlertDefinition().setGroupAlertDefinition(result.get(0).getGroupAlertDefinition());
+ alert.getAlertDefinition().setResourceType(result.get(0).getResourceType());
+ show(alert);
+ }
+
+ public void onFailure(Throwable caught) {
+ CoreGUI.getErrorHandler().handleError(MSG.view_alert_details_loadFailed(), caught);
+ }
+ });
+ }
private void show(Alert alert) {
destroyMembers();
@@ -102,7 +131,7 @@ public class AlertDetailsView extends EnhancedVLayout implements BookmarkableVie
addMember(getDetailsTabSet(record));
}
- private TabSet getDetailsTabSet(Record record) {
+ private TabSet getDetailsTabSet(ListGridRecord record) {
TabSet tabset = new NamedTabSet();
Tab generalTab = new NamedTab(new ViewName("general", MSG.view_alert_common_tab_general()));
@@ -121,7 +150,7 @@ public class AlertDetailsView extends EnhancedVLayout implements BookmarkableVie
return tabset;
}
- private DynamicForm getDetailsTableForAlert(Record record) {
+ private DynamicForm getDetailsTableForAlert(ListGridRecord record) {
DynamicForm form = new DynamicForm();
form.setNumCols(4);
form.setHeight("15%");
@@ -189,6 +218,13 @@ public class AlertDetailsView extends EnhancedVLayout implements BookmarkableVie
MSG.view_alert_details_field_resource_ancestry());
resourceAncestryItem.setValue(record.getAttribute("resourceAncestry"));
items.add(resourceAncestryItem);
+
+ String parentUrl = record.getAttribute("parent");
+ if (parentUrl != null) {
+ StaticTextItem parentItem = new StaticTextItem("parent", "Parent Definition");
+ parentItem.setValue(LinkManager.getHref(parentUrl, record.getLinkText()));
+ items.add(parentItem);
+ }
form.setItems(items.toArray(new FormItem[items.size()]));
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties
index 84e9c6e..ed6b130 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties
@@ -1080,6 +1080,7 @@ view_alert_definitions_update_failure = Alert definition update failed
view_alert_definitions_update_success = Alert definition successfully updated
view_alert_details_field_ack_at = Acknowledged at
view_alert_details_field_ack_by = Acknowledged by
+view_alert_details_field_parent_definition = Parent definition
view_alert_details_field_recovery_info = Recovery Info
view_alert_details_field_resource_ancestry = Resource Ancestry
view_alert_details_field_watched_resource = Watched Resource
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties
index d6a102b..1950003 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties
@@ -1094,6 +1094,7 @@ view_alert_definitions_update_failure = Nepodařilo se změnit definici výstrah
view_alert_definitions_update_success = Úspěšně změněna definice výstrahy
view_alert_details_field_ack_at = Potvrzeno v
view_alert_details_field_ack_by = Potvrzeno kým
+view_alert_details_field_parent_definition = Definice rodiče
view_alert_details_field_recovery_info = Informace o obnově
view_alert_details_field_resource_ancestry = Původ zdroje
view_alert_details_field_watched_resource = Sledovaný zdroj
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties
index 9237f76..1c0005b 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties
@@ -1069,6 +1069,7 @@ view_alert_definitions_update_failure = アラート定義の更新に失敗し
view_alert_definitions_update_success = アラート定義の更新に成功しました
view_alert_details_field_ack_at = 次の場所で確認済みです
view_alert_details_field_ack_by = 次の人によって確認済みです
+##view_alert_details_field_parent_definition = Parent definition
view_alert_details_field_recovery_info = リカバリ情報
##view_alert_details_field_resource_ancestry = Resource Ancestry
##view_alert_details_field_watched_resource = Watched Resource
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties
index bd34f9b..5118a2d 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties
@@ -935,6 +935,7 @@ view_alert_definitions_table_title_group = 그룹 경고 정의
view_alert_definitions_table_title_resource = 리소스 경고 정의
view_alert_details_field_ack_at = 다음 위치에서 확인했습니다
view_alert_details_field_ack_by = 다음 사람에 의해 확인되었습니다
+##view_alert_details_field_parent_definition = Parent definition
view_alert_details_field_recovery_info = 복구 정보
view_alert_details_field_resource_ancestry = 리소스 조상
view_alert_details_field_watched_resource = 관심 리소스
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties
index 1330eb7..bfd95e8 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties
@@ -1078,6 +1078,7 @@ view_alert_definitions_update_success = Defini\u00E7\u00E3o do alerta atualizada
view_alert_details_breadcrumb = Detalhes
view_alert_details_field_ack_at = Verificado em
view_alert_details_field_ack_by = Verificado por
+##view_alert_details_field_parent_definition = Parent definition
view_alert_details_field_recovery_info = Informa\u00E7\u00E3o de Recupera\u00E7\u00E3o
##view_alert_details_field_resource_ancestry = Resource Ancestry
##view_alert_details_field_watched_resource = Watched Resource
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties
index 72a287b..e03e1a9 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties
@@ -1042,6 +1042,7 @@
#view_alert_definitions_update_success = Alert definition successfully updated
#view_alert_details_field_ack_at = Acknowledged at
#view_alert_details_field_ack_by = Acknowledged by
+##view_alert_details_field_parent_definition = Parent definition
#view_alert_details_field_recovery_info = Recovery Info
#view_alert_details_loadFailed = Failed to fetch alert details
#view_alerts_ack_confirm = Acknowledge the selected alert(s)?
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties
index 10f9b0a..82ddc2a 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties
@@ -1063,6 +1063,7 @@ view_alert_definitions_update_failure = \u544a\u8b66\u5b9a\u4e49\u66f4\u65b0\u59
view_alert_definitions_update_success = \u544a\u8b66\u5b9a\u4e49\u66f4\u65b0\u6210\u529f
view_alert_details_field_ack_at = \u786e\u8ba4\u4e8e
view_alert_details_field_ack_by = \u786e\u8ba4\u8005
+##view_alert_details_field_parent_definition = Parent definition
view_alert_details_field_recovery_info = \u8fd8\u539f\u4fe1\u606f
##view_alert_details_field_resource_ancestry = Resource Ancestry
##view_alert_details_field_watched_resource = Watched Resource
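Condensed from the AlertDataSource change in this commit, the parent link target is resolved roughly as follows (a sketch, not the complete method; alert, resource and LinkManager come from the surrounding class):

    AlertDefinition def = alert.getAlertDefinition();
    AlertDefinition groupDef = def.getGroupAlertDefinition();
    Integer parentId = def.getParentId();

    String parentUrl = null;
    if (groupDef != null && groupDef.getGroup() != null) {
        // Group or autogroup alert definition: link to the owning group's definitions view.
        boolean isAutogroup = groupDef.getGroup().getAutoGroupParentResource() != null;
        parentUrl = (isAutogroup ? "#Resource/AutoGroup/" : "#ResourceGroup/")
            + groupDef.getGroup().getId() + "/Alerts/Definitions/" + groupDef.getId();
    } else if (parentId != null && parentId.intValue() != 0) {
        // Template definition: link to the alert template under the admin templates view.
        parentUrl = LinkManager.getAdminTemplatesEditLink(
            AlertDefinitionTemplateTypeView.VIEW_ID.getName(),
            resource.getResourceType().getId()) + "/" + parentId;
    }
    // parentUrl ends up in the record's "parent" attribute and is rendered as a link
    // by AlertDetailsView via LinkManager.getHref(parentUrl, linkText).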
commit 67c442b7baa5db4cdd3715fd89db9ea01b9e2fa8
Author: Jirka Kremser <jkremser(a)redhat.com>
Date: Fri Jul 19 12:41:13 2013 +0200
Removing the deprecated Hibernate DELETE_ORPHAN cascade annotation, adding the orphanRemoval=true attribute instead.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/alert/Alert.java b/modules/core/domain/src/main/java/org/rhq/core/domain/alert/Alert.java
index 4742c17..44a8eac 100644
--- a/modules/core/domain/src/main/java/org/rhq/core/domain/alert/Alert.java
+++ b/modules/core/domain/src/main/java/org/rhq/core/domain/alert/Alert.java
@@ -253,9 +253,8 @@ public class Alert implements Serializable {
@ManyToOne
private AlertDefinition alertDefinition;
- @OneToMany(mappedBy = "alert", cascade = CascadeType.ALL)
+ @OneToMany(mappedBy = "alert", cascade = CascadeType.ALL, orphanRemoval=true)
@OrderBy
- @org.hibernate.annotations.Cascade(org.hibernate.annotations.CascadeType.DELETE_ORPHAN)
// primary key
private Set<AlertConditionLog> conditionLogs = new LinkedHashSet<AlertConditionLog>();
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDampening.java b/modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDampening.java
index 0f2d0b1..be98df5 100644
--- a/modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDampening.java
+++ b/modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDampening.java
@@ -86,8 +86,7 @@ public class AlertDampening implements java.io.Serializable {
// This is required for cascade behavior. We want to be able to cascade delete the AlertDampeningEvents when an
// AlertDefinition is removed from the db, due to deleting a Resource from inventory.
- @OneToMany(mappedBy = "alertDefinition", cascade = { CascadeType.REFRESH, CascadeType.REMOVE })
- @org.hibernate.annotations.Cascade(org.hibernate.annotations.CascadeType.DELETE_ORPHAN)
+ @OneToMany(mappedBy = "alertDefinition", cascade = { CascadeType.REFRESH, CascadeType.REMOVE }, orphanRemoval = true)
private Set<AlertDampeningEvent> alertDampeningEvents = new HashSet<AlertDampeningEvent>();
protected AlertDampening() {
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDefinition.java b/modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDefinition.java
index 02362ce..3445d9e 100644
--- a/modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDefinition.java
+++ b/modules/core/domain/src/main/java/org/rhq/core/domain/alert/AlertDefinition.java
@@ -293,7 +293,6 @@ public class AlertDefinition implements Serializable {
// do not cascade remove - group removal will be detaching children alert defs from the group def,
// and then letting the children be deleted slowly by existing alert def removal mechanisms
- @SuppressWarnings("unused")
@OneToMany(mappedBy = "groupAlertDefinition", fetch = FetchType.LAZY, cascade = { CascadeType.PERSIST })
@OrderBy
private Set<AlertDefinition> groupAlertDefinitionChildren = new LinkedHashSet<AlertDefinition>();
@@ -361,11 +360,10 @@ public class AlertDefinition implements Serializable {
// referencing AlertConditionLog records.
private Set<AlertCondition> conditions = new LinkedHashSet<AlertCondition>(1); // Most alerts will only have one condition.
- @OneToMany(mappedBy = "alertDefinition", cascade = CascadeType.ALL)
+ @OneToMany(mappedBy = "alertDefinition", cascade = CascadeType.ALL, orphanRemoval = true)
// Although similar to AlertCondition, we do use DELETE_ORPHAN here. The reason is because AlertNotificationLog
// does not refer back to the AlertNotification record and therefore the notification logs are not affected
// by the loss of the AlertNotification that spawned the notification.
- @org.hibernate.annotations.Cascade(org.hibernate.annotations.CascadeType.DELETE_ORPHAN)
private List<AlertNotification> alertNotifications = new ArrayList<AlertNotification>();
/**
commit 92a286d611085d96ba1710c442035445dda5318a
Author: Jirka Kremser <jkremser(a)redhat.com>
Date: Thu Jul 18 18:37:48 2013 +0200
[BZ 980091] - link in the alert to the parent on which the alert was created - Adding a new filter on AlertDefinitionCriteria (filter by alert id).
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/AlertDefinitionCriteria.java b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/AlertDefinitionCriteria.java
index 854efaf..9d4e51d 100644
--- a/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/AlertDefinitionCriteria.java
+++ b/modules/core/domain/src/main/java/org/rhq/core/domain/criteria/AlertDefinitionCriteria.java
@@ -48,6 +48,7 @@ public class AlertDefinitionCriteria extends Criteria {
private String filterName;
private String filterDescription;
private AlertPriority filterPriority;
+ private Integer filterAlertId; // requires overrides
private NonBindingOverrideFilter filterAlertTemplateOnly; // requires overrides - finds only alert templates
private Integer filterAlertTemplateParentId; // requires overrides
private Integer filterAlertTemplateResourceTypeId; // requires overrides
@@ -75,6 +76,10 @@ public class AlertDefinitionCriteria extends Criteria {
private PageOrdering sortResourceName; // requires sort override
public AlertDefinitionCriteria() {
+ filterOverrides.put("alertId", ""
+ + "id IN ( SELECT alert.alertDefinition.id " //
+ + " FROM Alert alert " //
+ + " WHERE alert.id = ? )");
filterOverrides.put("alertTemplateOnly", "resourceType IS NOT NULL");
filterOverrides.put("alertTemplateParentId", "parentId = ?");
filterOverrides.put("alertTemplateResourceTypeId", "resourceType.id = ?");
@@ -113,6 +118,10 @@ public class AlertDefinitionCriteria extends Criteria {
public void addFilterAlertTemplateParentId(Integer filterAlertTemplateParentId) {
this.filterAlertTemplateParentId = filterAlertTemplateParentId;
}
+
+ public void addFilterAlertId(Integer filterAlertId) {
+ this.filterAlertId = filterAlertId;
+ }
public void addFilterAlertTemplateResourceTypeId(Integer filterAlertTemplateResourceTypeId) {
this.filterAlertTemplateResourceTypeId = filterAlertTemplateResourceTypeId;
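With the new filter in place, the client side (see the AlertDetailsView change in the previous commit) can load the definition that fired a given alert together with its group or template parent. A sketch, reusing the classes already imported in that diff:

    AlertDefinitionCriteria criteria = new AlertDefinitionCriteria();
    criteria.addFilterAlertId(alert.getId());   // new filter: the definition whose alert has this id
    criteria.fetchGroupAlertDefinition(true);
    criteria.fetchResourceType(true);

    GWTServiceLookup.getAlertDefinitionService().findAlertDefinitionsByCriteria(criteria,
        new AsyncCallback<PageList<AlertDefinition>>() {
            public void onSuccess(PageList<AlertDefinition> result) {
                AlertDefinition definition = result.get(0); // the definition that fired the alert
            }

            public void onFailure(Throwable caught) {
                CoreGUI.getErrorHandler().handleError(MSG.view_alert_details_loadFailed(), caught);
            }
        });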
commit b3aa6d8a54378fad077d2052e9f9620768723a4d
Author: Michael Burman <yak(a)iki.fi>
Date: Fri Jul 19 12:27:38 2013 +0200
BZ980076 Check if storage is really running after a crash
diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/ControlCommand.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/ControlCommand.java
index 8988d98..5528619 100644
--- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/ControlCommand.java
+++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/ControlCommand.java
@@ -1,7 +1,7 @@
/*
*
* * RHQ Management Platform
- * * Copyright (C) 2005-2012 Red Hat, Inc.
+ * * Copyright (C) 2005-2013 Red Hat, Inc.
* * All rights reserved.
* *
* * This program is free software; you can redistribute it and/or modify
@@ -41,6 +41,10 @@ import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
+import org.apache.commons.exec.DefaultExecutor;
+import org.apache.commons.exec.ExecuteException;
+import org.apache.commons.exec.Executor;
+import org.apache.commons.exec.PumpStreamHandler;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -171,10 +175,15 @@ public abstract class ControlCommand {
return getStorageBasedir().exists();
}
- protected String getStoragePid() throws IOException {
+ protected File getStoragePidFile() {
File storageBasedir = getStorageBasedir();
File storageBinDir = new File(storageBasedir, "bin");
File pidFile = new File(storageBinDir, "cassandra.pid");
+ return pidFile;
+ }
+
+ protected String getStoragePid() throws IOException {
+ File pidFile = getStoragePidFile();
if (pidFile.exists()) {
return StreamUtil.slurp(new FileReader(pidFile));
@@ -282,4 +291,77 @@ public abstract class ControlCommand {
return inUse;
}
+
+ protected void waitForProcessToStop(String pid) throws Exception {
+
+ if (isWindows() || pid==null) {
+ // For the moment we have no better way to just wait some time
+ Thread.sleep(10*1000L);
+ } else {
+ int tries = 5;
+ while (tries > 0) {
+ log.debug(".");
+ if (!isUnixPidRunning(pid)) {
+ break;
+ }
+ Thread.sleep(2*1000L);
+ tries--;
+ }
+ if (tries==0) {
+ throw new RHQControlException("Process [" + pid + "] did not finish yet. Terminate it manually and retry.");
+ }
+ }
+
+ }
+
+ protected void killPid(String pid) throws IOException {
+ Executor executor = new DefaultExecutor();
+ executor.setWorkingDirectory(getBinDir());
+ executor.setStreamHandler(new PumpStreamHandler());
+ org.apache.commons.exec.CommandLine commandLine;
+
+ commandLine = new org.apache.commons.exec.CommandLine("kill").addArgument(pid);
+ executor.execute(commandLine);
+ }
+
+ protected boolean isUnixPidRunning(String pid) {
+
+ Executor executor = new DefaultExecutor();
+ executor.setWorkingDirectory(getBinDir());
+ executor.setStreamHandler(new PumpStreamHandler());
+ org.apache.commons.exec.CommandLine commandLine = new org.apache.commons.exec.CommandLine("/bin/kill")
+ .addArgument("-0")
+ .addArgument(pid);
+
+ try {
+ int code = executor.execute(commandLine);
+ if (code!=0) {
+ return false;
+ }
+ } catch (ExecuteException ee ) {
+ if (ee.getExitValue()==1) {
+ // return code 1 means process does not exist
+ return false;
+ }
+ } catch (IOException e) {
+ log.error("Checking for running process failed: " + e.getMessage());
+ }
+ return true;
+ }
+
+ protected boolean isStorageRunning() throws IOException {
+ String pid = getStoragePid();
+ if(pid == null) {
+ return false;
+ } else if(pid != null && !isUnixPidRunning(pid)) {
+ // There is a phantom pidfile
+ File pidFile = getStoragePidFile();
+ if(!pidFile.delete()) {
+ throw new RHQControlException("Could not delete storage pidfile " + pidFile.getAbsolutePath());
+ }
+ return false;
+ } else {
+ return true;
+ }
+ }
}
diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java
index fe7cdd4..8c885b0 100644
--- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java
+++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java
@@ -1,7 +1,7 @@
/*
*
* * RHQ Management Platform
- * * Copyright (C) 2005-2012 Red Hat, Inc.
+ * * Copyright (C) 2005-2013 Red Hat, Inc.
* * All rights reserved.
* *
* * This program is free software; you can redistribute it and/or modify
@@ -36,16 +36,12 @@ import java.util.Properties;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.exec.DefaultExecuteResultHandler;
import org.apache.commons.exec.DefaultExecutor;
-import org.apache.commons.exec.ExecuteException;
import org.apache.commons.exec.Executor;
import org.apache.commons.exec.PumpStreamHandler;
-
import org.jboss.as.controller.client.ModelControllerClient;
-
import org.rhq.common.jbossas.client.controller.DeploymentJBossASClient;
import org.rhq.common.jbossas.client.controller.MCCHelper;
import org.rhq.server.control.ControlCommand;
-import org.rhq.server.control.RHQControlException;
/**
* Common code for commands that perform installs. Basically shared code for Install and Upgrade commands.
@@ -104,55 +100,6 @@ public abstract class AbstractInstall extends ControlCommand {
}
}
- protected void waitForProcessToStop(String pid) throws Exception {
-
- if (isWindows() || pid==null) {
- // For the moment we have no better way to just wait some time
- Thread.sleep(10*1000L);
- } else {
- int tries = 5;
- while (tries > 0) {
- log.debug(".");
- if (!isUnixPidRunning(pid)) {
- break;
- }
- Thread.sleep(2*1000L);
- tries--;
- }
- if (tries==0) {
- throw new RHQControlException("Process [" + pid + "] did not finish yet. Terminate it manually and retry.");
- }
- }
-
- }
-
- protected boolean isUnixPidRunning(String pid) {
-
- Executor executor = new DefaultExecutor();
- executor.setWorkingDirectory(getBinDir());
- executor.setStreamHandler(new PumpStreamHandler());
- org.apache.commons.exec.CommandLine commandLine;
-
- commandLine = new org.apache.commons.exec.CommandLine("/bin/kill")
- .addArgument("-0")
- .addArgument(pid);
-
- try {
- int code = executor.execute(commandLine);
- if (code!=0) {
- return false;
- }
- } catch (ExecuteException ee ) {
- if (ee.getExitValue()==1) {
- // return code 1 means process does not exist
- return false;
- }
- } catch (IOException e) {
- log.error("Checking for running process failed: " + e.getMessage());
- }
- return true;
- }
-
protected void waitForRHQServerToInitialize() throws Exception {
try {
final long messageInterval = 30000L;
diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Start.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Start.java
index 7e83c9e..f15b13e 100644
--- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Start.java
+++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Start.java
@@ -1,7 +1,7 @@
/*
*
* * RHQ Management Platform
- * * Copyright (C) 2005-2012 Red Hat, Inc.
+ * * Copyright (C) 2005-2013 Red Hat, Inc.
* * All rights reserved.
* *
* * This program is free software; you can redistribute it and/or modify
@@ -26,7 +26,6 @@
package org.rhq.server.control.command;
import java.io.File;
-import java.io.FileReader;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Options;
@@ -137,14 +136,13 @@ public class Start extends ControlCommand {
log.debug("Failed to start storage service", e);
}
} else {
-
File storageBinDir = new File(getStorageBasedir(), "bin");
- File pidFile = new File(storageBinDir, "cassandra.pid");
+ File pidFile = getStoragePidFile();
// For now we are duplicating logic in the status command. This code will be
// replaced when we implement a rhq-storage.sh script.
- if (pidFile.exists()) {
- String pid = StreamUtil.slurp(new FileReader(pidFile));
+ if (isStorageRunning()) {
+ String pid = getStoragePid();
System.out.println("RHQ storage node (pid " + pid + ") is running");
} else {
commandLine = getCommandLine(false, "cassandra", "-p", pidFile.getAbsolutePath());
diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Stop.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Stop.java
index 53d0374..908aa8e 100644
--- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Stop.java
+++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Stop.java
@@ -1,7 +1,7 @@
/*
*
* * RHQ Management Platform
- * * Copyright (C) 2005-2012 Red Hat, Inc.
+ * * Copyright (C) 2005-2013 Red Hat, Inc.
* * All rights reserved.
* *
* * This program is free software; you can redistribute it and/or modify
@@ -126,13 +126,13 @@ public class Stop extends AbstractInstall {
log.debug("Failed to stop storage service", e);
}
} else {
- String pid = getStoragePid();
- if (pid != null) {
+ if(isStorageRunning()) {
+ String pid = getStoragePid();
+
System.out.println("Stopping RHQ storage node...");
System.out.println("RHQ storage node (pid=" + pid + ") is stopping...");
- commandLine = new org.apache.commons.exec.CommandLine("kill").addArgument(pid);
- executor.execute(commandLine);
+ killPid(pid);
waitForProcessToStop(pid);
commit bf8587569567d261791f272a672f5722d7dca8c0
Author: Thomas Segismont <tsegismo(a)redhat.com>
Date: Thu Jul 18 23:37:57 2013 +0200
[as7] Fix domain api version discovery
Look for the first node of type element instead of simply calling getFirstChild
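In essence, the fix skips any leading comment (or other non-element) nodes before reading the root element's xmlns attribute. A minimal standalone sketch of that lookup, assuming a parsed host.xml/standalone.xml (illustrative class, not the plugin's code):

    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.Node;

    public class XmlnsVersionSketch {
        // Returns the version suffix of the root element's xmlns, e.g. "1.4" for
        // xmlns="urn:jboss:domain:1.4", or "" when no element node is found.
        static String domainApiVersion(Document document) {
            for (Node child = document.getFirstChild(); child != null; child = child.getNextSibling()) {
                if (child.getNodeType() == Node.ELEMENT_NODE) {
                    String xmlns = child.getAttributes().getNamedItem("xmlns").getTextContent();
                    return xmlns.substring(xmlns.lastIndexOf(':') + 1);
                }
            }
            return "";
        }

        public static void main(String[] args) throws Exception {
            Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(args[0]);
            System.out.println(domainApiVersion(doc));
        }
    }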
diff --git a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/helper/HostConfiguration.java b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/helper/HostConfiguration.java
index 329246e..3c71135 100644
--- a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/helper/HostConfiguration.java
+++ b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/helper/HostConfiguration.java
@@ -1,6 +1,6 @@
/*
* RHQ Management Platform
- * Copyright (C) 2012 Red Hat, Inc.
+ * Copyright (C) 2005-2013 Red Hat, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -13,11 +13,14 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * along with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
package org.rhq.modules.plugins.jbossas7.helper;
+import static org.rhq.core.util.StringUtil.EMPTY_STRING;
+import static org.w3c.dom.Node.ELEMENT_NODE;
+
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
@@ -33,6 +36,7 @@ import javax.xml.xpath.XPathFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.w3c.dom.Document;
+import org.w3c.dom.Node;
import org.rhq.core.pluginapi.util.CommandLineOption;
import org.rhq.modules.plugins.jbossas7.AS7CommandLine;
@@ -234,11 +238,15 @@ public class HostConfiguration {
}
public String getDomainApiVersion() {
-
- String version = document.getFirstChild().getAttributes().getNamedItem("xmlns").getTextContent();
-
- version = version.substring(version.lastIndexOf(':')+1);
- return version;
+ // Look for the first child node of type element (<host> in domain mode or <server> in standalone mode)
+ // We can't just call getFirstChild because first child could be a node of type comment
+ for (Node childNode = document.getFirstChild(); childNode != null; childNode = childNode.getNextSibling()) {
+ if (childNode.getNodeType() == ELEMENT_NODE) {
+ String xmlns = childNode.getAttributes().getNamedItem("xmlns").getTextContent();
+ return xmlns.substring(xmlns.lastIndexOf(':') + 1);
+ }
+ }
+ return EMPTY_STRING;
}
/**
commit 5283e5356d2ca4bda6e3aeea9d323c186030baa9
Author: Thomas Segismont <tsegismo(a)redhat.com>
Date: Thu Jul 18 09:41:39 2013 +0200
Bug 969621 - EAP 6 managed plug-in is unable to discover EAP servers when more than one is running on a single host
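The change below makes discovered resource keys self-describing by prefixing them, and upgrades keys already in inventory via ResourceUpgradeFacet so that two servers on one host no longer collide. A stripped-down sketch of the key scheme only (illustrative helper, not the plugin class):

    import java.io.File;

    public class ResourceKeySketch {
        static final String LOCAL_PREFIX = "hostConfig: ";  // key carries the host config file path
        static final String REMOTE_PREFIX = "hostPort: ";   // key carries hostname:port

        static String localKey(File hostConfigFile) {
            return LOCAL_PREFIX + hostConfigFile.getAbsolutePath();
        }

        static String remoteKey(String hostname, int port) {
            return REMOTE_PREFIX + hostname + ":" + port;
        }

        // Keys written before this change are either a base directory (local servers)
        // or a bare "host:port" (manually added servers); only those need upgrading.
        static boolean needsUpgrade(String key) {
            return !key.startsWith(LOCAL_PREFIX) && !key.startsWith(REMOTE_PREFIX);
        }
    }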
diff --git a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseProcessDiscovery.java b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseProcessDiscovery.java
index cc13406..cd9f276 100644
--- a/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseProcessDiscovery.java
+++ b/modules/plugins/jboss-as-7/src/main/java/org/rhq/modules/plugins/jbossas7/BaseProcessDiscovery.java
@@ -1,6 +1,6 @@
/*
* RHQ Management Platform
- * Copyright (C) 2005-2012 Red Hat, Inc.
+ * Copyright (C) 2005-2013 Red Hat, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -13,8 +13,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * along with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
package org.rhq.modules.plugins.jbossas7;
@@ -42,6 +42,7 @@ import org.rhq.core.domain.configuration.Configuration;
import org.rhq.core.domain.configuration.PropertyList;
import org.rhq.core.domain.configuration.PropertyMap;
import org.rhq.core.domain.configuration.PropertySimple;
+import org.rhq.core.domain.resource.ResourceUpgradeReport;
import org.rhq.core.pluginapi.event.log.LogFileEventResourceComponentHelper;
import org.rhq.core.pluginapi.inventory.DiscoveredResourceDetails;
import org.rhq.core.pluginapi.inventory.InvalidPluginConfigurationException;
@@ -49,6 +50,8 @@ import org.rhq.core.pluginapi.inventory.ManualAddFacet;
import org.rhq.core.pluginapi.inventory.ProcessScanResult;
import org.rhq.core.pluginapi.inventory.ResourceDiscoveryComponent;
import org.rhq.core.pluginapi.inventory.ResourceDiscoveryContext;
+import org.rhq.core.pluginapi.upgrade.ResourceUpgradeContext;
+import org.rhq.core.pluginapi.upgrade.ResourceUpgradeFacet;
import org.rhq.core.pluginapi.util.CommandLineOption;
import org.rhq.core.pluginapi.util.FileUtils;
import org.rhq.core.pluginapi.util.JavaCommandLine;
@@ -66,12 +69,15 @@ import org.rhq.modules.plugins.jbossas7.json.Result;
* Abstract base discovery component for the two server types - "JBossAS7 Host Controller" and
* "JBossAS7 Standalone Server".
*/
-public abstract class BaseProcessDiscovery implements ResourceDiscoveryComponent, ManualAddFacet {
+public abstract class BaseProcessDiscovery implements ResourceDiscoveryComponent, ManualAddFacet, ResourceUpgradeFacet {
private static final String JBOSS_AS_PREFIX = "jboss-as-";
private static final String JBOSS_EAP_PREFIX = "jboss-eap-";
private static final String WILDFLY_PREFIX = "wildfly-";
+ private static final String LOCAL_RESOURCE_KEY_PREFIX = "hostConfig: ";
+ private static final String REMOTE_RESOURCE_KEY_PREFIX = "hostPort: ";
+
private static final String HOME_DIR_SYSPROP = "jboss.home.dir";
private static final String RHQADMIN = "rhqadmin";
@@ -202,7 +208,7 @@ public abstract class BaseProcessDiscovery implements ResourceDiscoveryComponent
setStartScriptPluginConfigProps(process, commandLine, pluginConfig, agentProcess);
setUserAndPasswordPluginConfigProps(serverPluginConfig, hostConfig, baseDir);
- String key = baseDir.getPath();
+ String key = createKeyForLocalResource(serverPluginConfig);
HostPort hostPort = hostConfig.getDomainControllerHostPort(commandLine);
String name = buildDefaultResourceName(hostPort, managementHostPort, productType);
String description = buildDefaultResourceDescription(hostPort, productType);
@@ -455,7 +461,7 @@ public abstract class BaseProcessDiscovery implements ResourceDiscoveryComponent
HostPort managementHostPort = new HostPort(false);
managementHostPort.host = hostname;
managementHostPort.port = port;
- String key = hostname + ":" + port;
+ String key = createKeyForRemoteResource(hostname + ":" + port);
String name = buildDefaultResourceName(hostPort, managementHostPort, productType);
String version = productInfo.getProductVersion();
String description = buildDefaultResourceDescription(hostPort, productType);
@@ -469,6 +475,43 @@ public abstract class BaseProcessDiscovery implements ResourceDiscoveryComponent
return detail;
}
+ @Override
+ public ResourceUpgradeReport upgrade(ResourceUpgradeContext inventoriedResource) {
+ String currentResourceKey = inventoriedResource.getResourceKey();
+ Configuration pluginConfiguration = inventoriedResource.getPluginConfiguration();
+ ServerPluginConfiguration serverPluginConfiguration = new ServerPluginConfiguration(pluginConfiguration);
+
+ if (currentResourceKey.startsWith(LOCAL_RESOURCE_KEY_PREFIX)
+ || currentResourceKey.startsWith(REMOTE_RESOURCE_KEY_PREFIX)) {
+ // Resource key already in right format
+ return null;
+ }
+
+ ResourceUpgradeReport report = new ResourceUpgradeReport();
+
+ if (new File(currentResourceKey).isDirectory()) {
+ // Old key format for a local resource (key is base dir)
+ report.setNewResourceKey(createKeyForLocalResource(serverPluginConfiguration));
+ } else if (currentResourceKey.contains(":")) {
+ // Old key format for a remote (manually added) resource (key is host:port)
+ report.setNewResourceKey(createKeyForRemoteResource(currentResourceKey));
+ } else {
+ log.warn("Unknown format, cannot upgrade resource key [" + currentResourceKey + "]");
+ return null;
+ }
+
+ return report;
+ }
+
+ private String createKeyForRemoteResource(String hostPort) {
+ return REMOTE_RESOURCE_KEY_PREFIX + hostPort;
+ }
+
+ private String createKeyForLocalResource(ServerPluginConfiguration serverPluginConfiguration) {
+ return LOCAL_RESOURCE_KEY_PREFIX
+ + serverPluginConfiguration.getHostConfigFile().getAbsolutePath();
+ }
+
private <T>T getServerAttribute(ASConnection connection, String attributeName) {
Operation op = new ReadAttribute(null, attributeName);
Result res = connection.execute(op);
commit e6fa8c734c2bfee9ad46a2b7683d4c86b4897e5c
Author: John Sanda <jsanda(a)redhat.com>
Date: Thu Jul 18 07:23:34 2013 -0400
take 2 at dependency clean up
JNA libraries were getting pulled into the ear as transitive dependencies. JNA
is used by the cassandra-ccm-core module to shut down Cassandra, but that
functionality is only exercised by test code.
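A quick way to confirm that the exclusions below actually keep JNA out of the ear is Maven's dependency report, run from the server jar module (standard Maven, nothing RHQ-specific):

    mvn dependency:tree -Dincludes=net.java.dev.jna

An empty tree for that groupId means the artifacts are no longer pulled in transitively.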
diff --git a/modules/enterprise/server/jar/pom.xml b/modules/enterprise/server/jar/pom.xml
index c648685..f418afc 100644
--- a/modules/enterprise/server/jar/pom.xml
+++ b/modules/enterprise/server/jar/pom.xml
@@ -77,6 +77,14 @@
<groupId>org.rhq</groupId>
<artifactId>rhq-core-plugin-api</artifactId>
</exclusion>
+ <exclusion>
+ <groupId>net.java.dev.jna</groupId>
+ <artifactId>jna</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>net.java.dev.jna</groupId>
+ <artifactId>platform</artifactId>
+ </exclusion>
</exclusions>
</dependency>
diff --git a/modules/enterprise/server/server-metrics/pom.xml b/modules/enterprise/server/server-metrics/pom.xml
index 2245978..97496f7 100644
--- a/modules/enterprise/server/server-metrics/pom.xml
+++ b/modules/enterprise/server/server-metrics/pom.xml
@@ -47,18 +47,6 @@
<dependencies>
<dependency>
<groupId>${project.groupId}</groupId>
- <artifactId>rhq-cassandra-ccm-core</artifactId>
- <version>${project.version}</version>
- <exclusions>
- <exclusion>
- <groupId>org.apache.cassandra</groupId>
- <artifactId>cassandra-clientutil</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
-
- <dependency>
- <groupId>${project.groupId}</groupId>
<artifactId>rhq-cassandra-ccm-testng</artifactId>
<version>${project.version}</version>
<scope>test</scope>
commit 375f6f66fcc9214cc3d13605c17859ac11e3fd45
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Thu Jul 18 05:04:00 2013 -0500
Temporary fix for the server jar name for dev profile (it was still using the old name).
diff --git a/modules/enterprise/server/jar/pom.xml b/modules/enterprise/server/jar/pom.xml
index 7c236c7..c648685 100644
--- a/modules/enterprise/server/jar/pom.xml
+++ b/modules/enterprise/server/jar/pom.xml
@@ -776,7 +776,7 @@
<properties>
<rhq.rootDir>../../../..</rhq.rootDir>
<rhq.containerDir>${rhq.rootDir}/${rhq.devContainerServerPath}</rhq.containerDir>
- <rhq.deploymentName>${project.build.finalName}-ejb3.jar</rhq.deploymentName>
+ <rhq.deploymentName>rhq-server.jar</rhq.deploymentName>
<rhq.deploymentDir>${rhq.containerDir}/${rhq.earDeployDir}/${rhq.deploymentName}</rhq.deploymentDir>
</properties>
commit b59d1684eb12c4e1c5a78309b4ec40eeb461b7d5
Author: John Sanda <jsanda(a)redhat.com>
Date: Wed Jul 17 20:17:14 2013 -0400
fix test ear deployment error that resulted from renaming server jar
The ear deployment was failing because classes were not getting added to the
rhq-server.jar in the test ear.
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java
index 4dfbd46..2fd8624 100644
--- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java
+++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/AbstractEJB3Test.java
@@ -341,7 +341,7 @@ public abstract class AbstractEJB3Test extends Arquillian {
// merge rhq.ear into testEar but include only the EJB jars and the supporting libraries. Note that we
// don't include the services sar because tests are responsible for prepare/unprepare of all required services,
// we don't want the production services performing any unexpected work.
- testEar = testEar.merge(rhqEar, Filters.include("/lib.*|/rhq.*ejb3\\.jar.*"));
+ testEar = testEar.merge(rhqEar, Filters.include("/lib.*|/rhq.*ejb3\\.jar.*|/rhq-server.jar.*"));
// remove startup beans and shutdown listeners, we don't want this to be a full server deployment. The tests
// start/stop what they need, typically with test services or mocks.
testEar.delete(ArchivePaths
@@ -413,7 +413,7 @@ public abstract class AbstractEJB3Test extends Arquillian {
//System.out.println("** The Deployment EAR: " + testEar.toString(true) + "\n");
// Save the test EAR to a zip file for inspection (set file explicitly)
- //exportZip(testEar, new File("c:/temp/test-ear.ear"));
+ exportZip(testEar, new File("/Users/jsanda/tmp/test-ear.ear"));
return testEar;
}
commit c855df20b787a6db0a7b9aed7b1fb5597eb0e2d6
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Wed Jul 17 17:09:55 2013 -0400
Work on the windows rhq48 cassandra upgrade patch
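Part of the patch renames the CQL_HOSTNAME/CQL_PORT variables to CQLSH_HOST/CQLSH_PORT, the environment variables cqlsh itself consults for its default host and port. Roughly, in a Windows prompt (illustrative values):

    set CQLSH_HOST=127.0.0.1
    set CQLSH_PORT=9160
    cqlsh -u rhqadmin -p rhqadmin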
diff --git a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.bat b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.bat
index e0312dc..c190503 100644
--- a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.bat
+++ b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.bat
@@ -1,14 +1,54 @@
@echo off
+
+rem ===========================================================================
+rem RHQ Storage Node (Cassandra) Windows 4.8.0 upgrade patch script
+rem
+rem WHO NEEDS TO RUN THE PATCH?
+rem
+rem Run this patch script if you are:
+rem - Running RHQ 4.8.0 on Windows
+rem - Planning to upgrade and maintain your Storage node data
+rem
+rem PREREQUISITES:
+rem
+rem This patch requires the installation of Python. Download Python 2.7.5 from:
+rem
+rem http://www.python.org/download/releases/2.7.5/
+rem
+rem Install as directed. Note that Python will need to be on your PATH to run this patch. Also,
+rem this patch will not work with Python3.
+rem
+rem Edit <rhq-install-dir>\rhq-storage\conf\cassandra.yaml to ensure the following is true:
+rem
+rem start_rpc: true
+rem
+rem Note the setting of rpc_port. By default it is 9160, which is fine.
+rem
+rem RUNNING THE PATCH:
+rem
+rem > cd <patch-dir>
+rem > rhq48-storage-patch.bat <rhq-480-server-dir> <storage-node-ip-address> <thrift-rpc-port> <jmx-port>
+rem
+rem For example:
+rem > rhq48-storage-patch.bat c:\rhq-server-4.8.0 127.0.0.1 9160 7299
+rem
+rem Review the output carefully. There should be no errors (be aware that the script may run to completion even when errors occur).
+rem If errors are encountered fix the issue and rerun the patch.
+rem
+rem When done, you can again edit cassandra.yaml and reset start_rpc: false
+rem
+rem ===========================================================================
+
setlocal
if /i "%4" == "" (
- echo Usage: rhq48-storage-patch.bat ^<rhq-server-dir^> ^<storage-ip-address^> ^<cql-port^> ^<jmx-port^>
+ echo Usage: rhq48-storage-patch.bat ^<rhq-server-dir^> ^<storage-ip-address^> ^<thrift-rpc-port^> ^<jmx-port^>
exit /B 1
)
set RHQ_SERVER_DIR=%1
-set CQL_HOSTNAME=%2
-set CQL_PORT=%3
+set CQLSH_HOST=%2
+set CQLSH_PORT=%3
set JMX_PORT=%4
set USERNAME="rhqadmin"
set PASSWORD="rhqadmin"
@@ -29,7 +69,7 @@ echo Waiting for RHQ Storage Node to start up..
rem Sleep is not implemented in all Windows prompts, this one won't work in Vista
choice /n /c y /d y /t 3
-set RHQ_STORAGE_BIN=%RHQ_SERVER_DIR%\rhq-storage\bin\
+set RHQ_STORAGE_BIN=%RHQ_SERVER_DIR%\rhq-storage\bin
set CQLSH_PATH=%RHQ_STORAGE_BIN%\cqlsh
set NODETOOL_PATH=%RHQ_STORAGE_BIN%\nodetool.bat
commit 11b8ae9cff9ca5085bfe317a01653cc8b81d8bb7
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Wed Jul 17 14:14:02 2013 -0500
Initial implementation of the Storage Node configuration composite. The final resource operation still needs to be implemented, but the rest of the functionality is in place.
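The composite added below is intended to be fetched, edited, and written back through the storage node manager. A hypothetical caller, using only the API introduced in this commit (the subject, node, and manager lookup are assumed to come from the caller's context):

    // Hypothetical round trip -- subject, storageNode and storageNodeManager are assumptions here.
    StorageNodeConfigurationComposite config = storageNodeManager.retrieveConfiguration(subject, storageNode);
    config.setHeapSize("512M");
    config.setJmxPort(7299);
    boolean applied = storageNodeManager.updateConfiguration(subject, config);
    // updateConfiguration schedules the updateConfiguration and restart resource operations on the
    // storage node resource and returns true only when both are observed to complete successfully.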
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java
index 14043db..575edc74 100644
--- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java
+++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNode.java
@@ -90,7 +90,7 @@ public class StorageNode implements Serializable {
public static final long serialVersionUID = 1L;
public static final String QUERY_FIND_ALL = "StorageNode.findAll";
- public static final String QUERY_FIND_BY_ADDRESS = "StorageNode.findByName";
+ public static final String QUERY_FIND_BY_ADDRESS = "StorageNode.findByAddress";
public static final String QUERY_FIND_ALL_NOT_INSTALLED = "StorageNode.findAllCloudMembers";
public static final String QUERY_DELETE_BY_ID = "StorageNode.deleteById";
public static final String QUERY_FIND_ALL_NORMAL = "StorageNode.findAllNormalCloudMembers";
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java
new file mode 100644
index 0000000..e2c64f9
--- /dev/null
+++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeConfigurationComposite.java
@@ -0,0 +1,98 @@
+/*
+ * RHQ Management Platform
+ * Copyright (C) 2005-2013 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation, and/or the GNU Lesser
+ * General Public License, version 2.1, also as published by the Free
+ * Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License and the GNU Lesser General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * and the GNU Lesser General Public License along with this program;
+ * if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+package org.rhq.core.domain.cloud;
+
+import java.io.Serializable;
+
+/**
+ * @author Stefan Negrea
+ */
+public class StorageNodeConfigurationComposite implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private StorageNode storageNode;
+ private int jmxPort;
+ private String heapSize;
+
+ public StorageNodeConfigurationComposite() {
+ // GWT needs this
+ }
+
+ public StorageNodeConfigurationComposite(StorageNode storageNode) {
+ this.storageNode = storageNode;
+ }
+
+ /**
+ * @return associated storage node
+ */
+ public StorageNode getStorageNode() {
+ return storageNode;
+ }
+
+ /**
+ * @param storageNode storage node
+ */
+ protected void setStorageNode(StorageNode storageNode) {
+ this.storageNode = storageNode;
+ }
+
+
+ /**
+ * @return the JMX port
+ */
+ public int getJmxPort() {
+ return jmxPort;
+ }
+
+ /**
+ * @param jmxPort JMX port to set
+ */
+ public void setJmxPort(int jmxPort) {
+ this.jmxPort = jmxPort;
+ }
+
+ /**
+ * @return the heap size
+ */
+ public String getHeapSize() {
+ return heapSize;
+ }
+
+ /**
+ * @param heapSize heap size to set
+ */
+ public void setHeapSize(String heapSize) {
+ this.heapSize = heapSize;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("storageNode.addresss=").append(storageNode.getAddress()).append(", ");
+ builder.append("heapSize=").append(heapSize).append(", ");
+ builder.append("jmxPort=").append(jmxPort).append("");
+ return builder.toString();
+ }
+}
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
index fa51fe1..3f1ec69 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
@@ -51,14 +51,18 @@ import org.rhq.core.domain.authz.Permission;
import org.rhq.core.domain.cloud.Server;
import org.rhq.core.domain.cloud.StorageNode;
import org.rhq.core.domain.cloud.StorageNode.OperationMode;
+import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite;
import org.rhq.core.domain.cloud.StorageNodeLoadComposite;
import org.rhq.core.domain.common.JobTrigger;
import org.rhq.core.domain.configuration.Configuration;
import org.rhq.core.domain.criteria.AlertCriteria;
import org.rhq.core.domain.criteria.ResourceGroupCriteria;
+import org.rhq.core.domain.criteria.ResourceOperationHistoryCriteria;
import org.rhq.core.domain.criteria.StorageNodeCriteria;
import org.rhq.core.domain.measurement.MeasurementAggregate;
import org.rhq.core.domain.measurement.MeasurementUnits;
+import org.rhq.core.domain.operation.OperationRequestStatus;
+import org.rhq.core.domain.operation.ResourceOperationHistory;
import org.rhq.core.domain.operation.bean.GroupOperationSchedule;
import org.rhq.core.domain.resource.Resource;
import org.rhq.core.domain.resource.ResourceType;
@@ -72,6 +76,7 @@ import org.rhq.enterprise.server.auth.SubjectManagerLocal;
import org.rhq.enterprise.server.authz.RequiredPermission;
import org.rhq.enterprise.server.authz.RequiredPermissions;
import org.rhq.enterprise.server.cloud.instance.ServerManagerLocal;
+import org.rhq.enterprise.server.configuration.ConfigurationManagerLocal;
import org.rhq.enterprise.server.measurement.MeasurementDataManagerLocal;
import org.rhq.enterprise.server.operation.OperationManagerLocal;
import org.rhq.enterprise.server.resource.ResourceTypeManagerLocal;
@@ -96,6 +101,11 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
private static final String RHQ_STORAGE_JMX_PORT_PROPERTY = "jmxPort";
private static final String RHQ_STORAGE_ADDRESS_PROPERTY = "host";
+ private static final int OPERATION_QUERY_TIMEOUT = 1000;
+ private static final int MAX_ITERATIONS = 5;
+ private static final String UPDATE_CONFIGURATION_OPERATION = "updateConfiguration";
+ private static final String RESTART_OPERATION = "restart";
+
@PersistenceContext(unitName = RHQConstants.PERSISTENCE_UNIT_NAME)
private EntityManager entityManager;
@@ -120,6 +130,9 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
@EJB
private AlertManagerLocal alertManager;
+ @EJB
+ private ConfigurationManagerLocal configurationManager;
+
@Override
public void linkResource(Resource resource) {
List<StorageNode> storageNodes = this.getStorageNodes();
@@ -438,6 +451,19 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
entityManager.flush();
}
+ private StorageNode findStorageNodeByAddress(String address) {
+ TypedQuery<StorageNode> query = entityManager.<StorageNode> createNamedQuery(StorageNode.QUERY_FIND_BY_ADDRESS,
+ StorageNode.class);
+ query.setParameter("address", address);
+ List<StorageNode> result = query.getResultList();
+
+ if (result != null && result.size() > 0) {
+ return result.get(0);
+ }
+
+ return null;
+ }
+
private StorageNodeLoadComposite.MeasurementAggregateWithUnits getMeasurementAggregateWithUnits(Subject subject,
int schedId, MeasurementUnits units, long beginTime, long endTime) {
MeasurementAggregate measurementAggregate = measurementManager.getAggregate(subject, schedId, beginTime,
@@ -559,4 +585,91 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return resourceIdsWithAlertDefinitions.toArray(new Integer[resourceIdsWithAlertDefinitions.size()]);
}
+
+ @Override
+ public StorageNodeConfigurationComposite retrieveConfiguration(Subject subject, StorageNode storageNode) {
+ StorageNodeConfigurationComposite configuration = new StorageNodeConfigurationComposite(storageNode);
+
+ if (storageNode != null && storageNode.getResource() != null) {
+ Resource storageNodeResource = storageNode.getResource();
+ Configuration storageNodeConfiguration = configurationManager.getResourceConfiguration(subject,
+ storageNodeResource.getId());
+
+ configuration.setHeapSize(storageNodeConfiguration.getSimpleValue("maxHeapSize"));
+ configuration.setJmxPort(storageNode.getJmxPort());
+ }
+
+ return configuration;
+ }
+
+ @Override
+ public boolean updateConfiguration(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration) {
+ StorageNode storageNode = findStorageNodeByAddress(storageNodeConfiguration.getStorageNode().getAddress());
+
+ if (storageNode != null && storageNode.getResource() != null) {
+ Resource storageNodeResource = storageNode.getResource();
+ Configuration parameters = new Configuration();
+ parameters.setSimpleValue("jmxPort", storageNodeConfiguration.getJmxPort() + "");
+ parameters.setSimpleValue("heapSize", storageNodeConfiguration.getHeapSize() + "");
+
+ boolean updateConfigurationResult = runOperationAndWaitForResult(subject, storageNodeResource,
+ UPDATE_CONFIGURATION_OPERATION, parameters);
+
+ if (updateConfigurationResult) {
+ boolean restartResult = runOperationAndWaitForResult(subject, storageNodeResource, RESTART_OPERATION,
+ null);
+
+ if (restartResult) {
+ storageNode.setJmxPort(storageNodeConfiguration.getJmxPort());
+ entityManager.persist(storageNode);
+
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ private boolean runOperationAndWaitForResult(Subject subject, Resource storageNodeResource, String operationToRun,
+ Configuration parameters) {
+
+ //scheduling the operation
+ long operationStartTime = System.currentTimeMillis();
+ operationManager.scheduleResourceOperation(subject, storageNodeResource.getId(), operationToRun, 0, 0, 0, 0,
+ parameters, "Run by StorageNodeManagerBean");
+
+ //waiting for the operation result then return it
+ int iteration = 0;
+ boolean successResultFound = false;
+ while (iteration < MAX_ITERATIONS && !successResultFound) {
+ ResourceOperationHistoryCriteria criteria = new ResourceOperationHistoryCriteria();
+ criteria.addFilterResourceIds(storageNodeResource.getId());
+ criteria.addFilterStartTime(operationStartTime);
+ criteria.addFilterOperationName(operationToRun);
+ criteria.addFilterStatus(OperationRequestStatus.SUCCESS);
+ criteria.setPageControl(PageControl.getUnlimitedInstance());
+
+ PageList<ResourceOperationHistory> results = operationManager.findResourceOperationHistoriesByCriteria(
+ subject, criteria);
+
+ if (results != null && results.size() > 0) {
+ successResultFound = true;
+ }
+
+ if (successResultFound) {
+ break;
+ } else {
+ try {
+ Thread.sleep(OPERATION_QUERY_TIMEOUT);
+ } catch (Exception e) {
+ log.error(e);
+ }
+ }
+
+ iteration++;
+ }
+
+ return successResultFound;
+ }
}
\ No newline at end of file
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java
index a9b2514..6fca820 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java
@@ -25,6 +25,7 @@ import javax.ejb.Local;
import org.rhq.core.domain.alert.Alert;
import org.rhq.core.domain.auth.Subject;
import org.rhq.core.domain.cloud.StorageNode;
+import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite;
import org.rhq.core.domain.cloud.StorageNodeLoadComposite;
import org.rhq.core.domain.criteria.StorageNodeCriteria;
import org.rhq.core.domain.resource.Resource;
@@ -56,6 +57,10 @@ public interface StorageNodeManagerLocal {
*/
StorageNodeLoadComposite getLoad(Subject subject, StorageNode node, long beginTime, long endTime);
+ StorageNodeConfigurationComposite retrieveConfiguration(Subject subject, StorageNode storageNode);
+
+ boolean updateConfiguration(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration);
+
/**
* Fetches the list of StorageNode entities based on provided criteria.
*
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java
index 72432db..7be1b07 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java
@@ -23,6 +23,7 @@ import javax.ejb.Remote;
import org.rhq.core.domain.alert.Alert;
import org.rhq.core.domain.auth.Subject;
import org.rhq.core.domain.cloud.StorageNode;
+import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite;
import org.rhq.core.domain.cloud.StorageNodeLoadComposite;
import org.rhq.core.domain.criteria.StorageNodeCriteria;
import org.rhq.core.domain.util.PageList;
@@ -48,6 +49,10 @@ public interface StorageNodeManagerRemote {
*/
StorageNodeLoadComposite getLoad(Subject subject, StorageNode node, long beginTime, long endTime);
+ StorageNodeConfigurationComposite retrieveConfiguration(Subject subject, StorageNode storageNode);
+
+ boolean updateConfiguration(Subject subject, StorageNodeConfigurationComposite storageNodeConfiguration);
+
/**
* <p>Fetches the list of {@link StorageNode} entities based on provided criteria.</p>
*
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java
index 6b1940d..54ca4c2 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/scheduler/jobs/StorageNodeMaintenanceJob.java
@@ -71,7 +71,6 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob {
private final static String RUN_REPAIR_PROPERTY = "runRepair";
private final static String UPDATE_SEEDS_LIST = "updateSeedsList";
private final static String SEEDS_LIST = "seedsList";
- private final static String SUCCEED_PROPERTY = "succeed";
private static final String USERNAME_PROP = "rhq.cassandra.username";
private static final String PASSWORD_PROP = "rhq.cassandra.password";
@@ -169,8 +168,6 @@ public class StorageNodeMaintenanceJob extends AbstractStatefulJob {
List<Resource> childResources = LookupUtil.getResourceManager().findResourcesByCriteria(
LookupUtil.getSubjectManager().getOverlord(), c);
-
-
for (Resource childResource : childResources) {
if (STORAGE_SERVICE.equals(childResource.getName())) {
try {
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
index 6194146..380da65 100644
--- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
+++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
@@ -82,11 +82,19 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
return prepareForUpgrade(parameters);
} else if (name.equals("readRepair")) {
return readRepair();
+ } else if (name.equals("updateConfiguration")) {
+ return updateConfiguration(parameters);
} else {
return super.invokeOperation(name, parameters);
}
}
+ private OperationResult updateConfiguration(Configuration params) {
+ OperationResult result = new OperationResult("Configuration updated.");
+ //TODO: implement updates to various sub-resources here
+ return result;
+ }
+
private OperationResult nodeAdded(Configuration params) {
boolean runRepair = params.getSimple("runRepair").getBooleanValue();
boolean updateSeedsList = params.getSimple("updateSeedsList").getBooleanValue();
@@ -222,12 +230,12 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
}
return result;
}
-
+
private OperationResult prepareForUpgrade(Configuration parameters) throws Exception {
EmsConnection emsConnection = getEmsConnection();
EmsBean storageService = emsConnection.getBean("org.apache.cassandra.db:type=StorageService");
Class<?>[] emptyParams = new Class<?>[0];
-
+
if (log.isDebugEnabled()) {
log.debug("Disabling native transport...");
}
@@ -249,7 +257,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
snapshotName = System.currentTimeMillis() + "";
}
operation.invoke(snapshotName, new String[] {});
-
+
// max 2 sec
waitForTaskToComplete(500, 10, 150);
@@ -261,7 +269,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
return new OperationResult();
}
-
+
private void waitForTaskToComplete(int initialWaiting, int maxTries, int sleepMillis) {
// initial waiting
try {
diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
index 5a60d19..6ed31b7 100644
--- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
+++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
@@ -73,7 +73,7 @@
</c:list-property>
</results>
</operation>
-
+
<operation name="addNodeMaintenance">
<parameters>
<c:simple-property name="runRepair" type="boolean" default="true"/>
@@ -111,6 +111,16 @@
</parameters>
</operation>
+ <operation name="updateConfiguration" description="Updates the node configuration. Will require a separate server restart for the settings to take effect.">
+ <parameters>
+ <c:simple-property name="heapSize" type="string" description="The heap size to be used for both -Xms and -Xmx JVM options."/>
+ <c:simple-property name="jmxPort" type="integer" description="JMX port JVM option."/>
+ </parameters>
+ <results>
+ <c:simple-property name="operationResult" description="Outcome of updating the configuration."/>
+ </results>
+ </operation>
+
<resource-configuration>
<c:group name="MemorySettings">
<c:simple-property name="minHeapSize"
@@ -195,3 +205,4 @@
<service name="Keyspace" sourcePlugin="Cassandra" sourceType="Keyspace"/>
</server>
</plugin>
+
commit b268d00034cb7af8d929006e8ac4688eeae58549
Author: John Sanda <jsanda(a)redhat.com>
Date: Wed Jul 17 13:10:58 2013 -0400
Revert "cleaning up dependencies"
This reverts commit e798901b83e37cd267a2d11489664569918ef90d.
diff --git a/modules/common/cassandra-schema/pom.xml b/modules/common/cassandra-schema/pom.xml
index 55dedb8..8e3b44a 100644
--- a/modules/common/cassandra-schema/pom.xml
+++ b/modules/common/cassandra-schema/pom.xml
@@ -15,6 +15,12 @@
<dependencies>
<dependency>
<groupId>${project.groupId}</groupId>
+ <artifactId>rhq-cassandra-ccm-core</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
<artifactId>rhq-cassandra-util</artifactId>
<version>${project.version}</version>
</dependency>
diff --git a/modules/enterprise/server/server-metrics/pom.xml b/modules/enterprise/server/server-metrics/pom.xml
index 97496f7..2245978 100644
--- a/modules/enterprise/server/server-metrics/pom.xml
+++ b/modules/enterprise/server/server-metrics/pom.xml
@@ -47,6 +47,18 @@
<dependencies>
<dependency>
<groupId>${project.groupId}</groupId>
+ <artifactId>rhq-cassandra-ccm-core</artifactId>
+ <version>${project.version}</version>
+ <exclusions>
+ <exclusion>
+ <groupId>org.apache.cassandra</groupId>
+ <artifactId>cassandra-clientutil</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
<artifactId>rhq-cassandra-ccm-testng</artifactId>
<version>${project.version}</version>
<scope>test</scope>
commit a05f561f5fcac94b3308937b07d3b1e1e3f84d58
Author: Thomas Segismont <tsegismo(a)redhat.com>
Date: Wed Jul 17 16:59:03 2013 +0200
Print simpler log message when component invocation is interrupted and log level is higher than debug
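The idea is to choose the message's verbosity once, from the logger's level, rather than always printing the full generic method signature. In miniature (plain commons-logging, illustrative class):

    import java.lang.reflect.Method;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    class InterruptWarning {
        private static final Log LOG = LogFactory.getLog(InterruptWarning.class);

        static void warnInterrupted(Method method) {
            // Full generic signature only when debug is enabled; short ClassName.method otherwise.
            String name = LOG.isDebugEnabled()
                ? method.toGenericString()
                : method.getDeclaringClass().getSimpleName() + "." + method.getName();
            LOG.warn("Invocation has been marked interrupted for method [" + name + "]");
        }
    }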
diff --git a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/ResourceContainer.java b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/ResourceContainer.java
index a9fe1c2..3669947 100644
--- a/modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/ResourceContainer.java
+++ b/modules/core/plugin-container/src/main/java/org/rhq/core/pc/inventory/ResourceContainer.java
@@ -671,8 +671,19 @@ public class ResourceContainer implements Serializable {
public void markContextInterrupted() {
localContext.markInterrupted();
- LOG.warn("Invocation has been marked interrupted for method [" + method + "] on resource ["
- + resourceContainer.getResource() + "]");
+ LOG.warn(getContextInterruptedWarningMessage(LOG.isDebugEnabled()));
+ }
+
+ private String getContextInterruptedWarningMessage(boolean detailed) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("Invocation has been marked interrupted for method [");
+ if (detailed) {
+ sb.append(method.toGenericString());
+ } else {
+ sb.append(method.getDeclaringClass().getSimpleName()).append(".").append(method.getName());
+ }
+ sb.append("] on resource [").append(resourceContainer.getResource()).append("]");
+ return sb.toString();
}
}
}
[rhq] Branch 'mtho11/consolidated-metrics' - 19 commits - etc/scripts modules/common modules/core modules/enterprise modules/plugins pom.xml
by mike thompson
etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh | 42 +
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java | 44 +-
modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/PluginMetadataParser.java | 44 +-
modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/ExtensionModelTest.java | 2
modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/NestedSubCategoriesMetadataParserTest.java | 159 +++++++
modules/core/client-api/src/test/resources/test-hibernate.xml | 8
modules/core/client-api/src/test/resources/test-jbossas.xml | 10
modules/core/client-api/src/test/resources/test-subcategories-nested.xml | 60 ++
modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java | 58 +-
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java | 32 +
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java | 5
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java | 2
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsViewDataSource.java | 2
modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties | 1
modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties | 1
modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties | 1
modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties | 1
modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties | 1
modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties | 1
modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties | 1
modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties | 1
modules/enterprise/server/appserver/pom.xml | 11
modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.conf | 36 -
modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.env | 24 +
modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-storage-wrapper.inc | 6
modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml | 11
modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/ldap/FakeLdapContext.java | 37 -
modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml | 14
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java | 83 ++-
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java | 42 +
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java | 17
modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java | 1
modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java | 66 ++-
modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java | 18
modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java | 15
modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml | 2
modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/DomainServerComponentTest.java | 7
modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/SecurityModuleOptionsTest.java | 12
modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/StandaloneServerComponentTest.java | 7
modules/plugins/rhq-storage/pom.xml | 113 +++++
modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java | 148 ++++++
modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java | 142 ++++--
modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml | 13
modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java | 216 ++++++++++
modules/plugins/rhq-storage/src/test/resources/log4j.properties | 42 +
pom.xml | 6
46 files changed, 1387 insertions(+), 178 deletions(-)
New commits:
commit 18fd1fb33345694e18ae21d0f3632a8b8a34d376
Merge: 05024a9 b537244
Author: Mike Thompson <mithomps(a)redhat.com>
Date: Tue Jul 23 21:33:31 2013 -0700
Merge branch 'master' into mtho11/consolidated-metrics
commit 05024a983a883423e6680ee0efcaa47445614666
Author: Mike Thompson <mithomps(a)redhat.com>
Date: Tue Jul 23 17:56:20 2013 -0700
i18n the consolidated metrics
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java
index 8797d59..010e06f 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java
@@ -116,7 +116,7 @@ public class MetricsTableView extends Table<MetricsViewDataSource> implements Re
setListGridFields(fields.toArray(new ListGridField[0]));
addTableAction(MSG.view_measureTable_getLive(), new ShowLiveDataTableAction(this));
- addExtraWidget(addToDashboardMenu, false);
+ //addExtraWidget(addToDashboardMenu, false);
}
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsViewDataSource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsViewDataSource.java
index 431e076..fbb9bff 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsViewDataSource.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsViewDataSource.java
@@ -78,7 +78,7 @@ public class MetricsViewDataSource extends RPCDataSource<MetricDisplaySummary, C
public ArrayList<ListGridField> getListGridFields() {
ArrayList<ListGridField> fields = new ArrayList<ListGridField>(7);
- ListGridField sparklineField = new ListGridField(FIELD_SPARKLINE, "chart");
+ ListGridField sparklineField = new ListGridField(FIELD_SPARKLINE, MSG.chart_metrics_sparkline_header());
sparklineField.setCellFormatter(new CellFormatter() {
@Override
public String format(Object value, ListGridRecord record, int rowNum, int colNum) {
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties
index cf54939..abcfa62 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages.properties
@@ -23,6 +23,7 @@ chart_ie_not_supported = Charting is not available for this browser.
chart_metrics= Metrics
chart_metrics_collapse_tooltip= Click here to see additional tabular availability data.
chart_metrics_expand_tooltip= Click here to collapse additional availability detail.
+chart_metrics_sparkline_header= Chart
chart_no_data_label = No Data
chart_single_value_label = Value
chart_slider_button_bar_day = Day
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties
index ac8a91e..3a79eeb 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_cs.properties
@@ -51,6 +51,7 @@ chart_hover_time_format = %H:%M:%S
##chart_metrics= Metrics
##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data.
##chart_metrics_expand_tooltip= Click here to collapse additional availability detail.
+##chart_metrics_sparkline_header= Chart
##chart_no_data_label = No Data
##chart_single_value_label = Value
##chart_slider_button_bar_day = Day
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties
index a775ed1..e79504c 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_de.properties
@@ -26,6 +26,7 @@ chart_ie_not_supported = Charting ist bei diesem Browser nicht unterstützt
##chart_metrics= Metrics
##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data.
##chart_metrics_expand_tooltip= Click here to collapse additional availability detail.
+##chart_metrics_sparkline_header= Chart
chart_no_data_label = Keine Daten vorhanden
##chart_single_value_label = Value
chart_slider_button_bar_day = Tag
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties
index cf88ed6..3f1f701 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ja.properties
@@ -27,6 +27,7 @@
##chart_metrics= Metrics
##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data.
##chart_metrics_expand_tooltip= Click here to collapse additional availability detail.
+##chart_metrics_sparkline_header= Chart
##chart_no_data_label = No Data
##chart_single_value_label = Value
##chart_slider_button_bar_minute = Min
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties
index 1849bb7..3d40d83 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ko.properties
@@ -25,6 +25,7 @@
##chart_metrics= Metrics
##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data.
##chart_metrics_expand_tooltip= Click here to collapse additional availability detail.
+##chart_metrics_sparkline_header= Chart
##chart_no_data_label = No Data
##chart_single_value_label = Value
##chart_slider_button_bar_minute = Min
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties
index 517c787..de43fa4 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_pt.properties
@@ -24,6 +24,7 @@
##chart_metrics= Metrics
##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data.
##chart_metrics_expand_tooltip= Click here to collapse additional availability detail.
+##chart_metrics_sparkline_header= Chart
##chart_no_data_label = No Data
##chart_single_value_label = Value
##chart_slider_button_bar_minute = Min
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties
index 6311cef..669d8d6 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_ru.properties
@@ -24,6 +24,7 @@
##chart_metrics= Metrics
##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data.
##chart_metrics_expand_tooltip= Click here to collapse additional availability detail.
+##chart_metrics_sparkline_header= Chart
##chart_no_data_label = No Data
##chart_single_value_label = Value
##chart_slider_button_bar_minute = Min
diff --git a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties
index 67006d0..b15023f 100644
--- a/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties
+++ b/modules/enterprise/gui/coregui/src/main/resources/org/rhq/enterprise/gui/coregui/client/Messages_zh.properties
@@ -24,6 +24,7 @@
##chart_metrics= Metrics
##chart_metrics_collapse_tooltip= Click here to see additional tabular availability data.
##chart_metrics_expand_tooltip= Click here to collapse additional availability detail.
+##chart_metrics_sparkline_header= Chart
##chart_no_data_label = No Data
##chart_single_value_label = Value
##chart_slider_button_bar_minute = Min
commit b537244bad778a80f6fdf92880abc245eed465ec
Author: John Sanda <jsanda(a)redhat.com>
Date: Tue Jul 23 22:23:06 2013 -0400
initial commit for StorageNodeComponentITest
This is a first stab at some integration tests for the storage plugin.
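The pom changes below wire the tests into the Maven lifecycle: an antrun step stages a throwaway plugin container (plugins plus SIGAR native libraries) before the integration-test phase, failsafe then runs classes matching *ITest.java, and surefire is told to ignore them. Running them should amount to something like the following, from the rhq-storage plugin module (illustrative invocation):

    mvn clean verify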
diff --git a/modules/plugins/rhq-storage/pom.xml b/modules/plugins/rhq-storage/pom.xml
index a88f56e..df79e40 100644
--- a/modules/plugins/rhq-storage/pom.xml
+++ b/modules/plugins/rhq-storage/pom.xml
@@ -10,11 +10,16 @@
<groupId>org.rhq</groupId>
<artifactId>rhq-rhqstorage-plugin</artifactId>
- <packaging>jar</packaging>
<name>RHQ Storage Plugin</name>
<description>A plugin for managing RHQ Storage Nodes</description>
+ <properties>
+ <pc.basedir>${project.build.directory}/plugin-container</pc.basedir>
+ <pc.plugins.dir>${pc.basedir}/plugins</pc.plugins.dir>
+ <pc.lib.dir>${pc.basedir}/lib</pc.lib.dir>
+ </properties>
+
<dependencies>
<dependency>
<groupId>${rhq.groupId}</groupId>
@@ -27,7 +32,6 @@
<groupId>${rhq.groupId}</groupId>
<artifactId>rhq-cassandra-plugin</artifactId>
<version>${project.version}</version>
- <!--<scope>provided</scope>-->
</dependency>
<dependency>
@@ -35,8 +39,113 @@
<artifactId>org-mc4j-ems</artifactId>
<scope>provided</scope>
</dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>rhq-cassandra-ccm-core</artifactId>
+ <version>${project.version}</version>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>rhq-platform-plugin</artifactId>
+ <version>${project.version}</version>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>rhq-cassandra-schema</artifactId>
+ <version>${project.version}</version>
+ </dependency>
</dependencies>
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <phase>pre-integration-test</phase>
+ <configuration>
+ <target>
+ <property name="sigar.dir" value="${project.build.directory/sigar}"/>
+
+ <mkdir dir="${pc.basedir}"/>
+ <mkdir dir="${pc.lib.dir}"/>
+ <mkdir dir="${pc.plugins.dir}"/>
+
+ <copy file="${org.rhq:rhq-platform-plugin:jar}" todir="${pc.plugins.dir}"/>
+ <copy file="${org.rhq:rhq-jmx-plugin:jar}" todir="${pc.plugins.dir}"/>
+ <copy file="${org.rhq:rhq-cassandra-plugin:jar}" todir="${pc.plugins.dir}"/>
+ <copy file="${project.build.directory}/${project.build.finalName}.jar" todir="${pc.plugins.dir}"/>
+
+ <unzip src="${org.hyperic:sigar-dist:zip}" dest="${sigar.dir}">
+ <patternset>
+ <include name="**/lib/sigar.jar" />
+ <include name="**/lib/bcel*.jar" />
+ <include name="**/lib/*.so" />
+ <include name="**/lib/*.sl" />
+ <include name="**/lib/*.dll" />
+ <include name="**/lib/*.dylib" />
+ </patternset>
+ </unzip>
+ <move todir="${pc.lib.dir}" flatten="true">
+ <fileset dir="${sigar.dir}" includes="**/lib/*"/>
+ </move>
+ <delete dir="${sigar.dir}"/>
+ </target>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <artifactId>maven-failsafe-plugin</artifactId>
+ <version>2.13</version>
+ <executions>
+ <execution>
+ <id>integration-test</id>
+ <goals>
+ <goal>integration-test</goal>
+ </goals>
+ <configuration>
+ <includes>
+ <include>**/*ITest.java</include>
+ </includes>
+ <argLine>-Djava.library.path=${pc.lib.dir}</argLine>
+ <systemPropertyVariables>
+ <pc.plugins.dir>${pc.plugins.dir}</pc.plugins.dir>
+ </systemPropertyVariables>
+ </configuration>
+ </execution>
+ <execution>
+ <id>verify</id>
+ <goals>
+ <goal>verify</goal>
+ </goals>
+ <configuration>
+ <testFailureIgnore>false</testFailureIgnore>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <excludes>
+ <exclude>**/*ITest.java</exclude>
+ </excludes>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
<profiles>
<profile>
<id>dev</id>
diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
new file mode 100644
index 0000000..cd9f148
--- /dev/null
+++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
@@ -0,0 +1,216 @@
+package org.rhq.plugins.storage;
+
+import static java.util.Arrays.asList;
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertNotNull;
+
+import java.io.File;
+import java.net.InetAddress;
+import java.util.Set;
+
+import com.google.common.collect.Sets;
+
+import org.testng.annotations.AfterSuite;
+import org.testng.annotations.BeforeSuite;
+import org.testng.annotations.Test;
+
+import org.rhq.cassandra.CassandraClusterManager;
+import org.rhq.cassandra.ClusterInitService;
+import org.rhq.cassandra.Deployer;
+import org.rhq.cassandra.DeploymentOptions;
+import org.rhq.cassandra.DeploymentOptionsFactory;
+import org.rhq.cassandra.schema.SchemaManager;
+import org.rhq.core.clientapi.server.discovery.InventoryReport;
+import org.rhq.core.domain.cloud.StorageNode;
+import org.rhq.core.domain.configuration.Configuration;
+import org.rhq.core.domain.measurement.Availability;
+import org.rhq.core.domain.measurement.AvailabilityType;
+import org.rhq.core.domain.resource.Resource;
+import org.rhq.core.domain.resource.ResourceType;
+import org.rhq.core.pc.PluginContainer;
+import org.rhq.core.pc.PluginContainerConfiguration;
+import org.rhq.core.pc.inventory.InventoryManager;
+import org.rhq.core.pc.operation.OperationContextImpl;
+import org.rhq.core.pc.operation.OperationManager;
+import org.rhq.core.pc.operation.OperationServicesAdapter;
+import org.rhq.core.pc.plugin.FileSystemPluginFinder;
+import org.rhq.core.pluginapi.operation.OperationServicesResult;
+import org.rhq.core.pluginapi.operation.OperationServicesResultCode;
+import org.rhq.core.pluginapi.util.ProcessExecutionUtility;
+import org.rhq.core.system.ProcessExecution;
+import org.rhq.core.system.ProcessExecutionResults;
+import org.rhq.core.system.SystemInfo;
+import org.rhq.core.system.SystemInfoFactory;
+
+/**
+ * @author John Sanda
+ */
+public class StorageNodeComponentITest {
+
+ private File basedir;
+
+ private Resource storageNode;
+
+ @BeforeSuite
+ public void deployStorageNodeAndPluginContainer() throws Exception {
+ basedir = new File("target", "rhq-storage");
+
+ deployStorageNode();
+
+ initPluginContainer();
+ }
+
+ private void deployStorageNode() throws Exception {
+ DeploymentOptionsFactory factory = new DeploymentOptionsFactory();
+ DeploymentOptions deploymentOptions = factory.newDeploymentOptions();
+ String address = "127.0.0.1";
+
+ deploymentOptions.setSeeds(address);
+ deploymentOptions.setListenAddress(address);
+ deploymentOptions.setRpcAddress(address);
+ deploymentOptions.setBasedir(basedir.getAbsolutePath());
+ deploymentOptions.setCommitLogDir(new File(basedir, "commit_log").getAbsolutePath());
+ deploymentOptions.setDataDir(new File(basedir, "data").getAbsolutePath());
+ deploymentOptions.setSavedCachesDir(new File(basedir, "saved_caches").getAbsolutePath());
+ deploymentOptions.setCommitLogDir(new File(basedir, "logs").getAbsolutePath());
+ deploymentOptions.setLoggingLevel("DEBUG");
+ deploymentOptions.setNativeTransportPort(9142);
+ deploymentOptions.setJmxPort(7399);
+ deploymentOptions.setHeapSize("256M");
+ deploymentOptions.setHeapNewSize("64M");
+
+ deploymentOptions.load();
+
+ Deployer deployer = new Deployer();
+ deployer.setDeploymentOptions(deploymentOptions);
+
+ deployer.unzipDistro();
+ deployer.applyConfigChanges();
+ deployer.updateFilePerms();
+ deployer.updateStorageAuthConf(Sets.newHashSet(InetAddress.getByName(address)));
+
+ File binDir = new File(basedir, "bin");
+ SystemInfo systemInfo = SystemInfoFactory.createSystemInfo();
+
+ File startScript = new File(binDir, "cassandra");
+ ProcessExecution startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript);
+
+ startScriptExe.addArguments(asList("-p", "cassandra.pid"));
+ startScriptExe.setCaptureOutput(true);
+ ProcessExecutionResults results = systemInfo.executeProcess(startScriptExe);
+
+ assertEquals(results.getExitCode(), (Integer) 0, "Cassandra failed to start: " + results.getCapturedOutput());
+
+ StorageNode storageNode = new StorageNode();
+ storageNode.parseNodeInformation("127.0.0.1|7399|9142");
+
+ ClusterInitService clusterInitService = new ClusterInitService();
+ clusterInitService.waitForClusterToStart(asList(storageNode));
+
+ SchemaManager schemaManager = new SchemaManager("rhqadmin", "rhqadmin", "127.0.0.1|7399|9142");
+ schemaManager.install();
+ schemaManager.updateTopology(true);
+ }
+
+ private void initPluginContainer() {
+ PluginContainerConfiguration pcConfig = new PluginContainerConfiguration();
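+ // pc.plugins.dir is set via <systemPropertyVariables> in the maven-failsafe-plugin configuration of this module's pom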
+ File pluginsDir = new File(System.getProperty("pc.plugins.dir"));
+ pcConfig.setPluginDirectory(pluginsDir);
+ pcConfig.setPluginFinder(new FileSystemPluginFinder(pluginsDir));
+
+ pcConfig.setInsideAgent(false);
+ PluginContainer.getInstance().setConfiguration(pcConfig);
+ PluginContainer.getInstance().initialize();
+ }
+
+ @AfterSuite
+ public void shutdownPluginContainerAndStorageNode() throws Exception {
+ PluginContainer.getInstance().shutdown();
+ shutdownStorageNodeIfNecessary();
+ }
+
+ private void shutdownStorageNodeIfNecessary() throws Exception {
+ File binDir = new File(basedir, "bin");
+ File pidFile = new File(binDir, "cassandra.pid");
+
+ if (pidFile.exists()) {
+ CassandraClusterManager ccm = new CassandraClusterManager();
+ ccm.killNode(basedir);
+ }
+ }
+
+ @Test
+ public void discoverStorageNode() {
+ InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager();
+ InventoryReport inventoryReport = inventoryManager.executeServerScanImmediately();
+
+ if (inventoryReport.getAddedRoots().isEmpty()) {
+ // could be empty if the storage node is already in inventory from
+ // a prior discovery scan.
+ Resource platform = inventoryManager.getPlatform();
+ storageNode = findCassandraNode(platform.getChildResources());
+ } else {
+ storageNode = findCassandraNode(inventoryReport.getAddedRoots());
+ }
+
+ assertNotNull(storageNode, "Failed to discover Storage Node instance");
+ assertNodeIsUp("Expected " + storageNode + " to be UP after discovery");
+ }
+
+ @Test(dependsOnMethods = "discoverStorageNode")
+ public void shutdownStorageNode() throws Exception {
+ OperationManager operationManager = PluginContainer.getInstance().getOperationManager();
+ OperationServicesAdapter operationsService = new OperationServicesAdapter(operationManager);
+
+ long timeout = 1000 * 60;
+ OperationContextImpl operationContext = new OperationContextImpl(storageNode.getId());
+ OperationServicesResult result = operationsService.invokeOperation(operationContext, "shutdown",
+ new Configuration(), timeout);
+
+ assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The shutdown operation failed");
+ // TODO why is this failing?
+ assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down");
+ }
+
+ private void assertNodeIsUp(String msg) {
+ executeAvailabilityScan();
+
+ Availability availability = getAvailability();
+
+ assertNotNull(availability, "Unable to determine availability for " + storageNode);
+ assertEquals(availability.getAvailabilityType(), AvailabilityType.UP, msg);
+ }
+
+ private void assertNodeIsDown(String msg) {
+ executeAvailabilityScan();
+
+ Availability availability = getAvailability();
+
+ assertNotNull(availability, "Unable to determine availability for " + storageNode);
+ assertEquals(availability.getAvailabilityType(), AvailabilityType.DOWN, msg);
+ }
+
+ private Availability getAvailability() {
+ InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager();
+ return inventoryManager.getAvailabilityIfKnown(storageNode);
+ }
+
+ private void executeAvailabilityScan() {
+ InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager();
+ inventoryManager.executeAvailabilityScanImmediately(false, true);
+ }
+
+ private Resource findCassandraNode(Set<Resource> resources) {
+ for (Resource resource : resources) {
+ if (isCassandraNode(resource.getResourceType())) {
+ return resource;
+ }
+ }
+ return null;
+ }
+
+ private boolean isCassandraNode(ResourceType type) {
+ return type.getPlugin().equals("RHQStorage") && type.getName().equals("RHQ Storage Node");
+ }
+
+}
diff --git a/modules/plugins/rhq-storage/src/test/resources/log4j.properties b/modules/plugins/rhq-storage/src/test/resources/log4j.properties
new file mode 100644
index 0000000..67db049
--- /dev/null
+++ b/modules/plugins/rhq-storage/src/test/resources/log4j.properties
@@ -0,0 +1,42 @@
+#
+# /*
+# * RHQ Management Platform
+# * Copyright (C) 2005-2012 Red Hat, Inc.
+# * All rights reserved.
+# *
+# * This program is free software; you can redistribute it and/or modify
+# * it under the terms of the GNU General Public License, version 2, as
+# * published by the Free Software Foundation, and/or the GNU Lesser
+# * General Public License, version 2.1, also as published by the Free
+# * Software Foundation.
+# *
+# * This program is distributed in the hope that it will be useful,
+# * but WITHOUT ANY WARRANTY; without even the implied warranty of
+# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# * GNU General Public License and the GNU Lesser General Public License
+# * for more details.
+# *
+# * You should have received a copy of the GNU General Public License
+# * and the GNU Lesser General Public License along with this program;
+# * if not, write to the Free Software Foundation, Inc.,
+# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+# */
+#
+
+log4j.rootCategory=WARN, FILE, CONSOLE
+
+log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.FILE.DatePattern='.'yyyy-MM-dd
+log4j.appender.FILE.File=./target/test.log
+log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.FILE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n
+#log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n
+log4j.appender.FILE.Append=false
+
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+#log4j.appender.CONSOLE.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n
+
+log4j.logger.org.rhq=DEBUG
+log4j.logger.com.datastax=DEBUG
commit 83e5b228871c9a8352e98a12e0db76f8f4ea982e
Author: John Sanda <jsanda(a)redhat.com>
Date: Tue Jul 23 22:16:41 2013 -0400
first stab at prepareForBootstrap operation (which is currently broken)
Manual testing clearly shows this is broken. Given that the implementation is a
bit sloppy at the moment, this is a good time to get some automated tests in
place. The operation will perform the following steps in the order specified:
1) shut down the storage node
2) update cassandra.yaml
3) update rhq-storage-auth.conf
4) restart the node
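In outline, the operation boils down to the sketch below. The class and helper names are
illustrative placeholders only, not the committed RHQ plugin API; the actual implementation is
StorageNodeComponent.prepareForBootstrap() in the diff that follows.
    // Minimal, runnable sketch of the step ordering described above; all names are illustrative.
    public class PrepareForBootstrapSketch {
        public static void main(String[] args) {
            new PrepareForBootstrapSketch().prepareForBootstrap();
        }
        void prepareForBootstrap() {
            stopStorageNode();       // 1) shut down the storage node
            updateCassandraYaml();   // 2) update cassandra.yaml (seeds, ports)
            updateStorageAuthConf(); // 3) update rhq-storage-auth.conf with the cluster's addresses
            restartStorageNode();    // 4) restart the node so it bootstraps into the cluster
        }
        void stopStorageNode()       { System.out.println("1) stopping storage node"); }
        void updateCassandraYaml()   { System.out.println("2) updating cassandra.yaml"); }
        void updateStorageAuthConf() { System.out.println("3) updating rhq-storage-auth.conf"); }
        void restartStorageNode()    { System.out.println("4) restarting storage node"); }
    }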
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
index 0037bfe..f76da22 100644
--- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
+++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
@@ -111,7 +111,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
clusterBuilder = clusterBuilder.withCredentials(username, password);
}
- this.cassandraSession = clusterBuilder.build().connect(clusterName);
+// this.cassandraSession = clusterBuilder.build().connect(clusterName);
} catch (Exception e) {
LOG.error("Connect to Cassandra " + host + ":" + nativePort, e);
throw e;
@@ -196,7 +196,17 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
operation = storageService.getOperation("drain", emptyParams);
operation.invoke((Object[]) emptyParams);
- ProcessInfo process = context.getNativeProcess();
+ return stopNode();
+ }
+
+ protected OperationResult stopNode() {
+ ProcessInfo process = getResourceContext().getNativeProcess();
+
+ if (process == null) {
+ LOG.warn("Failed to obtain process info. It appears Cassandra is already shut down.");
+ return new OperationResult("Failed to obtain process info. It appears Cassandra is already shut down.");
+ }
+
long pid = process.getPid();
try {
process.kill("KILL");
@@ -209,6 +219,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
}
}
+
protected OperationResult startNode() {
ResourceContext<?> context = getResourceContext();
Configuration pluginConfig = context.getPluginConfiguration();
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
index 3b0aa5b..d9b35b9 100644
--- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
+++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
@@ -26,11 +26,15 @@
package org.rhq.plugins.storage;
import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.io.StringReader;
+import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
@@ -39,6 +43,8 @@ import org.mc4j.ems.connection.EmsConnection;
import org.mc4j.ems.connection.bean.EmsBean;
import org.mc4j.ems.connection.bean.attribute.EmsAttribute;
import org.mc4j.ems.connection.bean.operation.EmsOperation;
+import org.yaml.snakeyaml.DumperOptions;
+import org.yaml.snakeyaml.Yaml;
import org.rhq.core.domain.configuration.Configuration;
import org.rhq.core.domain.configuration.ConfigurationUpdateStatus;
@@ -48,6 +54,7 @@ import org.rhq.core.domain.configuration.PropertyMap;
import org.rhq.core.domain.configuration.PropertySimple;
import org.rhq.core.pluginapi.configuration.ConfigurationFacet;
import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport;
+import org.rhq.core.pluginapi.inventory.ResourceContext;
import org.rhq.core.pluginapi.operation.OperationFacet;
import org.rhq.core.pluginapi.operation.OperationResult;
import org.rhq.core.util.StringUtil;
@@ -96,6 +103,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
return updateConfiguration(parameters);
} else if (name.equals("updateKnownNodes")) {
return updateKnownNodes(parameters);
+ } else if (name.equals("prepareForBootstrap")) {
+ return prepareForBootstrap(parameters);
} else {
return super.invokeOperation(name, parameters);
}
@@ -132,6 +141,18 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
ipAddresses.add(propertySimple.getStringValue());
}
+ if (updateAuthFile(result, ipAddresses)) return result;
+
+ EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator");
+ EmsOperation emsOperation = authBean.getOperation("reloadConfiguration");
+ emsOperation.invoke();
+
+ result.setSimpleResult("Successfully updated the set of known nodes.");
+
+ return result;
+ }
+
+ private boolean updateAuthFile(OperationResult result, Set<String> ipAddresses) {
log.info("Updating known nodes to " + ipAddresses);
File confDir = new File(getBasedir(), "conf");
@@ -150,7 +171,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
log.error(msg);
result.setErrorMessage(msg);
- return result;
+ return true;
}
}
@@ -161,7 +182,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
"to unexpected error";
log.error(msg, e);
result.setErrorMessage(msg + ": " + ThrowableUtil.getRootMessage(e));
- return result;
+ return true;
}
try {
@@ -176,18 +197,127 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
}
result.setErrorMessage("There was an unexpected error while updating " + authFile + ". Make sure that " +
"it matches " + authBackupFile + " and then reschedule the operation.");
+ return true;
+ }
+ return false;
+ }
+
+ private OperationResult prepareForBootstrap(Configuration params) {
+ log.info("Preparing " + this + " for bootstrap...");
+
+ ResourceContext context = getResourceContext();
+ OperationResult result = new OperationResult();
+
+ log.info("Stopping storage node");
+ OperationResult stopNodeResult = stopNode();
+ if (stopNodeResult.getErrorMessage() != null) {
+ log.error("Failed to stop storage node " + this + " Cannot prepare the node for bootstrap which means " +
+ "that the storage node cannot join the cluster. Make sure the storage node is not running and retry " +
+ "the operation");
+ result.setErrorMessage("Failed to stop storage node. Cannot prepare the node for bootstrap which means " +
+ "that it cannot join the cluster. Make sure that the node is not running and retry the operation. " +
+ "Stopping the storage node failed with this error: " + stopNodeResult.getErrorMessage());
return result;
}
- EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator");
- EmsOperation emsOperation = authBean.getOperation("reloadConfiguration");
- emsOperation.invoke();
+ Configuration pluginConfig = context.getPluginConfiguration();
+ String yamlProp = pluginConfig.getSimpleValue("yamlConfiguration");
+ File yamlFile = new File(yamlProp);
- result.setSimpleResult("Successfully updated the set of known nodes.");
+ DumperOptions options = new DumperOptions();
+ options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
+ Yaml yaml = new Yaml(options);
+
+ Map yamlConfig = null;
+ try {
+ yamlConfig = (Map) yaml.load(new FileInputStream(yamlFile));
+ } catch (FileNotFoundException e) {
+ log.error("Failed to load " + yamlFile, e);
+ log.error("Cannot prepare " + this + " for bootstrap. " + yamlFile + " must exist in order to make the " +
+ "necessary configuration changes.");
+ result.setErrorMessage("Cannot prepare storage node for bootstrap. It appears that " + yamlFile +
+ " does not exist. Make sure that it exists so that the necessary configuration changes can be made.");
+
+ return result;
+ }
+
+ purgeDir(getCommitLogDir(yamlConfig));
+ for (File dataDir : getDataDirs(yamlConfig)) {
+ purgeDir(dataDir);
+ }
+ purgeDir(getSavedCachesDir(yamlConfig));
+
+ log.info("Updating cluster settings");
+
+ String address = pluginConfig.getSimpleValue("host");
+ List<String> seeds = getAddresses(params.getList("storageNodeIPAddresses"));
+ // Make sure this node's address is not in the list; otherwise, it
+ // won't bootstrap properly.
+ seeds.remove(address);
+ try {
+ updateSeedsList(seeds);
+ } catch (IOException e) {
+ log.error("Failed to update seeds property in " + yamlFile, e);
+ result.setErrorMessage("Failed to prepared node for bootstrap due to unexpected error that occurred " +
+ "while updating seeds property in " + yamlFile + ":\n" + ThrowableUtil.getAllMessages(e));
+ return result;
+ }
+
+ if (updateAuthFile(result, new HashSet<String>(seeds))) {
+ return result;
+ }
+
+ int cqlPort = Integer.parseInt(params.getSimpleValue("cqlPort"));
+ int gossipPort = Integer.parseInt(params.getSimpleValue("gossipPort"));
+
+ yamlConfig.put("native_transport_port", cqlPort);
+ yamlConfig.put("storage_port", gossipPort);
+
+ try {
+ yaml.dump(yamlConfig, new FileWriter(yamlFile));
+ } catch (IOException e) {
+ log.error("Could not update cluster settings in " + yamlFile, e);
+ result.setErrorMessage("Could not update cluster settings in " + yamlFile + ":\n" +
+ ThrowableUtil.getAllMessages(e));
+ return result;
+ }
+
+ log.info(this + " is ready to be bootstrap. Restarting storage node...");
+ OperationResult startResult = startNode();
+ if (startResult.getErrorMessage() != null) {
+ log.error("Failed to restart storage node:\n" + startResult.getErrorMessage());
+ result.setErrorMessage("Failed to restart storage node:\n" + startResult.getErrorMessage());
+ } else {
+ result.setSimpleResult("The storage node was succesfully updated is now bootstrapping into the cluster.");
+ }
return result;
}
+ private void purgeDir(File dir) {
+ log.info("Purging " + dir);
+ FileUtil.purge(dir, true);
+ }
+
+ private File getCommitLogDir(Map yamlConfig) {
+ return new File((String) yamlConfig.get("commitlog_directory"));
+ }
+
+ private List<File> getDataDirs(Map yamlConfig) {
+ List<File> dirs = new ArrayList<File>();
+ List<String> dirNames = (List<String>) yamlConfig.get("data_file_directories");
+
+ for (String dirName : dirNames) {
+ dirs.add(new File(dirName));
+ }
+
+ return dirs;
+ }
+
+ private File getSavedCachesDir(Map yamlConfig) {
+ return new File((String) yamlConfig.get("saved_caches_directory"));
+ }
+
private OperationResult nodeAdded(Configuration params) {
boolean runRepair = params.getSimple("runRepair").getBooleanValue();
boolean updateSeedsList = params.getSimple("updateSeedsList").getBooleanValue();
@@ -405,4 +535,10 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
boolean succeeded;
String details;
}
+
+ @Override
+ public String toString() {
+ return StorageNodeComponent.class.getSimpleName() + "[resourceKey: " + getResourceContext().getResourceKey() +
+ "]";
+ }
}
diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
index 1e39d6c..cd84de6 100644
--- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
+++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
@@ -101,6 +101,16 @@
</parameters>
</operation>
+ <operation name="prepareForBootstrap">
+ <parameters>
+ <c:simple-property name="cqlPort" type="integer" displayName="CQL Port"/>
+ <c:simple-property name="gossipPort" type="integer"/>
+ <c:list-property name="storageNodeIPAddresses" displayName="Storage Node IP Addresses">
+ <c:simple-property name="storageNodeIPAddress"/>
+ </c:list-property>
+ </parameters>
+ </operation>
+
<operation name="prepareForUpgrade" description="Prepares the storage node for upgrade (this operation consists of following steps: 1) turning off the RPC server, 2) turning off the gossiper, 3) taking the snapshot (backuping the data), 4) invoking the drain operation">
<parameters>
<c:simple-property name="snapshotName" required="false" type="string" displayName="Snapshot Name"
commit 4fa9f082b2e011b3bde9defe1021248148c4ad40
Author: Simeon Pinder <spinder(a)fulliautomatix.conchfritter.com>
Date: Tue Jul 23 15:02:09 2013 -0400
[BZ 984649] fix module metadata.
diff --git a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
index 145e3af..82ff294 100644
--- a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
+++ b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
@@ -290,7 +290,7 @@
<delete file="${jboss.home}/modules/system/layers/base/org/jgroups/main/jgroups-${jgroups.initial.version}.jar.index" />
<!-- Update the module metadata to the patched version -->
<replace file="${jboss.home}/modules/system/layers/base/org/jgroups/main/module.xml"
- token="jgroups-${jgroups.initial.version}.jar" value="${jgroups.patch.version}"/>
+ token="jgroups-${jgroups.initial.version}.jar" value="jgroups-${jgroups.patch.version}.jar"/>
<!-- Copy in patched version -->
<copy file="${settings.localRepository}/org/jgroups/jgroups/${jgroups.patch.version}/jgroups-${jgroups.patch.version}.jar"
toDir="${jboss.home}/modules/system/layers/base/org/jgroups/main" verbose="true"/>
commit 8203c669b3b3ba5ed5c3ef27f051220da93ea868
Author: Simeon Pinder <spinder(a)fulliautomatix.conchfritter.com>
Date: Tue Jul 23 13:37:32 2013 -0400
Upgrading richfaces to latest patched version.
diff --git a/pom.xml b/pom.xml
index 3662bc7..f909033 100644
--- a/pom.xml
+++ b/pom.xml
@@ -135,7 +135,7 @@
<postgresql.version>9.2-1002.jdbc4</postgresql.version>
<h2.version>1.2.139</h2.version>
<jtds.version>1.2.2</jtds.version>
- <richfaces.version>3.3.3.Final</richfaces.version>
+ <richfaces.version>3.3.4.Final</richfaces.version>
<jline.version>0.9.94</jline.version>
<sigar.version>1.6.5.132-5</sigar.version>
<sigar.zip.version>1.6.5</sigar.zip.version>
commit caeb7a5c832334b74f76a265c8028a5697152dda
Author: Simeon Pinder <spinder(a)fulliautomatix.conchfritter.com>
Date: Tue Jul 23 12:28:52 2013 -0400
[BZ 984649] update jgroups usage to latest patched version.
diff --git a/modules/enterprise/server/appserver/pom.xml b/modules/enterprise/server/appserver/pom.xml
index 0a61138..f1a4c7b 100644
--- a/modules/enterprise/server/appserver/pom.xml
+++ b/modules/enterprise/server/appserver/pom.xml
@@ -19,6 +19,8 @@
<properties>
<rhq.dev.data.dir>${rhq.rootDir}/rhq-data</rhq.dev.data.dir>
+ <jgroups.initial.version>3.2.7.Final</jgroups.initial.version>
+ <jgroups.patch.version>3.2.10.Final</jgroups.patch.version>
</properties>
<dependencies>
@@ -72,6 +74,13 @@
<groupId>org.codehaus.groovy</groupId>
<artifactId>groovy-all</artifactId>
</dependency>
+
+ <!-- Pull down the patched version of JGroups. See CVE 2013-4112 and BZ 984365 -->
+ <dependency>
+ <groupId>org.jgroups</groupId>
+ <artifactId>jgroups</artifactId>
+ <version>${jgroups.patch.version}</version>
+ </dependency>
</dependencies>
<build>
@@ -157,6 +166,8 @@
<property name="rhq.server.http.port" value="${rhq.server.http.port}" />
<property name="rhq.server.https.port" value="${rhq.server.https.port}" />
<property name="rhq.sync.endpoint-address" value="${rhq.sync.endpoint-address}" />
+ <property name="jgroups.initial.version" value="${jgroups.initial.version}" />
+ <property name="jgroups.patch.version" value="${jgroups.patch.version}" />
</ant>
</target>
</configuration>
diff --git a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
index a81b6cd..145e3af 100644
--- a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
+++ b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
@@ -283,6 +283,17 @@
</resources>
</module>
]]></echo>
+
+ <echo>Updating JGroups module component for EAP to ${jgroups.patch.version}</echo>
+ <!-- Remove the unpatched version -->
+ <delete file="${jboss.home}/modules/system/layers/base/org/jgroups/main/jgroups-${jgroups.initial.version}.jar" />
+ <delete file="${jboss.home}/modules/system/layers/base/org/jgroups/main/jgroups-${jgroups.initial.version}.jar.index" />
+ <!-- Update the module metadata to the patched version -->
+ <replace file="${jboss.home}/modules/system/layers/base/org/jgroups/main/module.xml"
+ token="jgroups-${jgroups.initial.version}.jar" value="${jgroups.patch.version}"/>
+ <!-- Copy in patched version -->
+ <copy file="${settings.localRepository}/org/jgroups/jgroups/${jgroups.patch.version}/jgroups-${jgroups.patch.version}.jar"
+ toDir="${jboss.home}/modules/system/layers/base/org/jgroups/main" verbose="true"/>
<echo>Generate SSL key for RHQ server - 128-bit key that expires in 20 years</echo>
<property name="jboss.conf.dir" location="${jboss.home}/standalone/configuration" />
commit cc64adde1d8835f8c000afe2de0746fda5bbd5c1
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Tue Jul 23 08:43:19 2013 -0500
One more place where the previous rebase removed code for the storage node configuration.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
index 31e3bf7..9416c67 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
@@ -643,6 +643,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
Configuration parameters = new Configuration();
parameters.setSimpleValue("jmxPort", storageNodeConfiguration.getJmxPort() + "");
parameters.setSimpleValue("heapSize", storageNodeConfiguration.getHeapSize() + "");
+ parameters.setSimpleValue("heapNewSize", storageNodeConfiguration.getHeapNewSize() + "");
+ parameters.setSimpleValue("threadStackSize", storageNodeConfiguration.getThreadStackSize() + "");
boolean updateConfigurationResult = runOperationAndWaitForResult(subject, storageNodeResource,
UPDATE_CONFIGURATION_OPERATION, parameters);
commit 373a931987b402479df3d02269cc00f4ac88a358
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Tue Jul 23 08:42:49 2013 -0500
Enable the new set of calculated metrics for disk space utilization in the UI and CLI.
diff --git a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java
index 80bfdd6..2c0b8f8 100644
--- a/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java
+++ b/modules/core/domain/src/main/java/org/rhq/core/domain/cloud/StorageNodeLoadComposite.java
@@ -41,9 +41,13 @@ public class StorageNodeLoadComposite implements Serializable {
private MeasurementAggregateWithUnits heapUsed;
private MeasurementAggregateWithUnits heapPercentageUsed;
private MeasurementAggregateWithUnits load;
- private MeasurementAggregateWithUnits partitionDiskUsedPercentage;
private MeasurementAggregateWithUnits dataDiskUsed;
private MeasurementAggregate tokens;
+
+ private MeasurementAggregateWithUnits dataDiskUsedPercentage;
+ private MeasurementAggregateWithUnits totalDiskUsedPercentage;
+ private MeasurementAggregate freeDiskToDataSizeRatio;
+
private MeasurementAggregateWithUnits actuallyOwns;
public StorageNodeLoadComposite() {
@@ -113,35 +117,59 @@ public class StorageNodeLoadComposite implements Serializable {
public void setHeapPercentageUsed(MeasurementAggregateWithUnits heapPercentageUsed) {
this.heapPercentageUsed = heapPercentageUsed;
}
-
+
/**
* @deprecated use {@link #getPartitionDiskUsedPercentage() getPartitionDiskUsedPercentage()} instead
- *
+ *
* @return partitionDiskUsedPercentage
*/
public MeasurementAggregateWithUnits getDiskSpacePercentageUsed() {
- return getPartitionDiskUsedPercentage();
+ return getDataDiskUsedPercentage();
}
-
+
/**
* @deprecated use {@link #setPartitionDiskUsedPercentage() setPartitionDiskUsedPercentage()} instead
- *
+ *
* @param partitionDiskUsedPercentage
*/
- public void setDiskSpacePercentageUsed(MeasurementAggregateWithUnits partitionDiskUsedPercentage) {
- setPartitionDiskUsedPercentage(partitionDiskUsedPercentage);
+ public void setDiskSpacePercentageUsed(MeasurementAggregateWithUnits diskUsedPercentage) {
+ setDataDiskUsedPercentage(diskUsedPercentage);
+ }
+
+ /**
+ * @return A computed metric for the percentage of disk space used by data files on the corresponding partitions.
+ * If multiple data locations are configured then the aggregate is calculated.
+ */
+ public MeasurementAggregateWithUnits getDataDiskUsedPercentage() {
+ return dataDiskUsedPercentage;
+ }
+
+ public void setDataDiskUsedPercentage(MeasurementAggregateWithUnits dataDiskUsedPercentage) {
+ this.dataDiskUsedPercentage = dataDiskUsedPercentage;
+ }
+
+ /**
+ * @return A computed metric for the percentage of total (system + Storage Node data file) disk space used on the partitions where data files are stored.
+ * If multiple data locations are configured then the aggregate is calculated.
+ */
+ public MeasurementAggregateWithUnits getTotalDiskUsedPercentage() {
+ return totalDiskUsedPercentage;
+ }
+
+ public void setTotalDiskUsedPercentage(MeasurementAggregateWithUnits totalDiskUsedPercentage) {
+ this.totalDiskUsedPercentage = totalDiskUsedPercentage;
}
/**
- * @return A computed metric for the percentage of disk space used on the partition that contains the SSTables.
- * If multiple data locations are configured then the partition with the highest utilization will be reported.
+ * @return A computed metric for the ratio of (Free Disk)/(Data File Size) on the partitions where data files are stored.
+ * If multiple data locations are configured then the aggregate is calculated.
*/
- public MeasurementAggregateWithUnits getPartitionDiskUsedPercentage() {
- return partitionDiskUsedPercentage;
+ public MeasurementAggregate getFreeDiskToDataSizeRatio() {
+ return freeDiskToDataSizeRatio;
}
- public void setPartitionDiskUsedPercentage(MeasurementAggregateWithUnits partitionDiskUsedPercentage) {
- this.partitionDiskUsedPercentage = partitionDiskUsedPercentage;
+ public void setFreeDiskToDataSizeRatio(MeasurementAggregate freeDiskToDataSizeRatio) {
+ this.freeDiskToDataSizeRatio = freeDiskToDataSizeRatio;
}
/**
@@ -202,7 +230,7 @@ public class StorageNodeLoadComposite implements Serializable {
builder.append("heapUsed=").append(heapUsed).append(", ");
builder.append("heapPercentageUsed=").append(heapPercentageUsed).append(", ");
builder.append("load=").append(load).append(", ");
- builder.append("partitionDiskUsedPercentage=").append(partitionDiskUsedPercentage).append(", ");
+ builder.append("dataUsedPercentage=").append(dataDiskUsedPercentage).append(", ");
builder.append("dataDiskUsed=").append(dataDiskUsed).append(", ");
builder.append("tokens=").append(tokens).append(", ");
builder.append("actuallyOwns=").append(actuallyOwns);
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java
index 7d413fd..07064b7 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeDatasource.java
@@ -47,6 +47,7 @@ import org.rhq.core.domain.cloud.StorageNode.OperationMode;
import org.rhq.core.domain.cloud.StorageNodeLoadComposite;
import org.rhq.core.domain.cloud.StorageNodeLoadComposite.MeasurementAggregateWithUnits;
import org.rhq.core.domain.criteria.StorageNodeCriteria;
+import org.rhq.core.domain.measurement.MeasurementAggregate;
import org.rhq.core.domain.util.PageControl;
import org.rhq.core.domain.util.PageList;
import org.rhq.core.domain.util.PageOrdering;
@@ -200,7 +201,8 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod
public static class StorageNodeLoadCompositeDatasource extends RPCDataSource<StorageNodeLoadComposite, StorageNodeCriteria> {
public static final String HEAP_PERCENTAGE_KEY = "heapPercentage";
- public static final String DISK_SPACE_PERCENTAGE_KEY = "diskSpacePercentage";
+ public static final String DATA_DISK_SPACE_PERCENTAGE_KEY = "dataDiskSpacePercentage";
+ public static final String TOTAL_DISK_SPACE_PERCENTAGE_KEY = "totalDiskSpacePercentage";
private int id;
public static StorageNodeLoadCompositeDatasource getInstance(int id) {
@@ -296,10 +298,15 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod
"This value is calculated by dividing Heap Used by Heap Maximum.", HEAP_PERCENTAGE_KEY),
Arrays.<Object> asList(loadComposite.getLoad(), "Load", "Data stored on the node", "load"),
Arrays.<Object> asList(
- loadComposite.getPartitionDiskUsedPercentage(),
- "Disk Space Percent Used",
- "Percentage of total disk space used for the partition that contains the data files.If multiple data locations are specified then this will report the average utilization accross all the partitions.",
- DISK_SPACE_PERCENTAGE_KEY),
+ loadComposite.getDataDiskUsedPercentage(),
+ "Data Disk Space Percent Used",
+ "Percentage of disk space used by data files on the partitions that contain the data files. If multiple data locations are specified then the aggregate accross all the partitions that contain data files is reported.",
+ DATA_DISK_SPACE_PERCENTAGE_KEY),
+ Arrays.<Object> asList(
+ loadComposite.getTotalDiskUsedPercentage(),
+ "Total Disk Space Percent Used",
+ "Percentage of total disk space used (system and Storage Node) on the partitions that contain the data files. If multiple data locations are specified then the aggregate accross all the partitions that contain data files is reported.",
+ TOTAL_DISK_SPACE_PERCENTAGE_KEY),
Arrays.<Object> asList(
loadComposite.getDataDiskUsed(),
"Total Disk Space Used",
@@ -325,6 +332,21 @@ public class StorageNodeDatasource extends RPCDataSource<StorageNode, StorageNod
recordsList.add(tokens);
}
+
+ if (loadComposite.getFreeDiskToDataSizeRatio() != null){
+ MeasurementAggregate aggregate = loadComposite.getFreeDiskToDataSizeRatio();
+
+ ListGridRecord record = new ListGridRecord();
+ record.setAttribute("id", "freeDiskToDataSizeRatio");
+ record.setAttribute("name", "Free Disk To Data Size Ratio");
+ record.setAttribute("hover", "Ratio of (Free Disk)/(Data File Size). A value below 1 is not recommended since a compaction or repair process could double the amount of disk space used by data files. If multiple data locations are specified then the aggregate accross all the partitions that contain data files is reported.");
+ record.setAttribute("min", aggregate.getMin());
+ record.setAttribute("avg", aggregate.getAvg());
+ record.setAttribute("max", aggregate.getMax());
+
+ recordsList.add(record);
+ }
+
ListGridRecord[] records = recordsList.toArray(new ListGridRecord[recordsList.size()]);
return records;
}
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java
index e8dde9d..e044e4e 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/admin/storage/StorageNodeLoadComponent.java
@@ -51,8 +51,9 @@ public class StorageNodeLoadComponent extends EnhancedVLayout {
@Override
protected String getCellCSSText(ListGridRecord record, int rowNum, int colNum) {
if ("avg".equals(getFieldName(colNum))
- && (StorageNodeLoadCompositeDatasource.HEAP_PERCENTAGE_KEY.equals(record.getAttribute("id")) || StorageNodeLoadCompositeDatasource.DISK_SPACE_PERCENTAGE_KEY
- .equals(record.getAttribute("id")))) {
+ && (StorageNodeLoadCompositeDatasource.HEAP_PERCENTAGE_KEY.equals(record.getAttribute("id")) ||
+ StorageNodeLoadCompositeDatasource.DATA_DISK_SPACE_PERCENTAGE_KEY.equals(record.getAttribute("id")) ||
+ StorageNodeLoadCompositeDatasource.TOTAL_DISK_SPACE_PERCENTAGE_KEY.equals(record.getAttribute("id")))) {
if (record.getAttributeAsFloat("avgFloat") > .85) {
return "font-weight:bold; color:#d64949;";
} else if (record.getAttributeAsFloat("avgFloat") > .7) {
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
index fab803b..31e3bf7 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
@@ -253,13 +253,16 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
Map<String, Integer> scheduleIdsMap = new HashMap<String, Integer>();
// get the schedule ids for Storage Service resource
- final String tokensMetric = "Tokens", ownershipMetric = "Ownership", diskUsedPercentageMetric = "Calculated.PartitionDiskUsedPercentage";
+ final String tokensMetric = "Tokens", ownershipMetric = "Ownership";
+ final String dataDiskUsedPercentageMetric = "Calculated.DataDiskUsedPercentage";
+ final String totalDiskUsedPercentageMetric = "Calculated.TotalDiskUsedPercentage";
+ final String freeDiskToDataRatioMetric = "Calculated.FreeDiskToDataSizeRatio";
final String loadMetric = "Load", keyCacheSize = "KeyCacheSize", rowCacheSize = "RowCacheSize", totalCommitLogSize = "TotalCommitlogSize";
TypedQuery<Object[]> query = entityManager.<Object[]> createNamedQuery(
StorageNode.QUERY_FIND_SCHEDULE_IDS_BY_PARENT_RESOURCE_ID_AND_MEASUREMENT_DEFINITION_NAMES, Object[].class);
query.setParameter("parrentId", resourceId).setParameter("metricNames",
- Arrays.asList(tokensMetric, ownershipMetric, diskUsedPercentageMetric, loadMetric, keyCacheSize,
- rowCacheSize, totalCommitLogSize));
+ Arrays.asList(tokensMetric, ownershipMetric, loadMetric, keyCacheSize, rowCacheSize, totalCommitLogSize,
+ dataDiskUsedPercentageMetric, totalDiskUsedPercentageMetric, freeDiskToDataRatioMetric));
for (Object[] pair : query.getResultList()) {
scheduleIdsMap.put((String) pair[0], (Integer) pair[1]);
}
@@ -292,10 +295,22 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime);
result.setActuallyOwns(ownershipAggregateWithUnits);
}
- if ((scheduleId = scheduleIdsMap.get(diskUsedPercentageMetric)) != null) {
- StorageNodeLoadComposite.MeasurementAggregateWithUnits diskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits(
+
+ //calculated disk space related metrics
+ if ((scheduleId = scheduleIdsMap.get(dataDiskUsedPercentageMetric)) != null) {
+ StorageNodeLoadComposite.MeasurementAggregateWithUnits dataDiskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits(
+ subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime);
+ result.setDataDiskUsedPercentage(dataDiskUsedPercentageAggregateWithUnits);
+ }
+ if ((scheduleId = scheduleIdsMap.get(totalDiskUsedPercentageMetric)) != null) {
+ StorageNodeLoadComposite.MeasurementAggregateWithUnits totalDiskUsedPercentageAggregateWithUnits = getMeasurementAggregateWithUnits(
subject, scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime);
- result.setPartitionDiskUsedPercentage(diskUsedPercentageAggregateWithUnits);
+ result.setTotalDiskUsedPercentage(totalDiskUsedPercentageAggregateWithUnits);
+ }
+ if ((scheduleId = scheduleIdsMap.get(freeDiskToDataRatioMetric)) != null) {
+ MeasurementAggregate freeDiskToDataRatioAggregate = measurementManager.getAggregate(subject,
+ scheduleId, beginTime, endTime);
+ result.setFreeDiskToDataSizeRatio(freeDiskToDataRatioAggregate);
}
if ((scheduleId = scheduleIdsMap.get(loadMetric)) != null) {
diff --git a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
index 5bbebed..e95f995 100644
--- a/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
+++ b/modules/plugins/cassandra/src/main/resources/META-INF/rhq-plugin.xml
@@ -188,7 +188,7 @@
<metric property="Calculated.DataDiskUsedPercentage" displayName="Data File Disk Used Percentage" dataType="measurement" units="percentage" displayType="summary" description="Percentage of disk space used by Cassandra data files. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/>
<metric property="Calculated.TotalDiskUsedPercentage" displayName="Total Disk Used Percentage" dataType="measurement" units="percentage" displayType="summary" description="Percentage of total disk space used. The metric acounts overall disk usage (including system files), not just disk space used by Cassandra. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/>
- <metric property="Calculated.FreeDiskToDataSizeRatio" displayName="Free Disk to Data Size Ratio" dataType="measurement" displayType="summary" description="Ratio of (Amount of Free Disk)/(Data File Size). A value below 1 is not recommended since a compaction or repair process could double the amount of disk space used by data files. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/>
+ <metric property="Calculated.FreeDiskToDataSizeRatio" displayName="Free Disk to Data Size Ratio" dataType="measurement" displayType="summary" description="Ratio of (Free Disk)/(Data File Size). A value below 1 is not recommended since a compaction or repair process could double the amount of disk space used by data files. The aggregate accross all the partitions will be reported if multiple data locations are specified. This is a calculated metric derived from system and Cassandra runtime information."/>
<metric property="CurrentGenerationNumber" dataType="trait" displayType="summary" description="Current generation number"/>
<metric property="ExceptionCount" measurementType="trendsup" dataType="measurement" displayType="summary" description="Exception Count"/>
commit 6997631e56204db41c9f4902eef1c6210706be3f
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Mon Jul 22 15:14:22 2013 -0500
Add back code used to update the storage node configuration that was lost due to a rebase. This code updates the two additional storage node properties that were recently added.
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
index b32ab5b..fab803b 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
@@ -611,6 +611,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
storageNodeResource.getId());
configuration.setHeapSize(storageNodeConfiguration.getSimpleValue("maxHeapSize"));
+ configuration.setHeapNewSize(storageNodeConfiguration.getSimpleValue("heapNewSize"));
+ configuration.setThreadStackSize(storageNodeConfiguration.getSimpleValue("threadStackSize"));
configuration.setJmxPort(storageNode.getJmxPort());
}
diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
index 8156d02..1e39d6c 100644
--- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
+++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
@@ -122,6 +122,9 @@
<operation name="updateConfiguration" description="Updates the node configuration. Will require a separate server restart for the settings to take effect.">
<parameters>
<c:simple-property name="heapSize" type="string" description="The heap size to be used for both -Xms and -Xmx JVM options."/>
+ <c:simple-property name="heapNewSize" type="string" description="The heap new size to be used be used with -Xmn JVM option."/>
+ <c:simple-property name="threadStackSize" type="integer" description="The thread stack size. This memory is allocated to each thread off heap. The
+ value should be an integer that will be interpreted in kilobytes."/>
<c:simple-property name="jmxPort" type="integer" description="JMX port JVM option."/>
</parameters>
<results>
commit e85abcf495d6939d876b09bd1de1e71a29af17ec
Author: Heiko W. Rupp <hwr(a)redhat.com>
Date: Tue Jul 23 10:42:17 2013 +0200
BZ 796480 (and others) add support for subCategory in embedded types (aka runs-inside)
diff --git a/modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/PluginMetadataParser.java b/modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/PluginMetadataParser.java
index 88dd865..d5ff2ef 100644
--- a/modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/PluginMetadataParser.java
+++ b/modules/core/client-api/src/main/java/org/rhq/core/clientapi/agent/metadata/PluginMetadataParser.java
@@ -417,16 +417,56 @@ public class PluginMetadataParser {
return serviceResourceType;
}
- private static void setSubCategory(ResourceDescriptor resourceDescriptor, ResourceType resourceType)
+ /**
+ * Try to find the subCategory of the p/s/s descriptor in one of the parents
+ * <subcategories><subcategory>Foo</subcategory></subcategories> elements and
+ * set it on the resourceType if found.
+ *
+ * It is not enough to look at the direct parents; we also need to look at the
+ * <runs-inside> types if our type is "embedded" in a different type.
+ * @param resourceDescriptor Descriptor to get the subCategory attribute from
+ * @param resourceType The type to attach the ResourceSubCategory to.
+ * @throws InvalidPluginDescriptorException If the descriptor.subCategory can not be found in any parent.
+ */
+ private void setSubCategory(ResourceDescriptor resourceDescriptor, ResourceType resourceType)
throws InvalidPluginDescriptorException {
String subCatName = resourceDescriptor.getSubCategory();
if (subCatName != null) {
ResourceSubCategory subCat = SubCategoriesMetadataParser.findSubCategoryOnResourceTypeAncestor(
resourceType, subCatName);
- if (subCat == null)
+
+ // We need to look at resourceDescriptor -> runsInside to see if one of those defines the
+ // subcategories that we are looking for.
+ if (subCat == null && resourceDescriptor.getRunsInside() != null) {
+ RunsInsideType rit = resourceDescriptor.getRunsInside();
+ List<ParentResourceType> parentResourceTypeList = rit.getParentResourceType();
+ for (ParentResourceType parentResourceType : parentResourceTypeList) {
+ ResourceType parentType = getResourceTypeFromPlugin(parentResourceType.getName(),parentResourceType.getPlugin());
+ // check on the parent
+ if (parentType.getChildSubCategories()!=null ) {
+ for (ResourceSubCategory parentSubcat : parentType.getChildSubCategories()) {
+ if (parentSubcat.getName().equals(subCatName)) {
+ subCat = parentSubcat;
+ break;
+ }
+ }
+ }
+
+ // Not found on the runs-inside type; look at the ancestors of those runs-inside types.
+ if (subCat==null) {
+ subCat = SubCategoriesMetadataParser.findSubCategoryOnResourceTypeAncestor(parentType,subCatName);
+ }
+ if (subCat!=null) {
+ break;
+ }
+ }
+ }
+
+ if (subCat == null) {
throw new InvalidPluginDescriptorException("Resource type [" + resourceType.getName()
+ "] specified a subcategory (" + subCatName
+ ") that is not defined as a child subcategory of one of its ancestor resource types.");
+ }
resourceType.setSubCategory(subCat);
}
}
diff --git a/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/ExtensionModelTest.java b/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/ExtensionModelTest.java
index 20e1aa9..2a9595f 100644
--- a/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/ExtensionModelTest.java
+++ b/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/ExtensionModelTest.java
@@ -226,6 +226,7 @@ public class ExtensionModelTest {
assert jbossServer.getCategory().equals(ResourceCategory.SERVER);
assert jbossServer.getDescription().equals("JBoss Application Server Description");
assert jbossServer.getParentResourceTypes().size() == 0;
+ assert jbossServer.getChildSubCategories().size() == 2;
assert jbossServer.getChildResourceTypes().size() == 1;
ResourceType embeddedTomcatServer = jbossServer.getChildResourceTypes().iterator().next();
@@ -263,6 +264,7 @@ public class ExtensionModelTest {
assert hibernateService.getDescription().equals("Hibernate Service Description");
assert hibernateService.getChildResourceTypes().size() == 0;
assert hibernateService.getParentResourceTypes().size() == 3;
+ assert hibernateService.getSubCategory().getName().equals("Framework");
ResourceType tomcatServer = metadataManager.getType("TomcatServer", "Tomcat");
ResourceType jbossServer = metadataManager.getType("JBossASServer", "JBossAS");
diff --git a/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/NestedSubCategoriesMetadataParserTest.java b/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/NestedSubCategoriesMetadataParserTest.java
new file mode 100644
index 0000000..1cba523
--- /dev/null
+++ b/modules/core/client-api/src/test/java/org/rhq/core/clientapi/agent/metadata/test/NestedSubCategoriesMetadataParserTest.java
@@ -0,0 +1,159 @@
+ /*
+ * RHQ Management Platform
+ * Copyright (C) 2005-2008 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation, and/or the GNU Lesser
+ * General Public License, version 2.1, also as published by the Free
+ * Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License and the GNU Lesser General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * and the GNU Lesser General Public License along with this program;
+ * if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+package org.rhq.core.clientapi.agent.metadata.test;
+
+ import java.net.URL;
+ import java.util.List;
+ import java.util.Set;
+
+ import javax.xml.bind.JAXBContext;
+ import javax.xml.bind.Unmarshaller;
+ import javax.xml.bind.util.ValidationEventCollector;
+
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
+ import org.testng.annotations.BeforeSuite;
+ import org.testng.annotations.Test;
+
+ import org.rhq.core.clientapi.agent.metadata.PluginMetadataManager;
+ import org.rhq.core.clientapi.agent.metadata.SubCategoriesMetadataParser;
+ import org.rhq.core.clientapi.descriptor.AgentPluginDescriptorUtil;
+ import org.rhq.core.clientapi.descriptor.DescriptorPackages;
+ import org.rhq.core.clientapi.descriptor.plugin.PluginDescriptor;
+ import org.rhq.core.clientapi.descriptor.plugin.ResourceDescriptor;
+ import org.rhq.core.clientapi.descriptor.plugin.ServerDescriptor;
+ import org.rhq.core.clientapi.descriptor.plugin.ServiceDescriptor;
+ import org.rhq.core.clientapi.descriptor.plugin.SubCategoryDescriptor;
+ import org.rhq.core.domain.resource.ResourceCategory;
+ import org.rhq.core.domain.resource.ResourceSubCategory;
+ import org.rhq.core.domain.resource.ResourceType;
+
+ /**
+ * @author Charles Crouch
+ * @author Heiko W. Rupp
+ */
+public class NestedSubCategoriesMetadataParserTest {
+ private static final String DESCRIPTOR_FILENAME = "test-subcategories-nested.xml";
+ private final Log LOG = LogFactory.getLog(NestedSubCategoriesMetadataParserTest.class);
+
+ private PluginDescriptor pluginDescriptor;
+
+ @BeforeSuite
+ public void loadPluginDescriptor() throws Exception {
+ try {
+ URL descriptorUrl = this.getClass().getClassLoader().getResource(DESCRIPTOR_FILENAME);
+ LOG.info("Loading plugin descriptor at: " + descriptorUrl);
+
+ JAXBContext jaxbContext = JAXBContext.newInstance(DescriptorPackages.PC_PLUGIN);
+
+ Unmarshaller unmarshaller = jaxbContext.createUnmarshaller();
+ ValidationEventCollector vec = new ValidationEventCollector();
+ unmarshaller.setEventHandler(vec);
+ pluginDescriptor = (PluginDescriptor) unmarshaller.unmarshal(descriptorUrl.openStream());
+ } catch (Throwable t) {
+ // Catch RuntimeExceptions and Errors and dump their stack trace, because Surefire will completely swallow them
+ // and throw a cryptic NPE (see http://jira.codehaus.org/browse/SUREFIRE-157)!
+ t.printStackTrace();
+ throw new RuntimeException(t);
+ }
+ }
+
+ @Test
+ public void parseSingleSubCategory() {
+ List<ServerDescriptor> servers = pluginDescriptor.getServers();
+ ServerDescriptor server0 = servers.get(0);
+ ResourceDescriptor.Subcategories subCategoriesDescriptor = server0.getSubcategories();
+ assert subCategoriesDescriptor != null : "No subcategories element: " + server0.getName();
+
+ List<SubCategoryDescriptor> subCategoryDescriptors = subCategoriesDescriptor.getSubcategory();
+
+ assert subCategoryDescriptors != null : "No subcategory elements: " + server0.getName();
+ assert !subCategoryDescriptors.isEmpty() : "No subcategory elements: " + server0.getName();
+
+ ResourceSubCategory subCat;
+
+ ResourceType resType = new ResourceType("testResType", "myplugin", ResourceCategory.SERVER, null);
+ subCat = SubCategoriesMetadataParser.getSubCategory(subCategoryDescriptors.get(0), resType);
+
+ assert subCat != null : "Null subcategory received from parser";
+ assert subCat.getName().equals("applications") : "Name not read correctly";
+ assert subCat.getDisplayName().equals("Apps") : "Display name not read correctly";
+ assert subCat.getDescription().equals("The apps.") : "Description not read correctly";
+ // getSubCategory is no longer responsible for setting resource type information; that is done in PluginMetadataParser
+ //assert subCat.getResourceType().equals(resType) : "ResourceType not set correctly";
+
+ }
+
+ @Test
+ public void parseNestedSubCategories() {
+ List<ServerDescriptor> servers = pluginDescriptor.getServers();
+ ServerDescriptor server2 = servers.get(1);
+ assert server2.getName().equals("testServer2");
+ ResourceDescriptor.Subcategories subCategoriesDescriptor = server2.getSubcategories();
+ assert subCategoriesDescriptor == null : "Unexpected subcategories element: " + server2.getName();
+ assert server2.getSubCategory().equals("applications");
+
+ List<ServiceDescriptor> services = pluginDescriptor.getServices();
+ ServiceDescriptor service1 = services.get(0);
+ assert service1.getName().equals("testService");
+ assert service1.getSubCategory().equals("applications");
+ }
+
+ @Test
+ public void testParseViaMetaDataManager() throws Exception {
+
+ PluginDescriptor pluginDescriptor;
+
+ URL descriptorUrl = this.getClass().getClassLoader().getResource(DESCRIPTOR_FILENAME);
+ System.out.println("Loading plugin descriptor at: " + descriptorUrl);
+
+ pluginDescriptor = (PluginDescriptor) AgentPluginDescriptorUtil.parsePluginDescriptor(descriptorUrl
+ .openStream());
+
+ PluginMetadataManager metadataManager = new PluginMetadataManager();
+ Set<ResourceType> typeSet = metadataManager.loadPlugin(pluginDescriptor);
+ assert typeSet != null : "Got no types!!";
+ assert typeSet.size()==5 : "Expected 5 types, but got " + typeSet.size();
+
+ ResourceType testService = findType(typeSet,"testService");
+ assert testService.getSubCategory().getName().equals("applications");
+
+ ResourceType testService2 = findType(typeSet,"testService2");
+ assert testService2.getSubCategory().getName().equals("applications");
+
+ ResourceType testService3 = findType(typeSet,"testService3");
+ assert testService3.getSubCategory().getName().equals("fooBar");
+
+
+ }
+
+ private ResourceType findType(Set<ResourceType> types, String name) {
+ for (ResourceType type : types ) {
+ if (type.getName().equals(name)) {
+ return type;
+ }
+ }
+ assert false : "Type with name " + name + " not found";
+ return null;
+ }
+ }
\ No newline at end of file
diff --git a/modules/core/client-api/src/test/resources/test-hibernate.xml b/modules/core/client-api/src/test/resources/test-hibernate.xml
index 37a2e03..9051ca2 100644
--- a/modules/core/client-api/src/test/resources/test-hibernate.xml
+++ b/modules/core/client-api/src/test/resources/test-hibernate.xml
@@ -3,7 +3,7 @@
package="org.rhq.plugins.test2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="urn:xmlns:rhq-plugin">
-
+
<depends plugin="JMX" />
<depends plugin="Tomcat" />
<depends plugin="JBossAS" />
@@ -11,11 +11,13 @@
<service name="HibernateService"
discovery="HibernateDiscoveryComponent"
class="HibernateServiceComponent"
- description="Hibernate Service Description">
+ description="Hibernate Service Description"
+ subCategory="Framework"
+ >
<runs-inside>
<parent-resource-type name="TomcatServer" plugin="Tomcat"/>
<parent-resource-type name="JBossASServer" plugin="JBossAS"/>
<parent-resource-type name="EmbeddedTomcatServer" plugin="JBossAS"/>
</runs-inside>
- </service>
+ </service>
</plugin>
\ No newline at end of file
diff --git a/modules/core/client-api/src/test/resources/test-jbossas.xml b/modules/core/client-api/src/test/resources/test-jbossas.xml
index 4d2602b..8269343 100644
--- a/modules/core/client-api/src/test/resources/test-jbossas.xml
+++ b/modules/core/client-api/src/test/resources/test-jbossas.xml
@@ -12,7 +12,11 @@
discovery="JBossASDiscoveryComponent"
class="JBossASServerComponent"
description="JBoss Application Server Description">
-
+ <subcategories>
+ <subcategory name="Applications" />
+ <subcategory name="Framework" />
+ </subcategories>
+
<operation name="stop" displayName="Stop JBossAS Server" description="Kills the server" timeout="30">
<parameters>
<c:simple-property name="force"
@@ -29,7 +33,7 @@
required="true"
description="If true, the server is definitely down; otherwise, the shutdown was issued but it is unclear if it really died"/>
</results>
- </operation>
+ </operation>
<server name="EmbeddedTomcatServer"
description="Embedded Tomcat Web Server Description"
sourcePlugin="Tomcat"
@@ -37,6 +41,6 @@
discovery="JBossASTomcatDiscoveryComponent"
class="JBossASTomcatServerComponent">
</server>
-
+
</server>
</plugin>
\ No newline at end of file
diff --git a/modules/core/client-api/src/test/resources/test-subcategories-nested.xml b/modules/core/client-api/src/test/resources/test-subcategories-nested.xml
new file mode 100644
index 0000000..eb43af7
--- /dev/null
+++ b/modules/core/client-api/src/test/resources/test-subcategories-nested.xml
@@ -0,0 +1,60 @@
+<!--
+ ~ RHQ Management Platform
+ ~ Copyright (C) 2005-2013 Red Hat, Inc.
+ ~ All rights reserved.
+ ~
+ ~ This program is free software; you can redistribute it and/or modify
+ ~ it under the terms of the GNU General Public License as published by
+ ~ the Free Software Foundation version 2 of the License.
+ ~
+ ~ This program is distributed in the hope that it will be useful,
+ ~ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ ~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ ~ GNU General Public License for more details.
+ ~
+ ~ You should have received a copy of the GNU General Public License
+ ~ along with this program; if not, write to the Free Software Foundation, Inc.,
+ ~ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ -->
+
+<plugin name="TestPlugin" displayName="Mock JBoss AS" package="org.rhq.plugins.mock.jboss"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns="urn:xmlns:rhq-plugin">
+
+ <server name="testServer1">
+ <subcategories>
+ <subcategory name="applications" displayName="Apps" description="The apps."/>
+ <subcategory name="fooBar"/>
+ </subcategories>
+ </server>
+
+ <!-- subCategory="applications" means that resources of this type go in the 'applications folder' of testServer1-->
+ <server name="testServer2" subCategory="applications">
+ <runs-inside>
+ <parent-resource-type name="testServer1" plugin="TestPlugin"/>
+ </runs-inside>
+ <metric property="testMetric"/>
+ </server>
+
+ <service name="testService" subCategory="applications">
+ <runs-inside>
+ <parent-resource-type name="testServer1" plugin="TestPlugin"/>
+ </runs-inside>
+
+ </service>
+
+ <service name="testService2" subCategory="applications">
+ <runs-inside>
+ <parent-resource-type name="testServer1" plugin="TestPlugin"/>
+ </runs-inside>
+
+ </service>
+
+ <service name="testService3" subCategory="fooBar">
+ <runs-inside>
+ <parent-resource-type name="testServer1" plugin="TestPlugin"/>
+ </runs-inside>
+
+ </service>
+
+</plugin>
commit 60329fcbda5a1961e0f9285c70eb56ea12fe2eb0
Author: Heiko W. Rupp <hwr(a)redhat.com>
Date: Mon Jul 22 15:00:35 2013 +0200
Add Michael Burman as contributor
diff --git a/pom.xml b/pom.xml
index c5cf5a7..3662bc7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -2353,6 +2353,10 @@
<timezone>+1</timezone>
</contributor>
<contributor>
+ <name>Michael Burman</name>
+ <timezone>+2</timezone>
+ </contributor>
+ <contributor>
<name>Torben Jäger</name>
<timezone>+1</timezone>
</contributor>
commit d7e9f5b9871824d1f02ae762b44cff85ff6c3d44
Author: Thomas Segismont <tsegismo(a)redhat.com>
Date: Tue Jul 23 10:41:12 2013 +0200
Bug 969621 - EAP 6 managed plug-in is unable to discover EAP servers when more than one is running on a single host
Update expected resource keys in itests
diff --git a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/DomainServerComponentTest.java b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/DomainServerComponentTest.java
index 91ece92..269474b 100644
--- a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/DomainServerComponentTest.java
+++ b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/DomainServerComponentTest.java
@@ -43,8 +43,11 @@ public class DomainServerComponentTest extends AbstractServerComponentTest {
public static final ResourceType RESOURCE_TYPE =
new ResourceType("JBossAS7 Host Controller", PLUGIN_NAME, ResourceCategory.SERVER, null);
- // The key is the server's base dir.
- public static final String RESOURCE_KEY = new File(JBOSS_HOME, "domain").getPath();
+ // The key is the server host config file
+ // hostConfig: /tmp/jboss-as-6.0.0/domain/configuration/host.xml
+ public static final String RESOURCE_KEY = "hostConfig: "
+ + new File(JBOSS_HOME, "domain" + File.separator + "configuration" + File.separator + "host.xml")
+ .getAbsolutePath();
@Override
protected ResourceType getServerResourceType() {
diff --git a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/SecurityModuleOptionsTest.java b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/SecurityModuleOptionsTest.java
index d128144..182ef36 100644
--- a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/SecurityModuleOptionsTest.java
+++ b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/domain/SecurityModuleOptionsTest.java
@@ -1,6 +1,6 @@
/*
* RHQ Management Platform
- * Copyright (C) 2005-2011 Red Hat, Inc.
+ * Copyright (C) 2005-2013 Red Hat, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -13,8 +13,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * along with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
package org.rhq.modules.plugins.jbossas7.itest.domain;
@@ -488,10 +488,8 @@ public class SecurityModuleOptionsTest extends AbstractJBossAS7PluginTest {
InventoryManager im = pluginContainer.getInventoryManager();
Resource platform = im.getPlatform();
//host controller
- ResourceType hostControllerType = new ResourceType("JBossAS7 Host Controller", PLUGIN_NAME,
- ResourceCategory.SERVER, null);
- Resource hostController = getResourceByTypeAndKey(platform, hostControllerType,
- "/tmp/jboss-as-6.0.0/domain");
+ Resource hostController = getResourceByTypeAndKey(platform, DomainServerComponentTest.RESOURCE_TYPE,
+ DomainServerComponentTest.RESOURCE_KEY);
//profile=full-ha
ResourceType profileType = new ResourceType("Profile", PLUGIN_NAME, ResourceCategory.SERVICE, null);
String key = PROFILE;
diff --git a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/StandaloneServerComponentTest.java b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/StandaloneServerComponentTest.java
index 8446345..32f92c7 100644
--- a/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/StandaloneServerComponentTest.java
+++ b/modules/plugins/jboss-as-7/src/test/java/org/rhq/modules/plugins/jbossas7/itest/standalone/StandaloneServerComponentTest.java
@@ -51,8 +51,11 @@ public class StandaloneServerComponentTest extends AbstractServerComponentTest {
public static final ResourceType RESOURCE_TYPE =
new ResourceType("JBossAS7 Standalone Server", PLUGIN_NAME, ResourceCategory.SERVER, null);
- // The key is the server's base dir.
- public static final String RESOURCE_KEY = new File(JBOSS_HOME, "standalone").getPath();
+ // The key is the server host config file
+ // hostConfig: /tmp/jboss-as-6.0.0/standalone/configuration/standalone-full-ha.xml
+ public static final String RESOURCE_KEY = "hostConfig: "
+ + new File(JBOSS_HOME, "standalone" + File.separator + "configuration" + File.separator
+ + "standalone-full-ha.xml").getAbsolutePath();
private static final String RELOAD_OPERATION_NAME = "reload";
private static final String RESTART_OPERATION_NAME = "restart";
commit 567aee7f81c6aa0f7680d4f394cccb1974705320
Author: Larry O'Leary <loleary(a)redhat.com>
Date: Mon Jul 22 16:10:09 2013 -0500
BZ 981015: Fix test failures introduced by commit 01cd91b
- findLdapUserDetails was appending baseDN twice during fallback code
- FakeLdapContext contained some lazy escaping on the mock group entries
diff --git a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/ldap/FakeLdapContext.java b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/ldap/FakeLdapContext.java
index dad31ce..2ae6265 100644
--- a/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/ldap/FakeLdapContext.java
+++ b/modules/enterprise/server/itests-2/src/test/java/org/rhq/enterprise/server/test/ldap/FakeLdapContext.java
@@ -99,7 +99,6 @@ public class FakeLdapContext implements LdapContext {
try {
return new FakeNamingEnumeration<SearchResult>(ldapTestData.getSearchResults(attributes));
} catch (Exception e) {
- // TODO Auto-generated catch block
e.printStackTrace();
return null;
}
@@ -516,12 +515,12 @@ public class FakeLdapContext implements LdapContext {
attr = new BasicAttribute("member");
attr.add("cn=Robert Smith,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Cannon\\, Brett,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
- attr.add("cn=Charles H\\\\Samlin,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
+ attr.add("cn=Charles H\\Samlin,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Craig \\#1 Sellers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Beverly \\+1 Balanger,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Bethany \\<Stuart\\> Wallace,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Zachory S\\; Balanger,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
- attr.add("cn=Allen \\\"The Hammer\\\" Callen,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
+ attr.add("cn=Allen \"The Hammer\" Callen,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Sam Not \\= Smitherson,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=\\ Billy The Kiddough\\ ,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=System/Integration API,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
@@ -557,12 +556,12 @@ public class FakeLdapContext implements LdapContext {
attr = new BasicAttribute("member");
attr.add("cn=John Smith,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Dr. Greg Hause\\, MD,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
- attr.add("cn=Cindy\\\\Cynthia Groober,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
+ attr.add("cn=Cindy\\Cynthia Groober,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Biff \\# Rogers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Steven \\+2 Reed,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Lisa \\<The Great\\> Toller,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Homer J Simpsonite\\; III,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
- attr.add("cn=Jessica \\\"Crouching Tiger\\\" Mathers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
+ attr.add("cn=Jessica \"Crouching Tiger\" Mathers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Hope \\= Rein,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=\\ Sue Ferguson\\ ,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Phil/Susan Carlson,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
@@ -598,12 +597,12 @@ public class FakeLdapContext implements LdapContext {
attr = new BasicAttribute("member");
attr.add("cn=Sheri Smith,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Walsh\\, Brad,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
- attr.add("cn=Jim\\\\James Kirk,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
+ attr.add("cn=Jim\\James Kirk,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Sandra \\# Phillips,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=William Tell Overture \\+1,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Craig \\<Bison\\> Allen,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Walter T Fredrick\\; The Second,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
- attr.add("cn=Stanley \\\"Short\\\" Mein,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
+ attr.add("cn=Stanley \"Short\" Mein,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=Noah \\= Sadler,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=\\ Stuart Smiley\\ ,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
attr.add("cn=System/Integration API 2,ou=users,dc=test,dc=rhq,dc=redhat,dc=com");
@@ -984,7 +983,7 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with backslash (\\) in 'cn' in the RHQ Admin Group");
attrs.put(attr);
- sr = new SearchResult("cn=Charles H\\\\Samlin,ou=users", null, null, attrs, true);
+ sr = new SearchResult("cn=Charles H\\Samlin,ou=users", null, null, attrs, true);
this.add(sr);
// dn: cn=Cindy\\Cynthia Groober,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
@@ -1033,7 +1032,7 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with backslash (\\) in 'cn' in the JBoss Admin Group");
attrs.put(attr);
- sr = new SearchResult("cn=Cindy\\\\Cynthia Groober,ou=users", null, null, attrs, true);
+ sr = new SearchResult("cn=Cindy\\Cynthia Groober,ou=users", null, null, attrs, true);
this.add(sr);
// dn: cn=Jim\\James Kirk,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
@@ -1082,7 +1081,7 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with backslash (\\) in 'cn' in the JBoss Monitor Group");
attrs.put(attr);
- sr = new SearchResult("cn=Jim\\\\James Kirk,ou=users", null, null, attrs, true);
+ sr = new SearchResult("cn=Jim\\James Kirk,ou=users", null, null, attrs, true);
this.add(sr);
// dn: cn=Craig \#1 Sellers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
@@ -1675,7 +1674,7 @@ public class FakeLdapContext implements LdapContext {
null, attrs, true);
this.add(sr);
- // dn: cn=Allen \"The Hammer\" Callen,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
+ // dn: cn=Allen "The Hammer" Callen,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
attrs = new BasicAttributes();
attr = new BasicAttribute("baseName");
@@ -1721,11 +1720,11 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with quote (\") in 'cn' in the RHQ Admin Group");
attrs.put(attr);
- sr = new SearchResult("cn=Allen \\\"The Hammer\\\" Callen,ou=users", "javax.naming.directory.DirContext",
+ sr = new SearchResult("cn=Allen \"The Hammer\" Callen,ou=users", "javax.naming.directory.DirContext",
null, attrs, true);
this.add(sr);
- // dn: cn=Jessica \"Crouching Tiger\" Mathers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
+ // dn: cn=Jessica "Crouching Tiger" Mathers,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
attrs = new BasicAttributes();
attr = new BasicAttribute("baseName");
@@ -1771,11 +1770,11 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with quote (\") in 'cn' in the JBoss Admin Group");
attrs.put(attr);
- sr = new SearchResult("cn=Jessica \\\"Crouching Tiger\\\" Mathers,ou=users",
+ sr = new SearchResult("cn=Jessica \"Crouching Tiger\" Mathers,ou=users",
"javax.naming.directory.DirContext", null, attrs, true);
this.add(sr);
- // dn: cn=Stanley \"Short\" Mein,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
+ // dn: cn=Stanley "Short" Mein,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
attrs = new BasicAttributes();
attr = new BasicAttribute("baseName");
@@ -1821,7 +1820,7 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with quote (\") in 'cn' in the JBoss Monitor Group");
attrs.put(attr);
- sr = new SearchResult("cn=Stanley \\\"Short\\\" Mein,ou=users", null, null, attrs, true);
+ sr = new SearchResult("cn=Stanley \"Short\" Mein,ou=users", null, null, attrs, true);
this.add(sr);
// dn: cn=Sam Not \= Smitherson,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
@@ -2160,7 +2159,7 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with slash (/) in 'cn' in the RHQ Admin Group");
attrs.put(attr);
- sr = new SearchResult("cn=System/Integration API,ou=users", null, null, attrs, true);
+ sr = new SearchResult("cn=System\\/Integration API,ou=users", null, null, attrs, true);
this.add(sr);
// dn: cn=Phil/Susan Carlson,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
@@ -2209,7 +2208,7 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with slash (/) in 'cn' in the JBoss Admin Group");
attrs.put(attr);
- sr = new SearchResult("cn=Phil/Susan Carlson,ou=users", null, null, attrs, true);
+ sr = new SearchResult("cn=Phil\\/Susan Carlson,ou=users", null, null, attrs, true);
this.add(sr);
// dn: cn=System/Integration API 2,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
@@ -2254,7 +2253,7 @@ public class FakeLdapContext implements LdapContext {
attr.add("User with slash (/) in 'cn' in the JBoss Monitor Group");
attrs.put(attr);
- sr = new SearchResult("cn=System/Integration API 2,ou=users", null, null, attrs, true);
+ sr = new SearchResult("cn=System\\/Integration API 2,ou=users", null, null, attrs, true);
this.add(sr);
// dn: cn=Lee -Fast- Croutche,ou=users,dc=test,dc=rhq,dc=redhat,dc=com
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java
index a28c709..7473321 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/resource/group/LdapGroupManagerBean.java
@@ -350,7 +350,6 @@ public class LdapGroupManagerBean implements LdapGroupManagerLocal {
if (si.isRelative()) {
userDN += "," + baseDNs[x];
}
- userDN = userDN + "," + baseDNs[x];
}
userDetails.put("dn", userDN);
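The "lazy escaping" removed above comes down to how many backslashes survive the Java string literal; a small stand-alone sketch (not RHQ code) showing the before/after values of one mock entry:

// Sketch only: in a Java literal, "\\\\" yields two backslash characters while "\\" yields one.
// RFC 4514 requires a single backslash to escape a special character in a DN, so the
// double-escaped form stored an over-escaped DN in the mock data.
public class DnEscapingExample {
    public static void main(String[] args) {
        String before = "cn=Charles H\\\\Samlin,ou=users,dc=test,dc=rhq,dc=redhat,dc=com";
        String after  = "cn=Charles H\\Samlin,ou=users,dc=test,dc=rhq,dc=redhat,dc=com";
        System.out.println(before); // cn=Charles H\\Samlin,...  (two backslashes, over-escaped)
        System.out.println(after);  // cn=Charles H\Samlin,...   (one backslash, correct escape)
    }
}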
commit 8c693ee685d538a28a3c42ce813b10b49997f871
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Mon Jul 22 16:36:23 2013 -0400
Get rid of the upgrade wording; it's confusing when performing an initial install.
diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java
index 61c8a9c..fb9bceb 100644
--- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java
+++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java
@@ -363,14 +363,14 @@ public abstract class AbstractInstall extends ControlCommand {
protected void startRHQServerForInstallation() throws IOException {
try {
- log.info("The RHQ Server must be started to complete its upgrade. Starting the RHQ server in preparation of running the server installer...");
+ log.info("The RHQ Server must be started to complete its installation. Starting the RHQ server in preparation of running the server installer...");
// when you unzip the distro, you are getting a fresh, unadulterated, out-of-box EAP installation, which by default listens
// to port 9999 for its native management subsystem. Make sure some other independent EAP server (or anything for that matter)
// isn't already listening to that port.
if (isPortInUse("127.0.0.1", 9999)) {
throw new IOException(
- "Something is already listening to port 9999 - shut it down before upgrading the server.");
+ "Something is already listening to port 9999 - shut it down before installing the server.");
}
Executor executor = new DefaultExecutor();
@@ -400,7 +400,7 @@ public abstract class AbstractInstall extends ControlCommand {
}
// Wait for the server to complete it's startup
- log.info("Waiting for the RHQ Server to start in preparation of running the server installer for upgrade...");
+ log.info("Waiting for the RHQ Server to start in preparation of running the server installer...");
commandLine = getCommandLine("rhq-installer", "--test");
Executor installerExecutor = new DefaultExecutor();
commit 2409ed2dcd705c58e5024182e95445431b25acf5
Author: John Sanda <jsanda(a)redhat.com>
Date: Mon Jul 22 15:16:49 2013 -0400
document the 4.8 storage patch script
diff --git a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh
index 33984d1..ae78240 100755
--- a/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh
+++ b/etc/scripts/rhq48-storage-patch/rhq48-storage-patch.sh
@@ -1,7 +1,43 @@
#!/bin/bash
+#
+# BACKGROUND:
+# This patch script needs to be run against RHQ 4.8.0 installations prior to
+# upgrading to a later release. You do not need to run this script if upgrading
+# from a version earlier than 4.8.0.
+#
+# PREREQUISITES:
+# 1) Shut down the RHQ storage node and server.
+#
+# 2) Edit <rhq-install-dir>/rhq-storage/conf/cassandra.yaml and set the
+# following property,
+#
+# start_rpc: true
+#
+# 3) Note the value of rpc_port in cassandra.yaml. By default it is 9160 which
+# is fine.
+#
+# RUNNING THE PATCH:
+# 1) cd <patch-dir>
+#
+# 2) ./rhq48-storage-patch.sh <rhq-480-server-dir> <storage-ip-address> <thrift-port> <jmx-port>
+#
+# 3) Carefully review the script output for any errors.
+#
+# 4) Edit cassandra.yaml again and reset start_rpc: false
+#
+# ADDITIONAL NOTES:
+# The <jmx-port> defaults to 7299. If you are uncertain of what value to use,
+# you can find it in the UI. Log into RHQ and go to Administration --> Storage Nodes.
+#
+# If you are uncertain of the value to use for the storage node IP address, you
+# can find the correct value in the storage nodes admin UI as well.
+#
+# EXAMPLE:
+# ./rhq48-storage-patch.sh /opt/rhq-4.8.0 127.0.0.1 9160 7299
+# Usage: ./rhq48-storage-patch.sh <rhq-480-server-dir> <storage-ip-address> <thrift-port> <jmx-port>
function usage() {
- echo "Usage: $0 <rhq-server-dir> <storage-ip-address> <cql-port> <jmx-port>"
+ echo "Usage: $0 <rhq-480-server-dir> <storage-ip-address> <thrift-port> <jmx-port>"
}
if [ $# -ne 4 ]; then
@@ -16,11 +52,11 @@ fi
RHQ_SERVER_DIR=$1
CQL_HOSTNAME=$2
-CQL_PORT=$3
+THRIFT_PORT=$3
JMX_PORT=$4
export CQLSH_HOST=$2
-export CQL_PORT=$3
+export CQLSH_PORT=$3
PATCH="apache-cassandra-1.2.4-patch-1.jar"
commit c91c8f23416db836308b2bf3871fdda87559297e
Author: Stefan Negrea <snegrea(a)redhat.com>
Date: Mon Jul 22 14:12:08 2013 -0500
Update the storage node manager API for alerts to support UI functionality.
diff --git a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml
index a9ce322..58c4eda 100644
--- a/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml
+++ b/modules/enterprise/server/jar/intentional-api-changes-since-4.8.0.xml
@@ -11,8 +11,22 @@
<difference>
<className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className>
<differenceType>7012</differenceType> <!-- method added to an interface -->
+ <method>org.rhq.core.domain.util.PageList findNotAcknowledgedStorageNodeAlerts(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method>
+ <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification>
+ </difference>
+
+ <difference>
+ <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className>
+ <differenceType>7012</differenceType> <!-- method added to an interface -->
<method>org.rhq.core.domain.util.PageList findAllStorageNodeAlerts(org.rhq.core.domain.auth.Subject)</method>
<justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification>
</difference>
+ <difference>
+ <className>org/rhq/enterprise/server/cloud/StorageNodeManagerRemote</className>
+ <differenceType>7012</differenceType> <!-- method added to an interface -->
+ <method>org.rhq.core.domain.util.PageList findAllStorageNodeAlerts(org.rhq.core.domain.auth.Subject, org.rhq.core.domain.cloud.StorageNode)</method>
+ <justification>Adding a method to a remote API interface is safe. This is newly implemented functionality.</justification>
+ </difference>
+
</differences>
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
index c2a7b46..b32ab5b 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
@@ -512,12 +512,22 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
@Override
public PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject) {
- return findStorageNodeAlerts(subject, false);
+ return findStorageNodeAlerts(subject, false, null);
+ }
+
+ @Override
+ public PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject, StorageNode storageNode) {
+ return findStorageNodeAlerts(subject, false, storageNode);
}
@Override
public PageList<Alert> findAllStorageNodeAlerts(Subject subject) {
- return findStorageNodeAlerts(subject, true);
+ return findStorageNodeAlerts(subject, true, null);
+ }
+
+ @Override
+ public PageList<Alert> findAllStorageNodeAlerts(Subject subject, StorageNode storageNode) {
+ return findStorageNodeAlerts(subject, true, storageNode);
}
/**
@@ -527,8 +537,8 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
* @param allAlerts if [true] then return all alerts; if [false] then return only alerts that are not acknowledged
* @return alerts
*/
- private PageList<Alert> findStorageNodeAlerts(Subject subject, boolean allAlerts) {
- Integer[] resouceIdsWithAlertDefinitions = findResourcesWithAlertDefinitions();
+ private PageList<Alert> findStorageNodeAlerts(Subject subject, boolean allAlerts, StorageNode storageNode) {
+ Integer[] resouceIdsWithAlertDefinitions = findResourcesWithAlertDefinitions(storageNode);
PageList<Alert> alerts = new PageList<Alert>();
if( resouceIdsWithAlertDefinitions != null && resouceIdsWithAlertDefinitions.length != 0 ){
@@ -555,31 +565,35 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
return alerts;
}
- /**
- * Return resource Ids for all resources and sub-resources of Storage Nodes that
- * have alert definitions. This will be used by the resource criteria to find
- * all alerts triggered for storage nodes.
- *
- * @return
- */
- private Integer[] findResourcesWithAlertDefinitions() {
- List<Integer> resourceIdsWithAlertDefinitions = new ArrayList<Integer>();
- List<StorageNode> test2 = getStorageNodes();
+ @Override
+ public Integer[] findResourcesWithAlertDefinitions() {
+ return this.findResourcesWithAlertDefinitions(null);
+ }
+
+ @Override
+ public Integer[] findResourcesWithAlertDefinitions(StorageNode storageNode) {
+ List<StorageNode> initialStorageNodes;
+ if (storageNode == null) {
+ initialStorageNodes = getStorageNodes();
+ } else {
+ initialStorageNodes = Arrays.asList(storageNode);
+ }
Queue<Resource> unvisitedResources = new LinkedList<Resource>();
- for (StorageNode node : test2) {
- if (node.getResource() != null) {
- unvisitedResources.add(node.getResource());
+ for (StorageNode initialStorageNode : initialStorageNodes) {
+ if (initialStorageNode.getResource() != null) {
+ unvisitedResources.add(initialStorageNode.getResource());
}
}
- while(!unvisitedResources.isEmpty()){
+ List<Integer> resourceIdsWithAlertDefinitions = new ArrayList<Integer>();
+ while (!unvisitedResources.isEmpty()) {
Resource resource = unvisitedResources.poll();
if (resource.getAlertDefinitions() != null) {
resourceIdsWithAlertDefinitions.add(resource.getId());
}
- for(Resource child: resource.getChildResources()){
+ for (Resource child : resource.getChildResources()) {
unvisitedResources.add(child);
}
}
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java
index 6fca820..69b16c4 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java
@@ -81,6 +81,15 @@ public interface StorageNodeManagerLocal {
PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject);
/**
+ * Fetches the list of Storage Node related alerts that have not yet been acknowledged for the
+ * specified storage node.
+ *
+ * @param subject subject
+ * @return storage nodes alerts not acknowledged
+ */
+ PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject, StorageNode storageNode);
+
+ /**
* Fetches all the Storage Node related alerts.
*
* @param subject subject
@@ -89,6 +98,39 @@ public interface StorageNodeManagerLocal {
PageList<Alert> findAllStorageNodeAlerts(Subject subject);
/**
+ * Fetches all the Storage Node related alerts for the specified storage node.
+ *
+ * @param subject subject
+ * @return all storage nodes alerts
+ */
+ PageList<Alert> findAllStorageNodeAlerts(Subject subject, StorageNode storageNode);
+
+
+ /**
+ * Find ids for all resources and sub-resources of Storage Nodes that
+ * have alert definitions. This can be used by the resource criteria queries to find
+ * all alerts triggered for storage nodes resources.
+ *
+ * @return resource ids
+ */
+ Integer[] findResourcesWithAlertDefinitions();
+
+ /**
+ * Find ids for all resources and sub-resources, of the specified storage node, that
+ * have alert definitions. This can be used by the resource criteria queries to find
+ * all alerts triggered for storage nodes resources.
+ *
+ * If storage node is null it finds ids for all resources and sub-resources of Storage Nodes that
+ * have alert definitions. Please see {@link #findResourcesWithAlertDefinitions()} for more details.
+ *
+ * @param storageNode storage node
+ *
+ * @return resource ids
+ */
+ Integer[] findResourcesWithAlertDefinitions(StorageNode storageNode);
+
+
+ /**
* <p>Prepares the node for subsequent upgrade.</p>
* <p> CAUTION: this method will set the RHQ server to maintenance mode, RHQ storage flushes all the data to disk
* and backup of all the keyspaces is created</p>
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java
index 7be1b07..75ac02b 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerRemote.java
@@ -73,10 +73,27 @@ public interface StorageNodeManagerRemote {
PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject);
/**
+ * Fetches the list of Storage Node related alerts that have not yet been acknowledged for the
+ * specified storage node.
+ *
+ * @param subject subject
+ * @return storage nodes alerts not acknowledged
+ */
+ PageList<Alert> findNotAcknowledgedStorageNodeAlerts(Subject subject, StorageNode storageNode);
+
+ /**
* Fetches all the Storage Node related alerts.
*
* @param subject subject
* @return all storage nodes alerts
*/
PageList<Alert> findAllStorageNodeAlerts(Subject subject);
+
+ /**
+ * Fetches all the Storage Node related alerts for the specified storage node.
+ *
+ * @param subject subject
+ * @return all storage nodes alerts
+ */
+ PageList<Alert> findAllStorageNodeAlerts(Subject subject, StorageNode storageNode);
}
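A hedged usage sketch of the new per-node overloads; how the Subject, the StorageNode, and the StorageNodeManagerRemote reference are obtained is environment-specific and not shown here:

import org.rhq.core.domain.alert.Alert;
import org.rhq.core.domain.auth.Subject;
import org.rhq.core.domain.cloud.StorageNode;
import org.rhq.core.domain.util.PageList;
import org.rhq.enterprise.server.cloud.StorageNodeManagerRemote;

// Sketch only: counts all vs. unacknowledged alerts for a single storage node
// using the two methods added to the remote interface in this commit.
public class StorageNodeAlertsExample {
    static void printAlertCounts(StorageNodeManagerRemote manager, Subject subject, StorageNode node) {
        PageList<Alert> unacked = manager.findNotAcknowledgedStorageNodeAlerts(subject, node);
        PageList<Alert> all = manager.findAllStorageNodeAlerts(subject, node);
        System.out.println(node + ": " + unacked.size() + " unacknowledged of " + all.size() + " alerts");
    }
}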
commit c6c9e50398ffb5fc6d297ceffc369975a56b3ef9
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Mon Jul 22 14:09:52 2013 -0400
Add windows support for storage install options and resource config update. Introduce
rhq-storage-wrapper.env to hold the configurable values (somewhat mimicking
cassandra-jvm.properties) and apply the values as token replacements in
rhq-storage-wrapper.conf.
Note that cassandra-jvm.properties is still kept up to date on windows and
can be used generically, as needed.
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
index 6547043..60667cc 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
@@ -91,7 +91,14 @@ public class Deployer {
applyConfigChanges(confDir, "cassandra.yaml", tokens);
applyConfigChanges(confDir, "log4j-server.properties", tokens);
applyChangesToCassandraJvmProps(confDir, deploymentOptions);
-// applyConfigChanges(confDir, "cassandra-env.sh", tokens);
+
+ // For windows, update the service wrapper env. It may not be necessary to have updated cassandra-jvm.properties
+ // as well as this file, but for now we'll update both, leaving the former as a dependably set file.
+ if (File.separatorChar == '\\') {
+ applyChangesToWindowsServiceWrapper(deployDir);
+ }
+
+ // applyConfigChanges(confDir, "cassandra-env.sh", tokens);
}
private void applyConfigChanges(File confDir, String fileName, Map<String, String> tokens)
@@ -109,8 +116,8 @@ public class Deployer {
rhqFile.delete();
} catch (IOException e) {
log.error("An unexpected error occurred while apply configuration changes to " + filteredFile, e);
- throw new DeploymentException("An unexpected error occurred while apply configuration changes to " +
- filteredFile, e);
+ throw new DeploymentException("An unexpected error occurred while apply configuration changes to "
+ + filteredFile, e);
}
}
@@ -132,8 +139,8 @@ public class Deployer {
String javaVersion = System.getProperty("java.version");
// The check here is taken right from cassandra-env.sh
- if ((!isOpenJDK() || javaVersion.compareTo("1.6.0") > 0) ||
- (javaVersion.equals("1.6.0") && getJavaPatchVersion() > 23)) {
+ if ((!isOpenJDK() || javaVersion.compareTo("1.6.0") > 0)
+ || (javaVersion.equals("1.6.0") && getJavaPatchVersion() > 23)) {
properties.put("java_agent", "-javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar");
}
@@ -165,6 +172,29 @@ public class Deployer {
return Integer.parseInt(javaVersion.substring(startIndex + 1, javaVersion.length()));
}
+ public void applyChangesToWindowsServiceWrapper(File deployDir) throws DeploymentException {
+ File wrapperDir = new File(deployDir, "../bin/wrapper");
+ File wrapperEnvFile = new File(wrapperDir, "rhq-storage-wrapper.env");
+
+ try {
+ log.info("Applying configuration changes to " + wrapperEnvFile);
+
+ PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(wrapperEnvFile.getAbsolutePath());
+ Properties wrapperEnvProps = propertiesUpdater.loadExistingProperties();
+
+ wrapperEnvProps.setProperty("set.heap_min", "-Xms" + deploymentOptions.getHeapSize());
+ wrapperEnvProps.setProperty("set.heap_max", "-Xmx" + deploymentOptions.getHeapSize());
+ wrapperEnvProps.setProperty("set.heap_new", "-Xmn" + deploymentOptions.getHeapNewSize());
+ wrapperEnvProps.setProperty("set.thread_stack_size", "-Xss" + deploymentOptions.getStackSize());
+ wrapperEnvProps.setProperty("set.jmx_port", deploymentOptions.getJmxPort().toString());
+
+ propertiesUpdater.update(wrapperEnvProps);
+ } catch (IOException e) {
+ log.error("An error occurred while updating " + wrapperEnvFile, e);
+ throw new DeploymentException("An error occurred while updating " + wrapperEnvFile, e);
+ }
+ }
+
public void updateFilePerms() {
File deployDir = new File(deploymentOptions.getBasedir());
File binDir = new File(deployDir, "bin");
@@ -187,8 +217,8 @@ public class Deployer {
try {
authFile.delete();
- StreamUtil.copy(new StringReader(StringUtil.collectionToString(addresses, "\n")),
- new FileWriter(authFile), true);
+ StreamUtil.copy(new StringReader(StringUtil.collectionToString(addresses, "\n")), new FileWriter(authFile),
+ true);
} catch (IOException e) {
throw new RuntimeException("Failed to update " + authFile);
}
diff --git a/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.conf b/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.conf
index 14f2ff1..de83364 100644
--- a/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.conf
+++ b/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.conf
@@ -66,23 +66,25 @@ wrapper.app.parameter.3="-Dcassandra-foreground=yes"
# Additional JVM parameters (quotes ARE needed)
wrapper.java.additional.1="-ea"
wrapper.java.additional.2="-javaagent:"%RHQ_STORAGE_HOME%\lib\jamm-0.2.5.jar""
-wrapper.java.additional.3="-Xms1G"
-wrapper.java.additional.4="-Xmx1G"
-wrapper.java.additional.5="-XX:+HeapDumpOnOutOfMemoryError"
-wrapper.java.additional.6="-XX:+UseParNewGC"
-wrapper.java.additional.7="-XX:+UseConcMarkSweepGC"
-wrapper.java.additional.8="-XX:+CMSParallelRemarkEnabled"
-wrapper.java.additional.9="-XX:SurvivorRatio=8"
-wrapper.java.additional.10="-XX:MaxTenuringThreshold=1"
-wrapper.java.additional.11="-XX:CMSInitiatingOccupancyFraction=75"
-wrapper.java.additional.12="-XX:+UseCMSInitiatingOccupancyOnly"
-wrapper.java.additional.13="-Dcom.sun.management.jmxremote.port=7299"
-wrapper.java.additional.14="-Dcom.sun.management.jmxremote.ssl=false"
-wrapper.java.additional.15="-Dcom.sun.management.jmxremote.authenticate=false"
-wrapper.java.additional.16="-Dlog4j.configuration=log4j-server.properties"
-wrapper.java.additional.17="-Dlog4j.defaultInitOverride=true"
-
-# We want to make sure the Storage Node starts in the casandra bin directory
+wrapper.java.additional.3="%heap_min%"
+wrapper.java.additional.4="%heap_max%"
+wrapper.java.additional.5="%heap_new%"
+wrapper.java.additional.6="%heap_dump_on_OOMError%"
+wrapper.java.additional.7="%heap_dump_dir%"
+wrapper.java.additional.8="-XX:+UseConcMarkSweepGC"
+wrapper.java.additional.9="-XX:+CMSParallelRemarkEnabled"
+wrapper.java.additional.10="-XX:SurvivorRatio=8"
+wrapper.java.additional.11="-XX:MaxTenuringThreshold=1"
+wrapper.java.additional.12="-XX:CMSInitiatingOccupancyFraction=75"
+wrapper.java.additional.13="-XX:+UseCMSInitiatingOccupancyOnly"
+wrapper.java.additional.14="-XX:+UseParNewGC"
+wrapper.java.additional.15="-Dcom.sun.management.jmxremote.port=%jmx_port%"
+wrapper.java.additional.16="-Dcom.sun.management.jmxremote.ssl=false"
+wrapper.java.additional.17="-Dcom.sun.management.jmxremote.authenticate=false"
+wrapper.java.additional.18="-Dlog4j.configuration=log4j-server.properties"
+wrapper.java.additional.19="-Dlog4j.defaultInitOverride=true"
+
+# We want to make sure the Storage Node starts in the cassandra bin directory
wrapper.working.dir=%RHQ_STORAGE_HOME%/bin
#*****************************************************************************
diff --git a/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.env b/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.env
new file mode 100644
index 0000000..1441e0a
--- /dev/null
+++ b/modules/enterprise/server/appserver/src/main/bin-resources/bin/wrapper/rhq-storage-wrapper.env
@@ -0,0 +1,24 @@
+#*****************************************************************************
+# RHQ Storage Node Java Service Wrapper Environment Settings File
+#
+# This file specifies a set of environment variables that will be
+# applied to the Storage Node JVM.
+#
+# THIS FILE SHOULD NOT BE EDITED!
+#
+# This file represents the values managed as RHQ Storage Node resource
+# configuration values, or set by the installer.
+#
+#*****************************************************************************
+
+set.jmx_port=7299
+
+set.heap_min=-Xms512M
+set.heap_max=-Xmx512M
+set.heap_new=-Xmn128M
+
+set.thread_stack_size=-Xss180k
+
+set.heap_dump_on_OOMError=-XX:+HeapDumpOnOutOfMemoryError
+
+set.heap_dump_dir=
diff --git a/modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-storage-wrapper.inc b/modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-storage-wrapper.inc
index ee0d448..6941358 100644
--- a/modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-storage-wrapper.inc
+++ b/modules/enterprise/server/appserver/src/main/dev-resources/bin/wrapper/rhq-storage-wrapper.inc
@@ -4,11 +4,11 @@
#
# override and lower the initial memory profile
-wrapper.java.additional.18=-Xms128M
-wrapper.java.additional.19=-Xmx256M
+wrapper.java.additional.20=-Xms128M
+wrapper.java.additional.21=-Xmx256M
# enable remote debugging
-#wrapper.java.additional.20=-agentlib:jdwp=transport=dt_socket,address=8788,server=y,suspend=n
+#wrapper.java.additional.22=-agentlib:jdwp=transport=dt_socket,address=8788,server=y,suspend=n
# disable JVM startup timeout
wrapper.startup.timeout=0
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java
index 8d1771d..1667877 100644
--- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java
+++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeConfigDelegate.java
@@ -19,10 +19,17 @@ import org.rhq.core.util.StringUtil;
public class StorageNodeConfigDelegate implements ConfigurationFacet {
private File jvmOptsFile;
+ private File wrapperEnvFile;
public StorageNodeConfigDelegate(File basedir) {
File confDir = new File(basedir, "conf");
jvmOptsFile = new File(confDir, "cassandra-jvm.properties");
+
+ // for windows, config props also get propagated to the wrapper env
+ if (isWindows()) {
+ File wrapperDir = new File(basedir, "../bin/wrapper");
+ wrapperEnvFile = new File(wrapperDir, "rhq-storage-wrapper.env");
+ }
}
@Override
@@ -56,6 +63,16 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet {
return config;
}
+ /**
+ * Ensure that the path uses only forward slash.
+ * @param path
+ * @return forward-slashed path, or null if path is null
+ */
+ private static String useForwardSlash(String path) {
+
+ return (null != path) ? path.replace('\\', '/') : null;
+ }
+
private String getHeapMinProp(Properties properties) {
String value = properties.getProperty("heap_min");
@@ -115,47 +132,14 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet {
@Override
public void updateResourceConfiguration(ConfigurationUpdateReport configurationUpdateReport) {
try {
- PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(jvmOptsFile.getAbsolutePath());
- Properties properties = propertiesUpdater.loadExistingProperties();
-
Configuration config = configurationUpdateReport.getConfiguration();
- String maxHeapSize = config.getSimpleValue("maxHeapSize");
- if (!StringUtil.isEmpty(maxHeapSize)) {
- validateHeapArg("maxHeapSize", maxHeapSize);
- // We want min and max heap to be the same
- properties.setProperty("heap_min", "-Xms" + maxHeapSize);
- properties.setProperty("heap_max", "-Xmx" + maxHeapSize);
- }
-
- String heapNewSize = config.getSimpleValue("heapNewSize");
- if (!StringUtil.isEmpty(heapNewSize)) {
- validateHeapArg("heapNewSize", heapNewSize);
- properties.setProperty("heap_new", "-Xmn" + heapNewSize);
- }
+ updateCassandraJvmProps(config);
- String threadStackSize = config.getSimpleValue("threadStackSize");
- if (!StringUtil.isEmpty(threadStackSize)) {
- validateStackArg(threadStackSize);
- properties.setProperty("thread_stack_size", "-Xss" + threadStackSize + "k");
+ if (isWindows()) {
+ updateWrapperEnv(config);
}
- PropertySimple heapDumpOnOMMError = config.getSimple("heapDumpOnOOMError");
- if (heapDumpOnOMMError != null) {
- if (heapDumpOnOMMError.getBooleanValue()) {
- properties.setProperty("heap_dump_on_OOMError", "-XX:+HeapDumpOnOutOfMemoryError");
- } else {
- properties.setProperty("heap_dump_on_OOMError", "");
- }
- }
-
- String heapDumpDir = config.getSimpleValue("heapDumpDir");
- if (!StringUtil.isEmpty(heapDumpDir)) {
- properties.setProperty("heap_dump_dir", heapDumpDir);
- }
-
- propertiesUpdater.update(properties);
-
configurationUpdateReport.setStatus(ConfigurationUpdateStatus.SUCCESS);
} catch (IllegalArgumentException e) {
configurationUpdateReport.setErrorMessage("No configuration update was applied: " + e.getMessage());
@@ -164,6 +148,88 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet {
}
}
+ private void updateCassandraJvmProps(Configuration config) throws IOException {
+ PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(jvmOptsFile.getAbsolutePath());
+ Properties properties = propertiesUpdater.loadExistingProperties();
+
+ String maxHeapSize = config.getSimpleValue("maxHeapSize");
+ if (!StringUtil.isEmpty(maxHeapSize)) {
+ validateHeapArg("maxHeapSize", maxHeapSize);
+ // We want min and max heap to be the same
+ properties.setProperty("heap_min", "-Xms" + maxHeapSize);
+ properties.setProperty("heap_max", "-Xmx" + maxHeapSize);
+ }
+
+ String heapNewSize = config.getSimpleValue("heapNewSize");
+ if (!StringUtil.isEmpty(heapNewSize)) {
+ validateHeapArg("heapNewSize", heapNewSize);
+ properties.setProperty("heap_new", "-Xmn" + heapNewSize);
+ }
+
+ String threadStackSize = config.getSimpleValue("threadStackSize");
+ if (!StringUtil.isEmpty(threadStackSize)) {
+ validateStackArg(threadStackSize);
+ properties.setProperty("thread_stack_size", "-Xss" + threadStackSize + "k");
+ }
+
+ PropertySimple heapDumpOnOMMError = config.getSimple("heapDumpOnOOMError");
+ if (heapDumpOnOMMError != null) {
+ if (heapDumpOnOMMError.getBooleanValue()) {
+ properties.setProperty("heap_dump_on_OOMError", "-XX:+HeapDumpOnOutOfMemoryError");
+ } else {
+ properties.setProperty("heap_dump_on_OOMError", "");
+ }
+ }
+
+ String heapDumpDir = useForwardSlash(config.getSimpleValue("heapDumpDir"));
+ if (!StringUtil.isEmpty(heapDumpDir)) {
+ properties.setProperty("heap_dump_dir", heapDumpDir);
+ }
+
+ propertiesUpdater.update(properties);
+ }
+
+ private void updateWrapperEnv(Configuration config) throws IOException {
+ PropertiesFileUpdate propertiesUpdater = new PropertiesFileUpdate(wrapperEnvFile.getAbsolutePath());
+ Properties properties = propertiesUpdater.loadExistingProperties();
+
+ String maxHeapSize = config.getSimpleValue("maxHeapSize");
+ if (!StringUtil.isEmpty(maxHeapSize)) {
+ validateHeapArg("maxHeapSize", maxHeapSize);
+ // We want min and max heap to be the same
+ properties.setProperty("set.heap_min", "-Xms" + maxHeapSize);
+ properties.setProperty("set.heap_max", "-Xmx" + maxHeapSize);
+ }
+
+ String heapNewSize = config.getSimpleValue("heapNewSize");
+ if (!StringUtil.isEmpty(heapNewSize)) {
+ validateHeapArg("heapNewSize", heapNewSize);
+ properties.setProperty("set.heap_new", "-Xmn" + heapNewSize);
+ }
+
+ String threadStackSize = config.getSimpleValue("threadStackSize");
+ if (!StringUtil.isEmpty(threadStackSize)) {
+ validateStackArg(threadStackSize);
+ properties.setProperty("set.thread_stack_size", "-Xss" + threadStackSize + "k");
+ }
+
+ PropertySimple heapDumpOnOMMError = config.getSimple("heapDumpOnOOMError");
+ if (heapDumpOnOMMError != null) {
+ if (heapDumpOnOMMError.getBooleanValue()) {
+ properties.setProperty("set.heap_dump_on_OOMError", "-XX:+HeapDumpOnOutOfMemoryError");
+ } else {
+ properties.setProperty("set.heap_dump_on_OOMError", "");
+ }
+ }
+
+ String heapDumpDir = useForwardSlash(config.getSimpleValue("heapDumpDir"));
+ if (!StringUtil.isEmpty(heapDumpDir)) {
+ properties.setProperty("set.heap_dump_dir", heapDumpDir);
+ }
+
+ propertiesUpdater.update(properties);
+ }
+
private void validateHeapArg(String name, String value) {
if (value.length() < 2) {
throw new IllegalArgumentException(value + " is not a legal value for the property [" + name + "]");
@@ -189,4 +255,8 @@ public class StorageNodeConfigDelegate implements ConfigurationFacet {
throw new IllegalArgumentException(value + " is not a legal value for the property [threadStackSize]");
}
}
+
+ private boolean isWindows() {
+ return File.separatorChar == '\\';
+ }
}
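On Windows the same resource configuration values now land in two files; a rough, self-contained sketch of the mapping updateWrapperEnv() applies to bin/wrapper/rhq-storage-wrapper.env (plain java.util.Properties stands in for PropertiesFileUpdate, and the values are illustrative):

import java.util.Properties;

// Sketch only: mirrors the property names written by updateWrapperEnv() above.
// rhq-storage-wrapper.conf then consumes them as %heap_min%, %heap_max%, %heap_new%,
// %jmx_port%, and so on.
public class WrapperEnvMappingExample {
    public static void main(String[] args) {
        String maxHeapSize = "512M";   // illustrative values
        String heapNewSize = "128M";
        String threadStackSize = "180";

        Properties wrapperEnv = new Properties();
        wrapperEnv.setProperty("set.heap_min", "-Xms" + maxHeapSize); // min kept equal to max
        wrapperEnv.setProperty("set.heap_max", "-Xmx" + maxHeapSize);
        wrapperEnv.setProperty("set.heap_new", "-Xmn" + heapNewSize);
        wrapperEnv.setProperty("set.thread_stack_size", "-Xss" + threadStackSize + "k");
        wrapperEnv.setProperty("set.heap_dump_on_OOMError", "-XX:+HeapDumpOnOutOfMemoryError");

        wrapperEnv.list(System.out);
    }
}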
commit 969ea38e7254d61903c699380bd066d6cad3e85e
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Mon Jul 22 14:06:39 2013 -0400
When recreating win services make sure they get started appropriately.
diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java
index 8c885b0..61c8a9c 100644
--- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java
+++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/AbstractInstall.java
@@ -36,12 +36,16 @@ import java.util.Properties;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.exec.DefaultExecuteResultHandler;
import org.apache.commons.exec.DefaultExecutor;
+import org.apache.commons.exec.ExecuteException;
import org.apache.commons.exec.Executor;
import org.apache.commons.exec.PumpStreamHandler;
+
import org.jboss.as.controller.client.ModelControllerClient;
+
import org.rhq.common.jbossas.client.controller.DeploymentJBossASClient;
import org.rhq.common.jbossas.client.controller.MCCHelper;
import org.rhq.server.control.ControlCommand;
+import org.rhq.server.control.RHQControlException;
/**
* Common code for commands that perform installs. Basically shared code for Install and Upgrade commands.
@@ -55,7 +59,7 @@ public abstract class AbstractInstall extends ControlCommand {
protected final String STORAGE_CONFIG_PROP = "rhqctl.install.storage-config";
- protected void installWindowsService(File workingDir, String batFile) throws Exception {
+ protected void installWindowsService(File workingDir, String batFile, boolean start) throws Exception {
Executor executor = new DefaultExecutor();
executor.setWorkingDirectory(workingDir);
executor.setStreamHandler(new PumpStreamHandler());
@@ -69,6 +73,11 @@ public abstract class AbstractInstall extends ControlCommand {
commandLine = getCommandLine(batFile, "install");
executor.execute(commandLine);
+
+ if (start) {
+ commandLine = getCommandLine(batFile, "start");
+ executor.execute(commandLine);
+ }
}
protected void validateCustomStorageDataDirectories(CommandLine commandLine, List<String> errors) {
@@ -100,6 +109,54 @@ public abstract class AbstractInstall extends ControlCommand {
}
}
+ protected void waitForProcessToStop(String pid) throws Exception {
+
+ if (isWindows() || pid == null) {
+ // For the moment we have no better way to just wait some time
+ Thread.sleep(10 * 1000L);
+ } else {
+ int tries = 5;
+ while (tries > 0) {
+ log.debug(".");
+ if (!isUnixPidRunning(pid)) {
+ break;
+ }
+ Thread.sleep(2 * 1000L);
+ tries--;
+ }
+ if (tries == 0) {
+ throw new RHQControlException("Process [" + pid
+ + "] did not finish yet. Terminate it manually and retry.");
+ }
+ }
+
+ }
+
+ protected boolean isUnixPidRunning(String pid) {
+
+ Executor executor = new DefaultExecutor();
+ executor.setWorkingDirectory(getBinDir());
+ executor.setStreamHandler(new PumpStreamHandler());
+ org.apache.commons.exec.CommandLine commandLine;
+
+ commandLine = new org.apache.commons.exec.CommandLine("/bin/kill").addArgument("-0").addArgument(pid);
+
+ try {
+ int code = executor.execute(commandLine);
+ if (code != 0) {
+ return false;
+ }
+ } catch (ExecuteException ee) {
+ if (ee.getExitValue() == 1) {
+ // return code 1 means process does not exist
+ return false;
+ }
+ } catch (IOException e) {
+ log.error("Checking for running process failed: " + e.getMessage());
+ }
+ return true;
+ }
+
protected void waitForRHQServerToInitialize() throws Exception {
try {
final long messageInterval = 30000L;
@@ -287,7 +344,6 @@ public abstract class AbstractInstall extends ControlCommand {
log.debug("Stopping RHQ server...");
-
Executor executor = new DefaultExecutor();
executor.setWorkingDirectory(serverBinDir);
executor.setStreamHandler(new PumpStreamHandler());
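Editor's note: the isUnixPidRunning() hunk above tests process liveness by sending signal 0 with /bin/kill and reading the exit code (0 means the pid exists, 1 means it does not). A minimal standalone sketch of the same check using only the JDK follows; the class name is an assumption and is not part of the patch.

import java.io.IOException;

// Editor's sketch of the "kill -0" liveness check used in AbstractInstall above.
public class UnixPidCheck {

    // Returns true if the process with the given pid appears to be running.
    public static boolean isPidRunning(String pid) throws IOException, InterruptedException {
        // "kill -0" delivers no signal; it only reports whether the pid exists
        // and can be signaled. Exit code 0 => running, 1 => no such process.
        Process p = new ProcessBuilder("/bin/kill", "-0", pid)
            .redirectErrorStream(true)
            .start();
        return p.waitFor() == 0;
    }

    public static void main(String[] args) throws Exception {
        System.out.println(isPidRunning(args[0]) ? "running" : "not running");
    }
}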
diff --git a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java
index bb6aa40..0808db2 100644
--- a/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java
+++ b/modules/enterprise/server/server-control/src/main/java/org/rhq/server/control/command/Install.java
@@ -142,7 +142,7 @@ public class Install extends AbstractInstall {
if (!isStorageInstalled()) {
installStorageNode(getStorageBasedir(), commandLine);
} else if (isWindows()) {
- installWindowsService(getBinDir(), "rhq-storage");
+ installWindowsService(getBinDir(), "rhq-storage", true);
}
if (!isServerInstalled()) {
@@ -150,7 +150,7 @@ public class Install extends AbstractInstall {
runRHQServerInstaller();
waitForRHQServerToInitialize();
} else if (isWindows()) {
- installWindowsService(getBinDir(), "rhq-server");
+ installWindowsService(getBinDir(), "rhq-server", true);
}
if (!isAgentInstalled()) {
@@ -158,13 +158,15 @@ public class Install extends AbstractInstall {
File agentBasedir = getAgentBasedir();
installAgent(agentBasedir);
configureAgent(agentBasedir, commandLine);
- if (Boolean.parseBoolean(commandLine.getOptionValue(AGENT_AUTOSTART_OPTION, "true"))) {
+ boolean start = Boolean.parseBoolean(commandLine.getOptionValue(AGENT_AUTOSTART_OPTION, "true"));
+ if (start) {
startAgent(agentBasedir, true);
} else {
log.info("The agent was installed but was told not to start automatically.");
}
} else if (isWindows()) {
- installWindowsService(new File(getAgentBasedir(), "bin"), "rhq-agent-wrapper");
+ boolean start = Boolean.parseBoolean(commandLine.getOptionValue(AGENT_AUTOSTART_OPTION, "true"));
+ installWindowsService(new File(getAgentBasedir(), "bin"), "rhq-agent-wrapper", start);
}
} else {
@@ -173,7 +175,7 @@ public class Install extends AbstractInstall {
log.info("The RHQ storage node is already installed in " + new File(getBaseDir(), "storage"));
if (isWindows()) {
- installWindowsService(getBinDir(), "rhq-storage");
+ installWindowsService(getBinDir(), "rhq-storage", true);
} else {
log.info("Skipping storage node installation.");
}
@@ -200,7 +202,7 @@ public class Install extends AbstractInstall {
log.warn("The RHQ server is already installed.");
if (isWindows()) {
- installWindowsService(getBinDir(), "rhq-server");
+ installWindowsService(getBinDir(), "rhq-server", true);
} else {
log.info("Skipping server installation.");
}
@@ -217,8 +219,10 @@ public class Install extends AbstractInstall {
if (isAgentInstalled() && !commandLine.hasOption(STORAGE_OPTION)) {
log.info("The RHQ agent is already installed in [" + getAgentBasedir() + "]");
+ boolean start = Boolean
+ .parseBoolean(commandLine.getOptionValue(AGENT_AUTOSTART_OPTION, "true"));
if (isWindows()) {
- installWindowsService(new File(getAgentBasedir(), "bin"), "rhq-agent-wrapper");
+ installWindowsService(new File(getAgentBasedir(), "bin"), "rhq-agent-wrapper", start);
} else {
log.info("Skipping agent installation.");
}
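Editor's note: with the new boolean, the installer runs the service wrapper script twice when a service should come up immediately, once with "install" and once with "start". The sketch below shows that call pattern with Apache Commons Exec; the ".bat" suffix handling and the class itself are assumptions standing in for the project's getCommandLine() helper.

import java.io.File;

import org.apache.commons.exec.CommandLine;
import org.apache.commons.exec.DefaultExecutor;
import org.apache.commons.exec.Executor;
import org.apache.commons.exec.PumpStreamHandler;

// Editor's sketch of the install-then-start sequence added in this commit.
public class WindowsServiceInstallSketch {

    public void installAndMaybeStart(File workingDir, String batFile, boolean start) throws Exception {
        Executor executor = new DefaultExecutor();
        executor.setWorkingDirectory(workingDir);
        executor.setStreamHandler(new PumpStreamHandler());

        // Register the service first...
        executor.execute(new CommandLine(new File(workingDir, batFile + ".bat")).addArgument("install"));

        // ...then optionally bring it up right away, as the new flag requests.
        if (start) {
            executor.execute(new CommandLine(new File(workingDir, batFile + ".bat")).addArgument("start"));
        }
    }
}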
[rhq] 2 commits - modules/plugins
by John Sanda
modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java | 15
modules/plugins/rhq-storage/pom.xml | 113 +++++
modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java | 148 ++++++
modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml | 10
modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java | 216 ++++++++++
modules/plugins/rhq-storage/src/test/resources/log4j.properties | 42 +
6 files changed, 534 insertions(+), 10 deletions(-)
New commits:
commit b537244bad778a80f6fdf92880abc245eed465ec
Author: John Sanda <jsanda(a)redhat.com>
Date: Tue Jul 23 22:23:06 2013 -0400
initial commit for StorageNodeComponentITest
This is a first stab at some integration tests for the storage plugin.
diff --git a/modules/plugins/rhq-storage/pom.xml b/modules/plugins/rhq-storage/pom.xml
index a88f56e..df79e40 100644
--- a/modules/plugins/rhq-storage/pom.xml
+++ b/modules/plugins/rhq-storage/pom.xml
@@ -10,11 +10,16 @@
<groupId>org.rhq</groupId>
<artifactId>rhq-rhqstorage-plugin</artifactId>
- <packaging>jar</packaging>
<name>RHQ Storage Plugin</name>
<description>A plugin for managing RHQ Storage Nodes</description>
+ <properties>
+ <pc.basedir>${project.build.directory}/plugin-container</pc.basedir>
+ <pc.plugins.dir>${pc.basedir}/plugins</pc.plugins.dir>
+ <pc.lib.dir>${pc.basedir}/lib</pc.lib.dir>
+ </properties>
+
<dependencies>
<dependency>
<groupId>${rhq.groupId}</groupId>
@@ -27,7 +32,6 @@
<groupId>${rhq.groupId}</groupId>
<artifactId>rhq-cassandra-plugin</artifactId>
<version>${project.version}</version>
- <!--<scope>provided</scope>-->
</dependency>
<dependency>
@@ -35,8 +39,113 @@
<artifactId>org-mc4j-ems</artifactId>
<scope>provided</scope>
</dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>rhq-cassandra-ccm-core</artifactId>
+ <version>${project.version}</version>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>rhq-platform-plugin</artifactId>
+ <version>${project.version}</version>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>rhq-cassandra-schema</artifactId>
+ <version>${project.version}</version>
+ </dependency>
</dependencies>
+ <build>
+ <plugins>
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <phase>pre-integration-test</phase>
+ <configuration>
+ <target>
+ <property name="sigar.dir" value="${project.build.directory/sigar}"/>
+
+ <mkdir dir="${pc.basedir}"/>
+ <mkdir dir="${pc.lib.dir}"/>
+ <mkdir dir="${pc.plugins.dir}"/>
+
+ <copy file="${org.rhq:rhq-platform-plugin:jar}" todir="${pc.plugins.dir}"/>
+ <copy file="${org.rhq:rhq-jmx-plugin:jar}" todir="${pc.plugins.dir}"/>
+ <copy file="${org.rhq:rhq-cassandra-plugin:jar}" todir="${pc.plugins.dir}"/>
+ <copy file="${project.build.directory}/${project.build.finalName}.jar" todir="${pc.plugins.dir}"/>
+
+ <unzip src="${org.hyperic:sigar-dist:zip}" dest="${sigar.dir}">
+ <patternset>
+ <include name="**/lib/sigar.jar" />
+ <include name="**/lib/bcel*.jar" />
+ <include name="**/lib/*.so" />
+ <include name="**/lib/*.sl" />
+ <include name="**/lib/*.dll" />
+ <include name="**/lib/*.dylib" />
+ </patternset>
+ </unzip>
+ <move todir="${pc.lib.dir}" flatten="true">
+ <fileset dir="${sigar.dir}" includes="**/lib/*"/>
+ </move>
+ <delete dir="${sigar.dir}"/>
+ </target>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <artifactId>maven-failsafe-plugin</artifactId>
+ <version>2.13</version>
+ <executions>
+ <execution>
+ <id>integration-test</id>
+ <goals>
+ <goal>integration-test</goal>
+ </goals>
+ <configuration>
+ <includes>
+ <include>**/*ITest.java</include>
+ </includes>
+ <argLine>-Djava.library.path=${pc.lib.dir}</argLine>
+ <systemPropertyVariables>
+ <pc.plugins.dir>${pc.plugins.dir}</pc.plugins.dir>
+ </systemPropertyVariables>
+ </configuration>
+ </execution>
+ <execution>
+ <id>verify</id>
+ <goals>
+ <goal>verify</goal>
+ </goals>
+ <configuration>
+ <testFailureIgnore>false</testFailureIgnore>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <excludes>
+ <exclude>**/*ITest.java</exclude>
+ </excludes>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
<profiles>
<profile>
<id>dev</id>
diff --git a/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
new file mode 100644
index 0000000..cd9f148
--- /dev/null
+++ b/modules/plugins/rhq-storage/src/test/java/org/rhq/plugins/storage/StorageNodeComponentITest.java
@@ -0,0 +1,216 @@
+package org.rhq.plugins.storage;
+
+import static java.util.Arrays.asList;
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertNotNull;
+
+import java.io.File;
+import java.net.InetAddress;
+import java.util.Set;
+
+import com.google.common.collect.Sets;
+
+import org.testng.annotations.AfterSuite;
+import org.testng.annotations.BeforeSuite;
+import org.testng.annotations.Test;
+
+import org.rhq.cassandra.CassandraClusterManager;
+import org.rhq.cassandra.ClusterInitService;
+import org.rhq.cassandra.Deployer;
+import org.rhq.cassandra.DeploymentOptions;
+import org.rhq.cassandra.DeploymentOptionsFactory;
+import org.rhq.cassandra.schema.SchemaManager;
+import org.rhq.core.clientapi.server.discovery.InventoryReport;
+import org.rhq.core.domain.cloud.StorageNode;
+import org.rhq.core.domain.configuration.Configuration;
+import org.rhq.core.domain.measurement.Availability;
+import org.rhq.core.domain.measurement.AvailabilityType;
+import org.rhq.core.domain.resource.Resource;
+import org.rhq.core.domain.resource.ResourceType;
+import org.rhq.core.pc.PluginContainer;
+import org.rhq.core.pc.PluginContainerConfiguration;
+import org.rhq.core.pc.inventory.InventoryManager;
+import org.rhq.core.pc.operation.OperationContextImpl;
+import org.rhq.core.pc.operation.OperationManager;
+import org.rhq.core.pc.operation.OperationServicesAdapter;
+import org.rhq.core.pc.plugin.FileSystemPluginFinder;
+import org.rhq.core.pluginapi.operation.OperationServicesResult;
+import org.rhq.core.pluginapi.operation.OperationServicesResultCode;
+import org.rhq.core.pluginapi.util.ProcessExecutionUtility;
+import org.rhq.core.system.ProcessExecution;
+import org.rhq.core.system.ProcessExecutionResults;
+import org.rhq.core.system.SystemInfo;
+import org.rhq.core.system.SystemInfoFactory;
+
+/**
+ * @author John Sanda
+ */
+public class StorageNodeComponentITest {
+
+ private File basedir;
+
+ private Resource storageNode;
+
+ @BeforeSuite
+ public void deployStorageNodeAndPluginContainer() throws Exception {
+ basedir = new File("target", "rhq-storage");
+
+ deployStorageNode();
+
+ initPluginContainer();
+ }
+
+ private void deployStorageNode() throws Exception {
+ DeploymentOptionsFactory factory = new DeploymentOptionsFactory();
+ DeploymentOptions deploymentOptions = factory.newDeploymentOptions();
+ String address = "127.0.0.1";
+
+ deploymentOptions.setSeeds(address);
+ deploymentOptions.setListenAddress(address);
+ deploymentOptions.setRpcAddress(address);
+ deploymentOptions.setBasedir(basedir.getAbsolutePath());
+ deploymentOptions.setCommitLogDir(new File(basedir, "commit_log").getAbsolutePath());
+ deploymentOptions.setDataDir(new File(basedir, "data").getAbsolutePath());
+ deploymentOptions.setSavedCachesDir(new File(basedir, "saved_caches").getAbsolutePath());
+ deploymentOptions.setCommitLogDir(new File(basedir, "logs").getAbsolutePath());
+ deploymentOptions.setLoggingLevel("DEBUG");
+ deploymentOptions.setNativeTransportPort(9142);
+ deploymentOptions.setJmxPort(7399);
+ deploymentOptions.setHeapSize("256M");
+ deploymentOptions.setHeapNewSize("64M");
+
+ deploymentOptions.load();
+
+ Deployer deployer = new Deployer();
+ deployer.setDeploymentOptions(deploymentOptions);
+
+ deployer.unzipDistro();
+ deployer.applyConfigChanges();
+ deployer.updateFilePerms();
+ deployer.updateStorageAuthConf(Sets.newHashSet(InetAddress.getByName(address)));
+
+ File binDir = new File(basedir, "bin");
+ SystemInfo systemInfo = SystemInfoFactory.createSystemInfo();
+
+ File startScript = new File(binDir, "cassandra");
+ ProcessExecution startScriptExe = ProcessExecutionUtility.createProcessExecution(startScript);
+
+ startScriptExe.addArguments(asList("-p", "cassandra.pid"));
+ startScriptExe.setCaptureOutput(true);
+ ProcessExecutionResults results = systemInfo.executeProcess(startScriptExe);
+
+ assertEquals(results.getExitCode(), (Integer) 0, "Cassandra failed to start: " + results.getCapturedOutput());
+
+ StorageNode storageNode = new StorageNode();
+ storageNode.parseNodeInformation("127.0.0.1|7399|9142");
+
+ ClusterInitService clusterInitService = new ClusterInitService();
+ clusterInitService.waitForClusterToStart(asList(storageNode));
+
+ SchemaManager schemaManager = new SchemaManager("rhqadmin", "rhqadmin", "127.0.0.1|7399|9142");
+ schemaManager.install();
+ schemaManager.updateTopology(true);
+ }
+
+ private void initPluginContainer() {
+ PluginContainerConfiguration pcConfig = new PluginContainerConfiguration();
+ File pluginsDir = new File(System.getProperty("pc.plugins.dir"));
+ pcConfig.setPluginDirectory(pluginsDir);
+ pcConfig.setPluginFinder(new FileSystemPluginFinder(pluginsDir));
+
+ pcConfig.setInsideAgent(false);
+ PluginContainer.getInstance().setConfiguration(pcConfig);
+ PluginContainer.getInstance().initialize();
+ }
+
+ @AfterSuite
+ public void ShutdownPluginContainerAndStorageNode() throws Exception {
+ PluginContainer.getInstance().shutdown();
+ shutdownStorageNodeIfNecessary();
+ }
+
+ private void shutdownStorageNodeIfNecessary() throws Exception {
+ File binDir = new File(basedir, "bin");
+ File pidFile = new File(binDir, "cassandra.pid");
+
+ if (pidFile.exists()) {
+ CassandraClusterManager ccm = new CassandraClusterManager();
+ ccm.killNode(basedir);
+ }
+ }
+
+ @Test
+ public void discoverStorageNode() {
+ InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager();
+ InventoryReport inventoryReport = inventoryManager.executeServerScanImmediately();
+
+ if (inventoryReport.getAddedRoots().isEmpty()) {
+ // could be empty if the storage node is already in inventory from
+ // a prior discovery scan.
+ Resource platform = inventoryManager.getPlatform();
+ storageNode = findCassandraNode(platform.getChildResources());
+ } else {
+ storageNode = findCassandraNode(inventoryReport.getAddedRoots());
+ }
+
+ assertNotNull(storageNode, "Failed to discover Storage Node instance");
+ assertNodeIsUp("Expected " + storageNode + " to be UP after discovery");
+ }
+
+ @Test(dependsOnMethods = "discoverStorageNode")
+ public void shutdownStorageNode() throws Exception {
+ OperationManager operationManager = PluginContainer.getInstance().getOperationManager();
+ OperationServicesAdapter operationsService = new OperationServicesAdapter(operationManager);
+
+ long timeout = 1000 * 60;
+ OperationContextImpl operationContext = new OperationContextImpl(storageNode.getId());
+ OperationServicesResult result = operationsService.invokeOperation(operationContext, "shutdown",
+ new Configuration(), timeout);
+
+ assertEquals(result.getResultCode(), OperationServicesResultCode.SUCCESS, "The shutdown operation failed");
+ // TODO why is this failing?
+ assertNodeIsDown("Expected " + storageNode + " to be DOWN after shutting it down");
+ }
+
+ private void assertNodeIsUp(String msg) {
+ executeAvailabilityScan();
+
+ Availability availability = getAvailability();
+
+ assertNotNull(availability, "Unable to determine availability for " + storageNode);
+ assertEquals(availability.getAvailabilityType(), AvailabilityType.UP, msg);
+ }
+
+ private void assertNodeIsDown(String msg) {
+ executeAvailabilityScan();
+
+ Availability availability = getAvailability();
+
+ assertNotNull(availability, "Unable to determine availability for " + storageNode);
+ assertEquals(availability.getAvailabilityType(), AvailabilityType.DOWN, msg);
+ }
+
+ private Availability getAvailability() {
+ InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager();
+ return inventoryManager.getAvailabilityIfKnown(storageNode);
+ }
+
+ private void executeAvailabilityScan() {
+ InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager();
+ inventoryManager.executeAvailabilityScanImmediately(false, true);
+ }
+
+ private Resource findCassandraNode(Set<Resource> resources) {
+ for (Resource resource : resources) {
+ if (isCassandraNode(resource.getResourceType())) {
+ return resource;
+ }
+ }
+ return null;
+ }
+
+ private boolean isCassandraNode(ResourceType type) {
+ return type.getPlugin().equals("RHQStorage") && type.getName().equals("RHQ Storage Node");
+ }
+
+}
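Editor's note: the TODO next to the DOWN assertion suggests a single availability scan can race with the node actually exiting. One way to harden that check, sketched against the same InventoryManager calls the test already uses (the helper name and timeout are assumptions), is to poll until the expected state shows up or a deadline passes. The helper is meant to live inside the test class above and reuse its imports.

// Editor's sketch: poll availability instead of asserting after a single scan.
private Availability waitForAvailability(Resource resource, AvailabilityType expected, long timeoutMillis)
    throws InterruptedException {
    InventoryManager inventoryManager = PluginContainer.getInstance().getInventoryManager();
    long deadline = System.currentTimeMillis() + timeoutMillis;
    Availability availability = null;
    while (System.currentTimeMillis() < deadline) {
        inventoryManager.executeAvailabilityScanImmediately(false, true);
        availability = inventoryManager.getAvailabilityIfKnown(resource);
        if (availability != null && availability.getAvailabilityType() == expected) {
            return availability;
        }
        Thread.sleep(1000L);
    }
    return availability; // caller asserts and reports the last observed state
}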
diff --git a/modules/plugins/rhq-storage/src/test/resources/log4j.properties b/modules/plugins/rhq-storage/src/test/resources/log4j.properties
new file mode 100644
index 0000000..67db049
--- /dev/null
+++ b/modules/plugins/rhq-storage/src/test/resources/log4j.properties
@@ -0,0 +1,42 @@
+#
+# /*
+# * RHQ Management Platform
+# * Copyright (C) 2005-2012 Red Hat, Inc.
+# * All rights reserved.
+# *
+# * This program is free software; you can redistribute it and/or modify
+# * it under the terms of the GNU General Public License, version 2, as
+# * published by the Free Software Foundation, and/or the GNU Lesser
+# * General Public License, version 2.1, also as published by the Free
+# * Software Foundation.
+# *
+# * This program is distributed in the hope that it will be useful,
+# * but WITHOUT ANY WARRANTY; without even the implied warranty of
+# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# * GNU General Public License and the GNU Lesser General Public License
+# * for more details.
+# *
+# * You should have received a copy of the GNU General Public License
+# * and the GNU Lesser General Public License along with this program;
+# * if not, write to the Free Software Foundation, Inc.,
+# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+# */
+#
+
+log4j.rootCategory=WARN, FILE, CONSOLE
+
+log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.FILE.DatePattern='.'yyyy-MM-dd
+log4j.appender.FILE.File=./target/test.log
+log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.FILE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n
+#log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n
+log4j.appender.FILE.Append=false
+
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+#log4j.appender.CONSOLE.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ABSOLUTE} %-5p %c %m%n
+
+log4j.logger.org.rhq=DEBUG
+log4j.logger.com.datastax=DEBUG
commit 83e5b228871c9a8352e98a12e0db76f8f4ea982e
Author: John Sanda <jsanda(a)redhat.com>
Date: Tue Jul 23 22:16:41 2013 -0400
first stab at prepareForBootstrap operation (which is currently broken)
Some manual testing showed that this is clearly broken. Given that the
implementation is a bit sloppy at the moment, this is a good time to get some
automated tests in place. The operation will perform the following steps in the
order specified (a condensed sketch follows the list):
1) shut down the storage node
2) update cassandra.yaml
3) update rhq-storage-auth.conf
4) restart the node
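Editor's note: a condensed sketch of that four-step flow, using the method names that appear in the diff below (stopNode, updateSeedsList, updateAuthFile, startNode); yaml purging and error handling are abbreviated, so this is an outline rather than the actual implementation.

public OperationResult prepareForBootstrap(Configuration params) throws Exception {
    OperationResult result = new OperationResult();

    // 1) shut down the storage node
    OperationResult stopResult = stopNode();
    if (stopResult.getErrorMessage() != null) {
        result.setErrorMessage("Failed to stop storage node: " + stopResult.getErrorMessage());
        return result;
    }

    // 2) update cassandra.yaml: purge old dirs, rewrite seeds and ports
    List<String> seeds = getAddresses(params.getList("storageNodeIPAddresses"));
    updateSeedsList(seeds);

    // 3) update rhq-storage-auth.conf with the cluster's addresses
    if (updateAuthFile(result, new HashSet<String>(seeds))) {
        return result; // updateAuthFile already set the error message
    }

    // 4) restart the node so it bootstraps into the cluster
    OperationResult startResult = startNode();
    if (startResult.getErrorMessage() != null) {
        result.setErrorMessage("Failed to restart storage node: " + startResult.getErrorMessage());
    } else {
        result.setSimpleResult("The storage node is now bootstrapping into the cluster.");
    }
    return result;
}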
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
index 0037bfe..f76da22 100644
--- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
+++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeComponent.java
@@ -111,7 +111,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
clusterBuilder = clusterBuilder.withCredentials(username, password);
}
- this.cassandraSession = clusterBuilder.build().connect(clusterName);
+// this.cassandraSession = clusterBuilder.build().connect(clusterName);
} catch (Exception e) {
LOG.error("Connect to Cassandra " + host + ":" + nativePort, e);
throw e;
@@ -196,7 +196,17 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
operation = storageService.getOperation("drain", emptyParams);
operation.invoke((Object[]) emptyParams);
- ProcessInfo process = context.getNativeProcess();
+ return stopNode();
+ }
+
+ protected OperationResult stopNode() {
+ ProcessInfo process = getResourceContext().getNativeProcess();
+
+ if (processInfo == null) {
+ LOG.warn("Failed to obtain process info. It appears Cassandra is already shutdown.");
+ return new OperationResult("Failed to obtain process info. It appears Cassandra is already shutdown.");
+ }
+
long pid = process.getPid();
try {
process.kill("KILL");
@@ -209,6 +219,7 @@ public class CassandraNodeComponent extends JMXServerComponent<ResourceComponent
}
}
+
protected OperationResult startNode() {
ResourceContext<?> context = getResourceContext();
Configuration pluginConfig = context.getPluginConfiguration();
diff --git a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
index 3b0aa5b..d9b35b9 100644
--- a/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
+++ b/modules/plugins/rhq-storage/src/main/java/org/rhq/plugins/storage/StorageNodeComponent.java
@@ -26,11 +26,15 @@
package org.rhq.plugins.storage;
import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.io.StringReader;
+import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
@@ -39,6 +43,8 @@ import org.mc4j.ems.connection.EmsConnection;
import org.mc4j.ems.connection.bean.EmsBean;
import org.mc4j.ems.connection.bean.attribute.EmsAttribute;
import org.mc4j.ems.connection.bean.operation.EmsOperation;
+import org.yaml.snakeyaml.DumperOptions;
+import org.yaml.snakeyaml.Yaml;
import org.rhq.core.domain.configuration.Configuration;
import org.rhq.core.domain.configuration.ConfigurationUpdateStatus;
@@ -48,6 +54,7 @@ import org.rhq.core.domain.configuration.PropertyMap;
import org.rhq.core.domain.configuration.PropertySimple;
import org.rhq.core.pluginapi.configuration.ConfigurationFacet;
import org.rhq.core.pluginapi.configuration.ConfigurationUpdateReport;
+import org.rhq.core.pluginapi.inventory.ResourceContext;
import org.rhq.core.pluginapi.operation.OperationFacet;
import org.rhq.core.pluginapi.operation.OperationResult;
import org.rhq.core.util.StringUtil;
@@ -96,6 +103,8 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
return updateConfiguration(parameters);
} else if (name.equals("updateKnownNodes")) {
return updateKnownNodes(parameters);
+ } else if (name.equals("prepareForBootstrap")) {
+ return prepareForBootstrap(parameters);
} else {
return super.invokeOperation(name, parameters);
}
@@ -132,6 +141,18 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
ipAddresses.add(propertySimple.getStringValue());
}
+ if (updateAuthFile(result, ipAddresses)) return result;
+
+ EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator");
+ EmsOperation emsOperation = authBean.getOperation("reloadConfiguration");
+ emsOperation.invoke();
+
+ result.setSimpleResult("Successfully updated the set of known nodes.");
+
+ return result;
+ }
+
+ private boolean updateAuthFile(OperationResult result, Set<String> ipAddresses) {
log.info("Updating known nodes to " + ipAddresses);
File confDir = new File(getBasedir(), "conf");
@@ -150,7 +171,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
log.error(msg);
result.setErrorMessage(msg);
- return result;
+ return true;
}
}
@@ -161,7 +182,7 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
"to unexpected error";
log.error(msg, e);
result.setErrorMessage(msg + ": " + ThrowableUtil.getRootMessage(e));
- return result;
+ return true;
}
try {
@@ -176,18 +197,127 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
}
result.setErrorMessage("There was an unexpected error while updating " + authFile + ". Make sure that " +
"it matches " + authBackupFile + " and then reschedule the operation.");
+ return true;
+ }
+ return false;
+ }
+
+ private OperationResult prepareForBootstrap(Configuration params) {
+ log.info("Preparing " + this + " for bootstrap...");
+
+ ResourceContext context = getResourceContext();
+ OperationResult result = new OperationResult();
+
+ log.info("Stopping storage node");
+ OperationResult stopNodeResult = stopNode();
+ if (stopNodeResult.getErrorMessage() != null) {
+ log.error("Failed to stop storage node " + this + " Cannot prepare the node for bootstrap which means " +
+ "that the storage node cannot join the cluster. Make sure the storage node is not running and retry " +
+ "the operation");
+ result.setErrorMessage("Failed to stop storage node. Cannot prepare the node for bootstrap which means " +
+ "that it cannot join the cluster. Make sure that the node is not running and retry the operation. " +
+ "Stopping the storage node failed with this error: " + stopNodeResult.getErrorMessage());
return result;
}
- EmsBean authBean = getEmsConnection().getBean("org.rhq.cassandra.auth:type=RhqInternodeAuthenticator");
- EmsOperation emsOperation = authBean.getOperation("reloadConfiguration");
- emsOperation.invoke();
+ Configuration pluginConfig = context.getPluginConfiguration();
+ String yamlProp = pluginConfig.getSimpleValue("yamlConfiguration");
+ File yamlFile = new File(yamlProp);
- result.setSimpleResult("Successfully updated the set of known nodes.");
+ DumperOptions options = new DumperOptions();
+ options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
+ Yaml yaml = new Yaml(options);
+
+ Map yamlConfig = null;
+ try {
+ yamlConfig = (Map) yaml.load(new FileInputStream(yamlFile));
+ } catch (FileNotFoundException e) {
+ log.error("Failed to load " + yamlFile, e);
+ log.error("Cannot prepare " + this + " for bootstrap. " + yamlFile + " must exist in order to make the " +
+ "necessary configuration changes.");
+ result.setErrorMessage("Cannot prepare storage node for bootstrap. It appears that " + yamlFile +
+ " does not exist. Make sure that it exists so that the necessary configuration changes can be made.");
+
+ return result;
+ }
+
+ purgeDir(getCommitLogDir(yamlConfig));
+ for (File dataDir : getDataDirs(yamlConfig)) {
+ purgeDir(dataDir);
+ }
+ purgeDir(getSavedCachesDir(yamlConfig));
+
+ log.info("Updating cluster settings");
+
+ String address = pluginConfig.getSimpleValue("host");
+ List<String> seeds = getAddresses(params.getList("storageNodeIPAddresses"));
+ // Make sure this node's address is not in the list; otherwise, it
+ // won't bootstrap properly.
+ seeds.remove(address);
+ try {
+ updateSeedsList(seeds);
+ } catch (IOException e) {
+ log.error("Failed to update seeds property in " + yamlFile, e);
+ result.setErrorMessage("Failed to prepared node for bootstrap due to unexpected error that occurred " +
+ "while updating seeds property in " + yamlFile + ":\n" + ThrowableUtil.getAllMessages(e));
+ return result;
+ }
+
+ if (updateAuthFile(result, new HashSet<String>(seeds))) {
+ return result;
+ }
+
+ int cqlPort = Integer.parseInt(params.getSimpleValue("cqlPort"));
+ int gossipPort = Integer.parseInt(params.getSimpleValue("gossipPort"));
+
+ yamlConfig.put("native_transport_port", cqlPort);
+ yamlConfig.put("storage_port", gossipPort);
+
+ try {
+ yaml.dump(yamlConfig, new FileWriter(yamlFile));
+ } catch (IOException e) {
+ log.error("Could not update cluster settings in " + yamlFile, e);
+ result.setErrorMessage("Could not update cluster settings in " + yamlFile + ":\n" +
+ ThrowableUtil.getAllMessages(e));
+ return result;
+ }
+
+ log.info(this + " is ready to be bootstrap. Restarting storage node...");
+ OperationResult startResult = startNode();
+ if (startResult.getErrorMessage() != null) {
+ log.error("Failed to restart storage node:\n" + startResult.getErrorMessage());
+ result.setErrorMessage("Failed to restart storage node:\n" + startResult.getErrorMessage());
+ } else {
+ result.setSimpleResult("The storage node was succesfully updated is now bootstrapping into the cluster.");
+ }
return result;
}
+ private void purgeDir(File dir) {
+ log.info("Purging " + dir);
+ FileUtil.purge(dir, true);
+ }
+
+ private File getCommitLogDir(Map yamlConfig) {
+ return new File((String) yamlConfig.get("commitlog_directory"));
+ }
+
+ private List<File> getDataDirs(Map yamlConfig) {
+ List<File> dirs = new ArrayList<File>();
+ List<String> dirNames = (List<String>) yamlConfig.get("data_file_directories");
+
+ for (String dirName : dirNames) {
+ dirs.add(new File(dirName));
+ }
+
+ return dirs;
+ }
+
+ private File getSavedCachesDir(Map yamlConfig) {
+ return new File((String) yamlConfig.get("saved_caches_directory"));
+ }
+
private OperationResult nodeAdded(Configuration params) {
boolean runRepair = params.getSimple("runRepair").getBooleanValue();
boolean updateSeedsList = params.getSimple("updateSeedsList").getBooleanValue();
@@ -405,4 +535,10 @@ public class StorageNodeComponent extends CassandraNodeComponent implements Oper
boolean succeeded;
String details;
}
+
+ @Override
+ public String toString() {
+ return StorageNodeComponent.class.getSimpleName() + "[resourceKey: " + getResourceContext().getResourceKey() +
+ "]";
+ }
}
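Editor's note: the configuration rewrite above round-trips cassandra.yaml through SnakeYAML as a plain Map. Below is a self-contained sketch of that load/modify/dump cycle; the file path argument and the port values are placeholders, not values taken from the patch.

import java.io.File;
import java.io.FileInputStream;
import java.io.FileWriter;
import java.util.Map;

import org.yaml.snakeyaml.DumperOptions;
import org.yaml.snakeyaml.Yaml;

// Editor's sketch of the cassandra.yaml round trip used by prepareForBootstrap.
public class CassandraYamlEditSketch {

    @SuppressWarnings("unchecked")
    public static void main(String[] args) throws Exception {
        File yamlFile = new File(args[0]); // e.g. <storage basedir>/conf/cassandra.yaml

        DumperOptions options = new DumperOptions();
        options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); // keep the file block-formatted

        Yaml yaml = new Yaml(options);
        FileInputStream in = new FileInputStream(yamlFile);
        Map<String, Object> config = (Map<String, Object>) yaml.load(in);
        in.close();

        // Same keys the operation rewrites; the values here are placeholders.
        config.put("native_transport_port", 9142);
        config.put("storage_port", 7100);

        FileWriter writer = new FileWriter(yamlFile);
        yaml.dump(config, writer);
        writer.close();
    }
}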
diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
index 1e39d6c..cd84de6 100644
--- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
+++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
@@ -101,6 +101,16 @@
</parameters>
</operation>
+ <operation name="prepareForBootstrap">
+ <parameters>
+ <c:simple-property name="cqlPort" type="integer" displayName="CQL Port"/>
+ <c:simple-property name="gossipPort" type="integer"/>
+ <c:list-property name="storageNodeIPAddresses" displayName="Storage Node IP Addresses">
+ <c:simple-property name="storageNodeIPAddress"/>
+ </c:list-property>
+ </parameters>
+ </operation>
+
<operation name="prepareForUpgrade" description="Prepares the storage node for upgrade (this operation consists of following steps: 1) turning off the RPC server, 2) turning off the gossiper, 3) taking the snapshot (backuping the data), 4) invoking the drain operation">
<parameters>
<c:simple-property name="snapshotName" required="false" type="string" displayName="Snapshot Name"
[rhq] Changes to 'feature/bundle-group'
by Jay Shaughnessy
New branch 'feature/bundle-group' available with the following commits:
commit da7b8bf04aab224fcce8c613fb1dee74c62186d1
Author: Jay Shaughnessy <jshaughn(a)redhat.com>
Date: Tue Jul 23 17:37:15 2013 -0400
Start setting up some infrastructure for Bundle Groups and associated
permissions.
[rhq] Branch 'mtho11/consolidated-metrics' - modules/enterprise
by mike thompson
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
New commits:
commit b7a901b43c29d0d0bdd8e883443b483b86039f18
Author: Mike Thompson <mithomps(a)redhat.com>
Date: Tue Jul 23 13:52:13 2013 -0700
Fixed Global Exception in dashboard portlet chart when chartContext.data is null.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java
index eeb97cf..67552d3 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/common/graph/graphtype/StackedBarMetricGraphImpl.java
@@ -694,7 +694,7 @@ public class StackedBarMetricGraphImpl extends AbstractMetricGraph {
}; // end public closure
}();
- if(typeof chartContext.data !== 'undefined' && chartContext.data.length > 0){
+ if(typeof chartContext.data !== 'undefined' && chartContext.data !== null && chartContext.data.length > 0){
metricStackedBarGraph.draw(chartContext);
}
[rhq] Branch 'mtho11/consolidated-metrics' - modules/enterprise
by mike thompson
modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java | 32 ++++++++--
1 file changed, 28 insertions(+), 4 deletions(-)
New commits:
commit 5f701d641746c6dfc69d3ce8dd93afd82b52c768
Author: Mike Thompson <mithomps(a)redhat.com>
Date: Tue Jul 23 13:09:38 2013 -0700
Consolidated Metrics - Track expanded/collapsed rows manually, since redraws forget which rows were expanded. Expanded rows are now preserved across refreshes.
diff --git a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java
index b837af1..8797d59 100644
--- a/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java
+++ b/modules/enterprise/gui/coregui/src/main/java/org/rhq/enterprise/gui/coregui/client/inventory/resource/detail/monitoring/table/MetricsTableView.java
@@ -40,12 +40,12 @@ import com.smartgwt.client.widgets.events.CloseClickHandler;
import com.smartgwt.client.widgets.grid.ListGrid;
import com.smartgwt.client.widgets.grid.ListGridField;
import com.smartgwt.client.widgets.grid.ListGridRecord;
+import com.smartgwt.client.widgets.grid.events.DataArrivedEvent;
+import com.smartgwt.client.widgets.grid.events.DataArrivedHandler;
import com.smartgwt.client.widgets.grid.events.RecordCollapseEvent;
import com.smartgwt.client.widgets.grid.events.RecordCollapseHandler;
import com.smartgwt.client.widgets.grid.events.RecordExpandEvent;
import com.smartgwt.client.widgets.grid.events.RecordExpandHandler;
-import com.smartgwt.client.widgets.grid.events.SelectionChangedHandler;
-import com.smartgwt.client.widgets.grid.events.SelectionEvent;
import com.smartgwt.client.widgets.grid.events.SortChangedHandler;
import com.smartgwt.client.widgets.grid.events.SortEvent;
import com.smartgwt.client.widgets.layout.VLayout;
@@ -86,6 +86,8 @@ public class MetricsTableView extends Table<MetricsViewDataSource> implements Re
private final MeasurementUserPreferences measurementUserPrefs;
private final Menu addToDashboardMenu;
+ Set<Integer> expandedRows = new HashSet<Integer>();
+
public MetricsTableView(Resource resource, AbstractD3GraphListView abstractD3GraphListView) {
super();
this.resource = resource;
@@ -106,7 +108,7 @@ public class MetricsTableView extends Table<MetricsViewDataSource> implements Re
*/
@Override
protected ListGrid createListGrid() {
- return new MetricsTableListGrid(resource, addToDashboardMenu);
+ return new MetricsTableListGrid(this, resource, addToDashboardMenu);
}
protected void configureTable() {
@@ -255,18 +257,24 @@ public class MetricsTableView extends Table<MetricsViewDataSource> implements Re
private static final int NUM_METRIC_POINTS = 60;
private Resource resource;
final private Menu addToDashboardMenu;
+ final MetricsTableView metricsTableView;
- public MetricsTableListGrid(final Resource resource, final Menu dashboardMenu) {
+ public MetricsTableListGrid(final MetricsTableView metricsTableView, final Resource resource,
+ final Menu dashboardMenu) {
super();
this.resource = resource;
this.addToDashboardMenu = dashboardMenu;
this.addToDashboardMenu.disable();
+ this.metricsTableView = metricsTableView;
setCanExpandRecords(true);
setCanExpandMultipleRecords(true);
setExpansionMode(ExpansionMode.DETAIL_FIELD);
+
addRecordExpandHandler(new RecordExpandHandler() {
@Override
public void onRecordExpand(RecordExpandEvent recordExpandEvent) {
+ metricsTableView.expandedRows.add(recordExpandEvent.getRecord().getAttributeAsInt(
+ MetricsViewDataSource.FIELD_METRIC_DEF_ID));
redrawGraphs();
}
@@ -274,6 +282,8 @@ public class MetricsTableView extends Table<MetricsViewDataSource> implements Re
addRecordCollapseHandler(new RecordCollapseHandler() {
@Override
public void onRecordCollapse(RecordCollapseEvent recordCollapseEvent) {
+ metricsTableView.expandedRows.remove(recordCollapseEvent.getRecord().getAttributeAsInt(
+ MetricsViewDataSource.FIELD_METRIC_DEF_ID));
redrawGraphs();
}
});
@@ -283,9 +293,23 @@ public class MetricsTableView extends Table<MetricsViewDataSource> implements Re
redrawGraphs();
}
});
+ addDataArrivedHandler(new DataArrivedHandler() {
+ @Override
+ public void onDataArrived(DataArrivedEvent dataArrivedEvent) {
+ int startRow = dataArrivedEvent.getStartRow();
+ int endRow = dataArrivedEvent.getEndRow();
+ for (int i = startRow; i < endRow; i++) {
+ if (metricsTableView.expandedRows.contains(getRecord(i).getAttributeAsInt(
+ MetricsViewDataSource.FIELD_METRIC_DEF_ID))) {
+ expandRecord(getRecord(i));
+ }
+ }
+ }
+ });
}
+
@Override
protected Canvas getExpansionComponent(final ListGridRecord record) {
final Integer definitionId = record.getAttributeAsInt(MetricsViewDataSource.FIELD_METRIC_DEF_ID);