[PATCH configure] disable packages / repos installation by default
by Mo Morsi
---
bin/aeolus-configure | 2 +-
recipes/aeolus_recipe/manifests/aeolus.pp | 2 ++
2 files changed, 3 insertions(+), 1 deletions(-)
diff --git a/bin/aeolus-configure b/bin/aeolus-configure
index 5360260..5a6319a 100644
--- a/bin/aeolus-configure
+++ b/bin/aeolus-configure
@@ -1,6 +1,6 @@
#!/bin/sh
export FACTER_AEOLUS_ENABLE_SECURITY=false
-export FACTER_AEOLUS_ENABLE_PACKAGES=true
+export FACTER_AEOLUS_ENABLE_PACKAGES=false
puppet /usr/share/aeolus-configure/aeolus_recipe.pp \
--modulepath=/usr/share/aeolus-configure/modules/
diff --git a/recipes/aeolus_recipe/manifests/aeolus.pp b/recipes/aeolus_recipe/manifests/aeolus.pp
index fec4265..17db402 100644
--- a/recipes/aeolus_recipe/manifests/aeolus.pp
+++ b/recipes/aeolus_recipe/manifests/aeolus.pp
@@ -38,6 +38,7 @@ class aeolus {
$pulp_base_url_release = $operatingsystem ? { 'fedora' => "fedora-13",
'redhat' => 'rhel5' }
+ if $enable_packages {
yumrepo{"${name}_arch":
name => "${name}_arch",
descr => "${name}_arch",
@@ -48,6 +49,7 @@ class aeolus {
descr => "${name}_noarch",
baseurl => "http://repos.fedorapeople.org/repos/aeolus/packages/${base_url_release}/n...",
enabled => 1, gpgcheck => 0}
+ }
}
--
1.7.2.3
OZ Development & Custom Partitions
by Jeff VanDellen
Hello,
My name is Jeff VanDellen and I am a systems engineer for LiquidWeb Inc. I
am interested in contributing to the development of OZ; additionally, I
have a few questions I was wondering if somebody could answer for me.
Thanks in advance.
1.) Can custom partitions be set up when doing an oz-install?
2.) Is there any more documentation out there that you could provide for me?
Thanks again.
[PATCH] task #1104: handle condor log rotations in dbomatic
by Mo Morsi
---
src/dbomatic/dbomatic | 8 ++++++++
1 files changed, 8 insertions(+), 0 deletions(-)
diff --git a/src/dbomatic/dbomatic b/src/dbomatic/dbomatic
index 1cd1229..3ab9a4f 100755
--- a/src/dbomatic/dbomatic
+++ b/src/dbomatic/dbomatic
@@ -248,6 +248,14 @@ class CondorEventLog < Nokogiri::XML::SAX::Document
end
def parse_log_file(log_file, parser)
+ # if the log file has been rotated out (condor does this internally)
+ if log_file.pos > File.size(CONDOR_EVENT_LOG_FILE)
+ # since the original file descriptor is pointing at the log
+ # just rotated out, we need to close and reopen
+ log_file.close
+ log_file = File.open(CONDOR_EVENT_LOG_FILE)
+ end
+
while s = log_file.gets
parser << s
end
--
1.7.2.3
[PATCH conductor 1/2] Validate pools deletion
by Tomas Sedovic
From: Tomas Sedovic <tsedovic(a)redhat.com>
This fixes Redmine #1098
Pools can be deleted only if they either don't have any instances associated
with them or if all those instances are stopped and stateless.
This also provides a dummy check to see whether an instance is stateful or
stateless. Currently, we only support stateless instances, but when we modify
the instance.restartable? method later on, nothing should break.
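For reviewers, the behaviour can be sanity-checked from a Rails console with something roughly like the following (illustrative only; the pool name is made up):
pool = Pool.find_by_name('default')   # any pool that still has running instances
pool.destroyable?                     # => false until every instance is stopped
pool.instances.all? { |i| i.state == Instance::STATE_STOPPED && !i.restartable? }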
---
src/app/controllers/resources/pools_controller.rb | 20 ++++++++++++++++++--
src/app/models/instance.rb | 7 +++++++
src/app/models/pool.rb | 6 ++++++
src/config/locales/en.yml | 10 +++++++++-
src/config/navigation.rb | 2 +-
5 files changed, 41 insertions(+), 4 deletions(-)
diff --git a/src/app/controllers/resources/pools_controller.rb b/src/app/controllers/resources/pools_controller.rb
index 337a1b8..04fdf41 100644
--- a/src/app/controllers/resources/pools_controller.rb
+++ b/src/app/controllers/resources/pools_controller.rb
@@ -73,16 +73,32 @@ class Resources::PoolsController < ApplicationController
end
def multi_destroy
+ destroyed = []
+ failed = []
+ error_messages = []
Pool.find(params[:pools_selected]).each do |pool|
# FIXME: remove this check when pools can be assigned to new users
# default_pool cannot be deleted because metadata object has it tied
# to id of 1 and deleting it prevents new users from being created
if pool.id == MetadataObject.lookup("self_service_default_pool").id
- flash[:notice] = "The default pool cannot be deleted"
+ error_messages << "The default pool cannot be deleted"
+ elsif check_privilege(Privilege::MODIFY, pool) && pool.destroyable?
+ pool.destroy
+ destroyed << pool.name
else
- pool.destroy if check_privilege(Privilege::MODIFY, pool)
+ failed << pool.name
end
end
+
+ unless destroyed.empty?
+ flash[:notice] = t('pools.index.pool_deleted', :count => destroyed.length, :list => destroyed.join(', '))
+ end
+ unless failed.empty?
+ error_messages << t('pools.index.pool_not_deleted', :count => failed.length, :list => failed.join(', '))
+ end
+ unless error_messages.empty?
+ flash[:error] = error_messages.join('<br />')
+ end
redirect_to resources_pools_url
end
diff --git a/src/app/models/instance.rb b/src/app/models/instance.rb
index 1841ecc..cd7d400 100644
--- a/src/app/models/instance.rb
+++ b/src/app/models/instance.rb
@@ -267,6 +267,13 @@ class Instance < ActiveRecord::Base
return stats
end
+ def restartable?
+ # TODO: we don't support stateful instances yet, so it's `false` for the time being.
+ # In the meantime, we can use this method to write validation code for cases
+ # where it does matter whether an instance is stateful or stateless.
+ false
+ end
+
named_scope :with_hardware_profile, lambda {
{:include => :hardware_profile}
}
diff --git a/src/app/models/pool.rb b/src/app/models/pool.rb
index ebee420..c4081ec 100644
--- a/src/app/models/pool.rb
+++ b/src/app/models/pool.rb
@@ -60,6 +60,8 @@ class Pool < ActiveRecord::Base
:include => [:role],
:order => "permissions.id ASC"
+ before_destroy :destroyable?
+
def cloud_accounts
accounts = []
instances.each do |instance|
@@ -73,4 +75,8 @@ class Pool < ActiveRecord::Base
HardwareProfile.find(:all, :conditions => {:provider_id => nil})
end
+ def destroyable?
+ instances.all? {|i| i.state == Instance::STATE_STOPPED and not i.restartable? }
+ end
+
end
diff --git a/src/config/locales/en.yml b/src/config/locales/en.yml
index 7510a05..677af3b 100644
--- a/src/config/locales/en.yml
+++ b/src/config/locales/en.yml
@@ -69,7 +69,6 @@ en:
choose_treatment: Choose Treatment
apply: Apply
resource_management: Resource Management
- pools: Pools
deployments: Deployments
instances: Instances
searches: Searches
@@ -83,6 +82,15 @@ en:
provider_accounts_item: Provider Account
cloud_engine_hardware_profiles: Hardware Profiles
cloud_engine_realms: Realms
+ pools:
+ index:
+ pools: Pools
+ pool_deleted:
+ one: "Pool %{list} was deleted."
+ other: "Pools %{list} were deleted."
+ pool_not_deleted:
+ one: "Pool %{list} was not deleted. There are instances associated with it."
+ other: "Pools %{list} were not deleted. They have instances associated with them."
pool_families:
pool_families: Pool Families
index:
diff --git a/src/config/navigation.rb b/src/config/navigation.rb
index b6944a5..78a7862 100644
--- a/src/config/navigation.rb
+++ b/src/config/navigation.rb
@@ -2,7 +2,7 @@ SimpleNavigation::Configuration.run do |navigation|
navigation.autogenerate_item_ids = false
navigation.items do |first_level|
first_level.item :resource_management, t(:resource_management), resources_pools_path, :highlights_on => /^\/$/ do |second_level|
- second_level.item :pools, t(:pools), resources_pools_path
+ second_level.item :pools, t('pools.index.pools'), resources_pools_path
second_level.item :deployments, t(:deployments),resources_deployments_path, :highlights_on => /^\/$|\/deployments/
second_level.item :instances, t(:instances), resources_instances_path
end
--
1.7.4.2
Imported Image But Instance Failed to start on RHEV
by Pradeep Subramainan
Hi all,
I am setting up Conductor on a RHEL 6.1 64-bit virtual machine to
integrate with RHEV 2.2. During my initial testing I am able to import
the RHEL6 image (a rhel6 template created on RHEV) from RHEV using
Image Factory, but when I try to create and launch it from the Resource
Management section of CE, I get the error below under the properties of
the instance created from the template.
PROPERTIES FOR CE-VM-1-PSUBRAMA
Name CE-VM-1-psubrama
Status error
Public Addresses
Private Addresses
Operating system unknown
Provider
Base Template RHEL6-Template_template
Architecture x86_64
Memory 1
Storage 1
Instantiation Time 11-Apr-2011 05:45:30
Uptime Error, could not calculate state time: state is not monitored
Current Alerts 0
Console Connection via SSH
Owner aeolus user
Shared to N/A
Error Cannot expand $$(keypair).
Running condor_q -better on the cloud engine returns the following:
[root@cloudenginevm1 ~]# condor_q -better
-- Submitter: cloudenginevm1.rhev.in : <10.65.70.27:60501> :
cloudenginevm1.rhev.in
---
001.000: Request is held.
Hold reason: Cannot expand $$(keypair).
---
002.000: Request is held.
Hold reason: Cannot expand $$(keypair).
Note: I used the
http://repos.fedorapeople.org/repos/aeolus/packages/rhel-6/$basearch/
repo for the aeolus conductor install, and the virtual machine where this CE
is installed was actually upgraded from RHEL 6.0 to 6.1.
Can anyone point out what I am missing?
--
==============================================================
Pradeep Subramanian RHCE,RHCVA
Technical Consultant - Global Professional Services
Email: pradeep(a)redhat.com
(Cell Phone): +91 9321856769 (Direct Phone): +91 22 39878843
Red Hat Consulting http://www.redhat.com/consulting
==============================================================
Red Hat, Inc. | B-304, Delphi | Hiranandani Business Park, Powai, Mumbai | 400 076
---
Learn how Red Hat Enterprise Virtualization can help you virtualize your critical enterprise server workloads with unbeatable performance, unbreakable security, and the lowest cost in class.
http://www.redhat.com/virtualization/rhev/server/
Read My RHEV Blog: http://rcritical.blogspot.com/
Added Logging to File for aeolus-*
by Martyn Taylor
We could place some of this stuff in the puppet config file, but we would still need to override the log destination to keep each service's log separate.
Do we still need to log to the console? In my opinion we do not; however, I have kept console logging in, since there was no mention of removing it.
We might want to revisit how we treat logging when puppet is responsible for configuring aeolus on more than one machine. We may need to do some puppet reporting magic, but I think basic logging is sufficient for now.
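Roughly, the shape of it is the standard Ruby Logger pattern, something like this sketch (the path and message are illustrative, not taken verbatim from the patch):
require 'logger'
# each aeolus-* service would substitute its own log file here
file_log    = Logger.new('/var/log/aeolus-conductor/dbomatic.log')
console_log = Logger.new(STDOUT)   # kept for now, per the note above
# write the same message to both destinations
[file_log, console_log].each { |log| log.info('service started') }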
Cheers,
Martyn
Testing Deltacloud API guide
by Michal Fojtik
Hi,
Today I added a comprehensive set of Rake tasks you can use for testing the
functionality of various DC API backend providers:
rake cucumber # Call our Cucumber suite
rake cucumber:ec2:features # Run Cucumber features
rake cucumber:ec2:junit # Run Cucumber features
rake cucumber:ec2:test # Run Cucumber features
rake cucumber:mock:features # Run Cucumber features
rake cucumber:mock:junit # Run Cucumber features
rake cucumber:mock:test # Run Cucumber features
rake cucumber:sbc:features # Run Cucumber features
rake cucumber:sbc:junit # Run Cucumber features
rake cucumber:sbc:test # Run Cucumber features
rake test # Call our Test::Unit suite
rake test:mock # Run tests for mock
rake test:rackspace # Run tests for rackspace
rake test:rhevm # Run tests for rhevm
I hope task names are mostly self-descriptive, but:
rake cucumber:ec2:features # => Will produce nice HTML report
rake cucumber:ec2:junit # => Food for Apache Hudson
rake cucumber:ec2:test # => Console output
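Under the hood, each of those variants could be wired up with cucumber's bundled rake task class, roughly along these lines (a sketch only; the feature path and report directory are illustrative, not the actual Rakefile contents):
require 'cucumber/rake/task'
namespace :cucumber do
  namespace :ec2 do
    # console output, as in `rake cucumber:ec2:test`
    Cucumber::Rake::Task.new(:test) do |t|
      t.cucumber_opts = "--format progress tests/ec2"
    end
    # JUnit XML output for Hudson, as in `rake cucumber:ec2:junit`
    Cucumber::Rake::Task.new(:junit) do |t|
      t.cucumber_opts = "--format junit --out junit_reports tests/ec2"
    end
  end
end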
To execute everything in 'one click' (actually two clicks ;-) you can do:
$ cd core/server
$ rake test
$ rake cucumber
FYI: To install all the dependencies needed for running the unit tests, you can use the
integrated bundler: $ cd core/server && bundle install
After this command all test suites should work.
IMPORTANT: Please give it a try before posting a patch or other change :-)
-- Michal
------------------------------------------------------
Michal Fojtik, mfojtik(a)redhat.com
Deltacloud API: http://deltacloud.org
[PATCH configure] bz #693369: permit http proxy before attempting to use conductor interface
by Mo Morsi
Right now nothing in configure requires SELinux to be permissive
before running, and thus SELinux could still be enforcing, preventing
the aeolus seed data from being created.
If any other components require SELinux to be permissive, we should
create policy exceptions for those operations and remove the
SELinux-permissive bits.
---
recipes/aeolus_recipe/manifests/conductor.pp | 2 +-
recipes/apache/manifests/init.pp | 9 +++++++--
2 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/recipes/aeolus_recipe/manifests/conductor.pp b/recipes/aeolus_recipe/manifests/conductor.pp
index cd2934a..86c1210 100644
--- a/recipes/aeolus_recipe/manifests/conductor.pp
+++ b/recipes/aeolus_recipe/manifests/conductor.pp
@@ -265,7 +265,7 @@ define aeolus::conductor::login($user,$password){
-d commit=submit \
-c /tmp/aeolus-${user}.cookie",
onlyif => "/usr/bin/test ! -f /tmp/aeolus-${user}.cookie || \"\" == \"`curl -X GET http://localhost/conductor -b /tmp/aeolus-${user}.cookie -i --silent | grep 'HTTP/1.1 200'`\"",
- require => Service['aeolus-conductor']}
+ require => Service['aeolus-conductor', 'httpd']}
}
define aeolus::conductor::logout($user){
diff --git a/recipes/apache/manifests/init.pp b/recipes/apache/manifests/init.pp
index 80d8980..79f15f9 100644
--- a/recipes/apache/manifests/init.pp
+++ b/recipes/apache/manifests/init.pp
@@ -9,12 +9,17 @@ class apache {
package { "mod_ssl": ensure => installed }
}
+ # if selinux is enabled and we want to use mod_proxy, we need to do this
+ exec{'permit-http-networking':
+ command => '/usr/sbin/setsebool httpd_can_network_connect 1',
+ logoutput => true }
+
service { "httpd":
ensure => running,
- require => Package["httpd"],
+ require => [Package["httpd"], Exec['permit-http-networking']],
hasrestart => true,
hasstatus => true,
- enable => true,
+ enable => true
}
exec { "reload-apache":
--
1.7.2.3
[PATCH conductor] task 1104: handle condor log rotations in dbomatic (rev 6)
by Mo Morsi
---
src/dbomatic/dbomatic | 74 +++++++++++++++++++++++++-----------------------
1 files changed, 39 insertions(+), 35 deletions(-)
diff --git a/src/dbomatic/dbomatic b/src/dbomatic/dbomatic
index 1cd1229..e7f29b0 100755
--- a/src/dbomatic/dbomatic
+++ b/src/dbomatic/dbomatic
@@ -70,8 +70,9 @@ if help
exit(0)
end
-CONDOR_EVENT_LOG_FILE = "#{condor_event_log_dir}/EventLog"
-EVENT_LOG_POS_FILE = "#{dbomatic_run_dir}/event_log_position"
+CONDOR_EVENT_LOG_FILE = "#{condor_event_log_dir}/EventLog"
+CONDOR_EVENT_LOG_FILE_OLD = "#{condor_event_log_dir}/EventLog.old"
+EVENT_LOG_POS_FILE = "#{dbomatic_run_dir}/event_log_position"
if dbomatic_log_dir == '-'
DBOMATIC_LOG_FILE = STDOUT
else
@@ -247,10 +248,37 @@ class CondorEventLog < Nokogiri::XML::SAX::Document
end
end
-def parse_log_file(log_file, parser)
+# FIXME we should make sure everything here is done atomically
+def parse_log_file(parser)
+ # since the actual log file may be rotated out
+ # open a new handle every time we want to parse
+ log_file = File.open(CONDOR_EVENT_LOG_FILE)
+
+ # persistently store log position in the filesystem
+ # in case of dbomatic restarts
+ if File.exists?(EVENT_LOG_POS_FILE)
+ File.open(EVENT_LOG_POS_FILE, 'r') { |f| log_file.pos = f.read.to_i }
+ end
+
+ # if the log has been rotated out
+ if log_file.pos > File.size(CONDOR_EVENT_LOG_FILE)
+ if File.exists?(CONDOR_EVENT_LOG_FILE_OLD)
+ # finish parsing old log file
+ old_log_file = File.open(CONDOR_EVENT_LOG_FILE_OLD)
+ old_log_file.pos = log_file.pos
+ while s = old_log_file.gets
+ parser << s
+ end
+ end
+
+ # reset position
+ log_file.pos = 0
+ end
+
while s = log_file.gets
parser << s
end
+
File.open(EVENT_LOG_POS_FILE, 'w') { |f| f.write log_file.pos.to_s }
end
@@ -268,39 +296,15 @@ begin
parser << "<events>"
notifier = INotify::Notifier.new
- log_file = nil
-
- if File.exists? CONDOR_EVENT_LOG_FILE
- log_file = File.open(CONDOR_EVENT_LOG_FILE)
-
- # persistantly store log position in filesystem
- # incase of dbomatic restarts
- if File.exists?(EVENT_LOG_POS_FILE)
- File.open(EVENT_LOG_POS_FILE, 'r') { |f| log_file.pos = f.read.to_i }
- logger.info "Replaying old events..."
- parse_log_file log_file, parser
- logger.info "done"
- end
- # Setup inotify watch for condor event log
- notifier.watch(CONDOR_EVENT_LOG_FILE, :modify){ |event|
- parse_log_file log_file, parser
- }
-
- # if log file doesn't exist wait until it does
- else
- notifier.watch(condor_event_log_dir, :create){ |event|
- if event.name == "EventLog"
- log_file = File.open(CONDOR_EVENT_LOG_FILE)
- parse_log_file log_file, parser
-
- # Setup inotify watch for condor event log
- notifier.watch(CONDOR_EVENT_LOG_FILE, :modify){ |event|
- parse_log_file log_file, parser
- }
- end
- }
- end
+ parse_log_file(parser) if File.exists? CONDOR_EVENT_LOG_FILE
+
+ # Setup inotify watch for condor event log changes
+ notifier.watch(condor_event_log_dir, :all_events){ |event|
+ if event.name == "EventLog" && event.flags.include?(:modify)
+ parse_log_file parser
+ end
+ }
while true
begin
--
1.7.2.3