r5724 - in branches/tmckay: cumin/python/cumin/grid sage/python/sage/wallaby
by tmckay@fedoraproject.org
Author: tmckay
Date: 2013-02-25 21:17:01 +0000 (Mon, 25 Feb 2013)
New Revision: 5724
Modified:
branches/tmckay/cumin/python/cumin/grid/pool.py
branches/tmckay/sage/python/sage/wallaby/wallabyoperations.py
Log:
More cache tweaking. Add tags_by_node in wallaby operations.
Modified: branches/tmckay/cumin/python/cumin/grid/pool.py
===================================================================
--- branches/tmckay/cumin/python/cumin/grid/pool.py 2013-02-25 16:30:30 UTC (rev 5723)
+++ branches/tmckay/cumin/python/cumin/grid/pool.py 2013-02-25 21:17:01 UTC (rev 5724)
@@ -108,7 +108,6 @@
self.limits = LimitSelector(app, "limits")
self.view.add_tab(self.limits)
- self.edit_node_tags = TagsNodeEditTask(app, self)
config_editor = TagInventory(app, "tagi")
self.view.add_tab(config_editor)
Modified: branches/tmckay/sage/python/sage/wallaby/wallabyoperations.py
===================================================================
--- branches/tmckay/sage/python/sage/wallaby/wallabyoperations.py 2013-02-25 16:30:30 UTC (rev 5723)
+++ branches/tmckay/sage/python/sage/wallaby/wallabyoperations.py 2013-02-25 21:17:01 UTC (rev 5724)
@@ -89,6 +89,7 @@
# Cache a list of nodes that are members of a tag
self._nodes_by_tag = dict()
+ self._tags_by_node = dict()
# Store the name of the partition group so we can filter it out
# of tags/groups that we return
@@ -304,12 +305,8 @@
names = []
self._lock.acquire()
try:
- if node_name in self._cache[WBTypes.NODES].data:
- names = self._cache[WBTypes.NODES].data[node_name].getLabels()
- # This is a hack until remove group works
- names = [n for n in names \
- if n in self._nodes_by_tag and \
- node_name in self._nodes_by_tag[n]]
+ if node_name in self._tags_by_node:
+ names = self._tags_by_node[node_name]
finally:
self._lock.release()
return names
@@ -375,9 +372,15 @@
for tag in tags:
#for n in self._nodes_by_tag[tag]:
# n.refresh()
+ nodes = self._nodes_by_tag[tag]
del self._nodes_by_tag[tag]
del self._cache["tags"].data[tag]
del self._cache["groups"].data[tag]
+ for n in nodes:
+ try:
+ self._tags_by_node[n].remove(tag)
+ except:
+ pass
self._lock.release()
if callback and not self._replaying:
@@ -404,12 +407,23 @@
start = time.time()
# this is a workaround
self._store.getPartitionGroup()
- n.modifyLabels("REPLACE", list(tags), create_missing_tags=True)
+
+ # In case multiple cumin users are fighting
+ # check the list of tags for validity
+ bogus = []
+ for t in tags:
+ if not t in self._cache[WBTypes.TAGS].data:
+ bogus.append(t)
+ for b in bogus:
+ tags.remove(b)
+
+ n.modifyLabels("REPLACE", list(tags))
status = True
delta = time.time() - start
log.debug("WallabyOperations: edit_tags %s" % delta)
except Exception, e:
- log.debug("WallabyOperations: edit_tags, exception suppressed, %s" % str(e))
+ log.debug("WallabyOperations: edit_tags, "\
+ "exception suppressed, %s" % str(e))
if status:
self._lock.acquire()
@@ -422,6 +436,7 @@
val.remove(node)
except:
pass
+ self._tags_by_node[node] = list(tags)
self._lock.release()
callback = "callback" in kwargs and kwargs["callback"] or None
@@ -729,6 +744,7 @@
groups = ops.get_data(WBTypes.GROUPS)
tags = []
nodes_by_tag = dict()
+ tags_by_node = dict()
for g in groups:
if not g.name.startswith("+++") and \
ops._store.isLabel(g):
@@ -738,11 +754,17 @@
delta = time.time() - start
log.debug("WallabyOperations: update g.membership %s " % delta)
nodes_by_tag[g.name] = nodes
+ for n in nodes:
+ if n in tags_by_node:
+ tags_by_node[n].append(g.name)
+ else:
+ tags_by_node[n] = [g.name]
ops._lock.acquire()
try:
ops._cache[WBTypes.TAGS].data = ops._to_dict(tags)
ops._nodes_by_tag = nodes_by_tag
+ ops._tags_by_node = tags_by_node
finally:
ops._lock.release()
log.debug("WallabyOperations: %s list updated (%s items)" \
r5723 - in branches/tmckay: cumin/python/cumin/grid sage/python/sage/wallaby
by tmckay@fedoraproject.org
Author: tmckay
Date: 2013-02-25 16:30:30 +0000 (Mon, 25 Feb 2013)
New Revision: 5723
Modified:
branches/tmckay/cumin/python/cumin/grid/tags.py
branches/tmckay/sage/python/sage/wallaby/wallabyoperations.py
Log:
Fix up wallaby operations methods so there is no need to recache after
Cumin operations. Instead, fix up our own data structures by hand.
Modified: branches/tmckay/cumin/python/cumin/grid/tags.py
===================================================================
--- branches/tmckay/cumin/python/cumin/grid/tags.py 2013-02-21 22:11:06 UTC (rev 5722)
+++ branches/tmckay/cumin/python/cumin/grid/tags.py 2013-02-25 16:30:30 UTC (rev 5723)
@@ -1001,7 +1001,6 @@
self.invoc = invoc
try:
self.app.wallaroo.edit_tags(node_name, *tags, callback=callback, refresh=True)
- #call_async(self.callback, self.app.wallaroo.edit_tags, node_name, *tags)
except:
invoc.status = invoc.FAILED
log.debug("Edit node failed", exc_info=True)
@@ -1068,19 +1067,13 @@
current_tags = self.app.wallaroo.get_tag_names(node)
current_tags.append(tag_name)
self.call_count += 1
- self.app.wallaroo.edit_tags(node, *current_tags, callback=self.callback, refresh=False)
- #call_async(self.callback, self.app.wallaroo.edit_tags, node, *current_tags)
+ self.app.wallaroo.edit_tags(node, *current_tags, callback=self.callback, refresh=False)
for node in current_nodes:
if node not in chosen_nodes:
current_tags = self.app.wallaroo.get_tag_names(node)
current_tags.remove(tag_name)
self.call_count += 1
self.app.wallaroo.edit_tags(node, *current_tags, callback=self.callback, refresh=False)
- #call_async(self.callback, self.app.wallaroo.edit_tags, node, *current_tags)
-
- # These calls have been serialized so we can just queue the refresh here
- self.app.wallaroo.refresh(WBTypes.GROUPS,WBTypes.TAGS)
-
except:
self.call_count = 0
invoc.status = invoc.FAILED
Modified: branches/tmckay/sage/python/sage/wallaby/wallabyoperations.py
===================================================================
--- branches/tmckay/sage/python/sage/wallaby/wallabyoperations.py 2013-02-21 22:11:06 UTC (rev 5722)
+++ branches/tmckay/sage/python/sage/wallaby/wallabyoperations.py 2013-02-25 16:30:30 UTC (rev 5723)
@@ -306,6 +306,10 @@
try:
if node_name in self._cache[WBTypes.NODES].data:
names = self._cache[WBTypes.NODES].data[node_name].getLabels()
+ # This is a hack until remove group works
+ names = [n for n in names \
+ if n in self._nodes_by_tag and \
+ node_name in self._nodes_by_tag[n]]
finally:
self._lock.release()
return names
@@ -317,23 +321,29 @@
Refresh the cached lists of groups and tags.
'''
- created = False
+ tags = []
try:
for name in names:
start = time.time()
self._store.addLabel(name)
- created = True
+ tags.append(self._store.getGroupByName(name))
delta = time.time() - start
log.debug("WallabyOperations: store.addLabel %s" % delta)
except Exception, e:
log.debug("WallabyOperations: create_tag, exception suppressed, %s" % str(e))
- # if we actually created tags then we must update the cache
- if created:
- self._mark_refresh("groups", "tags", must=True)
-
+ # If we actually created tags then we must add them to the cache
+ # Each tag must go in the groups and tags cache, and the tag has
+ # no nodes yet because we just made it.
+ self._lock.acquire()
+ for tag in tags:
+ self._nodes_by_tag[tag.name] = []
+ self._cache["tags"].data[tag.name] = tag
+ self._cache["groups"].data[tag.name] = tag
+ self._lock.release()
+
if callback and not self._replaying:
- callback(created)
+ callback(len(tags) > 0)
@_queued
def remove_tags(self, names, callback=None):
@@ -344,20 +354,34 @@
tag name first. Refresh cached lists of
groups, tags, and nodes.
'''
- removed = False
+ tags = []
for name in names:
if name in self._cache["tags"].data:
try:
- self._store.removeGroup(name)
- removed = True
+ # This handler here is a hack to allow remove group
+ # when it really doesn't work yet
+ try:
+ self._store.removeGroup(name)
+ except:
+ pass
+ tags.append(name)
except Exception, e:
log.debug("WallabyOperations: remove_tag, exception suppressed, %s" % str(e))
# if we actually created tags then we must update the cache
- if removed:
- self._mark_refresh("nodes", "groups", "tags", must=True)
+
+ # If we removed tags then we must remove them from the cache
+ self._lock.acquire()
+ for tag in tags:
+ #for n in self._nodes_by_tag[tag]:
+ # n.refresh()
+ del self._nodes_by_tag[tag]
+ del self._cache["tags"].data[tag]
+ del self._cache["groups"].data[tag]
+ self._lock.release()
+
if callback and not self._replaying:
- callback(removed)
+ callback(len(tags) > 0)
@_queued
def edit_tags(self, node, *tags, **kwargs):
@@ -370,12 +394,8 @@
list or tuple of strings
'''
status = False
- if type(node) in (str, unicode):
- n = node
- else:
- n = node.name
- n = n in self._cache[WBTypes.NODES].data and \
- self._cache[WBTypes.NODES].data[n] or None
+ n = node in self._cache[WBTypes.NODES].data and \
+ self._cache[WBTypes.NODES].data[node] or None
if n is None:
log.debug("WallabyOperations: edit_tags, node not found %s" % str(n))
else:
@@ -391,10 +411,18 @@
except Exception, e:
log.debug("WallabyOperations: edit_tags, exception suppressed, %s" % str(e))
- # Go ahead and default refresh to True
- refresh = "refresh" in kwargs and kwargs["refresh"] or True
- if refresh and status:
- self._mark_refresh("groups", "tags")
+ if status:
+ self._lock.acquire()
+ for key,val in self._nodes_by_tag.iteritems():
+ if key in tags:
+ if not node in val:
+ val.append(node)
+ else:
+ try:
+ val.remove(node)
+ except:
+ pass
+ self._lock.release()
callback = "callback" in kwargs and kwargs["callback"] or None
if callback and not self._replaying:
@@ -403,20 +431,15 @@
@_queued
def edit_features(self, group, *features, **kwargs):
'''
- Replace existing features in a group with the specified features.
-
+ Replace existing features in a group with the specified features
group -- a wallaby.Group object or the name of a wallaby.Group object
features -- the new set of features for the group,
list or tuple of strings
'''
status = False
- if type(group) in (str, unicode):
- g = group
- else:
- g = group.name
- g = g in self._cache[WBTypes.GROUPS].data and \
- self._cache[WBTypes.GROUPS].data[g] or None
+ g = group in self._cache[WBTypes.GROUPS].data and \
+ self._cache[WBTypes.GROUPS].data[group] or None
if g is None:
log.debug("WallabyOperations: edit_features, group not found %s" % str(g))
else:
@@ -429,7 +452,7 @@
except Exception, e:
log.debug("WallabyOperations: edit_features, exception suppressed, %s" % str(e))
- # we don't need a refresh because we have no mapping of features to nodes
+ # no other data to update here because we don't have a mapping of features to nodes
callback = "callback" in kwargs and kwargs["callback"] or None
if callback and not self._replaying:
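One detail of the removed line `refresh = "refresh" in kwargs and kwargs["refresh"] or True` is worth a note: the pre-2.5 `and ... or ...` ternary idiom misbehaves whenever the middle operand is falsy, so a caller's explicit refresh=False still came out True. A short demonstration:

    kwargs = {"refresh": False}

    # The old idiom: True and False -> False, then False or True -> True.
    refresh = "refresh" in kwargs and kwargs["refresh"] or True
    print(refresh)                              # True -- the caller's False is lost

    # Safer equivalents:
    refresh = kwargs.get("refresh", True)       # False, as intended
    refresh = kwargs["refresh"] if "refresh" in kwargs else True

The surviving callback lookups use the same idiom, but harmlessly: a falsy callback and None take the same branch.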
r5720 - in trunk: cumin/python/cumin/gridhadoop sage/python/sage/aviary
by croberts@fedoraproject.org
Author: croberts
Date: 2013-02-20 18:35:31 +0000 (Wed, 20 Feb 2013)
New Revision: 5720
Modified:
trunk/cumin/python/cumin/gridhadoop/datanode.py
trunk/cumin/python/cumin/gridhadoop/jobtracker.py
trunk/cumin/python/cumin/gridhadoop/namenode.py
trunk/cumin/python/cumin/gridhadoop/tasktracker.py
trunk/sage/python/sage/aviary/aviaryoperations.py
Log:
Changes to pass description on node creation. Currently description is set to an empty string until it's wired into the UI.
Modified: trunk/cumin/python/cumin/gridhadoop/datanode.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/datanode.py 2013-02-19 14:24:28 UTC (rev 5719)
+++ trunk/cumin/python/cumin/gridhadoop/datanode.py 2013-02-20 18:35:31 UTC (rev 5720)
@@ -129,16 +129,13 @@
self.invoc.status = self.invoc.FAILED
self.invoc.end()
- def fake_call(self, binfile, owner, hadoophost, count, name_node):
- return True
-
def do_invoke(self, session, object, invoc, args):
self.invoc = invoc
- (binfile, owner, hadoophost, count, name_node) = args
+ (binfile, owner, hadoophost, description, count, name_node) = args
try:
self.invoc = invoc
- self.app.remote.start_data_node(hadoophost, name_node, binfile, owner, count, invoc.make_callback())
+ self.app.remote.start_data_node(hadoophost, name_node, binfile, owner, description, count, invoc.make_callback())
except Exception, e:
invoc.status = invoc.FAILED
log.debug("Creating data node failed", exc_info=True)
@@ -199,7 +196,8 @@
hadoophost = self.hadoophost.get(session)
count = self.count.get(session)
name_node = self.nn_id.get(session)
- self.task.invoke(session, None, (binfile, owner, hadoophost, count, name_node))
+ description = ""
+ self.task.invoke(session, None, (binfile, owner, hadoophost, description, count, name_node))
self.task.exit_with_redirect(session, url)
def render_title(self, session):
Modified: trunk/cumin/python/cumin/gridhadoop/jobtracker.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/jobtracker.py 2013-02-19 14:24:28 UTC (rev 5719)
+++ trunk/cumin/python/cumin/gridhadoop/jobtracker.py 2013-02-20 18:35:31 UTC (rev 5720)
@@ -127,16 +127,13 @@
if result == False:
self.invoc.status = self.invoc.FAILED
self.invoc.end()
-
- def fake_call(self, binfile, owner, hadoophost):
- return True
-
+
def do_invoke(self, session, object, invoc, args):
self.invoc = invoc
- (binfile, owner, hadoophost, name_node, count) = args
+ (binfile, owner, hadoophost, name_node, description, count) = args
try:
- self.app.remote.start_job_tracker(hadoophost, name_node, binfile, owner, count, invoc.make_callback())
+ self.app.remote.start_job_tracker(hadoophost, name_node, binfile, owner, description, count, invoc.make_callback())
except Exception, e:
invoc.status = invoc.FAILED
log.debug("Creating job tracker failed", exc_info=True)
@@ -186,8 +183,9 @@
owner = session.client_session.username()
hadoophost = self.hadoophost.get(session)
name_node = self.nameNode.get(session)
+ description = ""
count = 1
- self.task.invoke(session, None, (binfile, owner, hadoophost, name_node, count))
+ self.task.invoke(session, None, (binfile, owner, hadoophost, name_node, description, count))
self.task.exit_with_redirect(session, url)
def render_title(self, session):
Modified: trunk/cumin/python/cumin/gridhadoop/namenode.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/namenode.py 2013-02-19 14:24:28 UTC (rev 5719)
+++ trunk/cumin/python/cumin/gridhadoop/namenode.py 2013-02-20 18:35:31 UTC (rev 5720)
@@ -90,8 +90,8 @@
def do_invoke(self, session, object, invoc, args):
self.invoc = invoc
- (binfile, owner, hadoophost) = args
- self.app.remote.start_name_node(hadoophost, binfile, owner, invoc.make_callback())
+ (binfile, owner, hadoophost, description) = args
+ self.app.remote.start_name_node(hadoophost, binfile, owner, description, invoc.make_callback())
def get_title(self, session, x):
return "Create a name node"
@@ -133,7 +133,8 @@
binfile = self.binfile.get(session)
hadoophost = self.hadoophost.get(session)
owner = session.client_session.username()
- self.task.invoke(session, None, (binfile, owner, hadoophost))
+ description = ""
+ self.task.invoke(session, None, (binfile, owner, hadoophost, description))
self.task.exit_with_redirect(session, url)
def render_title(self, session):
Modified: trunk/cumin/python/cumin/gridhadoop/tasktracker.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/tasktracker.py 2013-02-19 14:24:28 UTC (rev 5719)
+++ trunk/cumin/python/cumin/gridhadoop/tasktracker.py 2013-02-20 18:35:31 UTC (rev 5720)
@@ -129,15 +129,12 @@
self.invoc.status = self.invoc.FAILED
self.invoc.end()
- def fake_call(self, binfile, owner, hadoophost, count, job_tracker):
- return True
-
def do_invoke(self, session, object, invoc, args):
self.invoc = invoc
- (binfile, owner, hadoophost, count, job_tracker) = args
+ (binfile, owner, hadoophost, description, count, job_tracker) = args
try:
- self.app.remote.start_task_tracker(hadoophost, job_tracker, binfile, owner, count, invoc.make_callback())
+ self.app.remote.start_task_tracker(hadoophost, job_tracker, binfile, owner, description, count, invoc.make_callback())
except Exception, e:
invoc.status = invoc.FAILED
log.debug("Creating task trackers failed", exc_info=True)
@@ -192,7 +189,8 @@
hadoophost = self.hadoophost.get(session)
count = self.count.get(session)
job_tracker = self.jt_id.get(session)
- self.task.invoke(session, None, (binfile, owner, hadoophost, count, job_tracker))
+ description = ""
+ self.task.invoke(session, None, (binfile, owner, hadoophost, description, count, job_tracker))
self.task.exit_with_redirect(session, url)
def render_title(self, session):
Modified: trunk/sage/python/sage/aviary/aviaryoperations.py
===================================================================
--- trunk/sage/python/sage/aviary/aviaryoperations.py 2013-02-19 14:24:28 UTC (rev 5719)
+++ trunk/sage/python/sage/aviary/aviaryoperations.py 2013-02-20 18:35:31 UTC (rev 5720)
@@ -904,7 +904,7 @@
# Equivalence? We don't want no stinking QMF structural equivalence!
self.use_MethodResult_for_sync_calls = False
- def start_name_node(self, host, bin_file, owner, callback):
+ def start_name_node(self, host, bin_file, owner, description, callback):
assert callable(callback)
client = self.client_pool.get_object()
@@ -929,7 +929,7 @@
callback(*result_tuple(result, host))
t = CallThread(self.call_client_retry, my_callback,
- client, "startNameNode", bin_file, owner)
+ client, "startNameNode", bin_file, owner, description)
t.start()
def stop_name_node(self, host, ids, callback):
@@ -939,8 +939,8 @@
def get_name_node(self, host, ids, owner=None, callback=None):
return self._query_ids(host, ids, owner, callback, "getNameNode")
- def start_data_node(self, host, nn_id, bin_file, owner, count, callback):
- return self._start_node(host, nn_id, bin_file, owner, count,
+ def start_data_node(self, host, nn_id, bin_file, owner, description, count, callback):
+ return self._start_node(host, nn_id, bin_file, owner, description, count,
"startDataNode", callback)
def stop_data_node(self, host, ids, callback):
@@ -949,8 +949,8 @@
def get_data_node(self, host, ids, owner=None, callback=None):
return self._query_ids(host, ids, owner, callback, "getDataNode")
- def start_job_tracker(self, host, nn_id, bin_file, owner, count, callback):
- return self._start_node(host, nn_id, bin_file, owner, count,
+ def start_job_tracker(self, host, nn_id, bin_file, owner, description, count, callback):
+ return self._start_node(host, nn_id, bin_file, owner, description, count,
"startJobTracker", callback)
def stop_job_tracker(self, host, ids, callback):
@@ -962,8 +962,8 @@
def stop_task_tracker(self, host, ids, callback):
self._operate_on_ids(host, ids, callback, "stopTaskTracker")
- def start_task_tracker(self, host, nn_id, bin_file, owner, count, callback):
- return self._start_node(host, nn_id, bin_file, owner, count,
+ def start_task_tracker(self, host, nn_id, bin_file, owner, description, count, callback):
+ return self._start_node(host, nn_id, bin_file, owner, description, count,
"startTaskTracker", callback)
def get_task_tracker(self, host, ids, owner=None, callback=None):
@@ -1007,7 +1007,7 @@
refs.append(ref)
return refs
- def _start_node(self, host, ref_id, bin_file, owner, count,
+ def _start_node(self, host, ref_id, bin_file, owner, description, count,
meth_name, callback):
assert callable(callback)
@@ -1035,7 +1035,7 @@
ref = self._make_id(client, ref_id)
t = CallThread(self.call_client_retry, my_callback,
- client, meth_name, ref, bin_file, owner, count)
+ client, meth_name, ref, bin_file, owner, description, count)
t.start()
def _get_node_list(self, proc, owner, callback):
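A mechanical note on this change: each form packs its fields into a positional tuple for task.invoke and do_invoke unpacks them by position, so inserting description mid-tuple had to happen at both ends in the same commit or the later fields would silently shift. A sketch of the hazard and one defensive alternative (the field values are made up):

    from collections import namedtuple

    # Positional packing, as in the tasks above; both sides must agree on order.
    args = ("hadoop.tar.gz", "someowner", "somehost", "", 3, "nn-1")
    (binfile, owner, hadoophost, description, count, name_node) = args

    # A named tuple records the field order once and raises a TypeError at the
    # pack site if a field is missed, instead of shifting values at the unpack:
    StartArgs = namedtuple(
        "StartArgs", "binfile owner hadoophost description count name_node")
    args = StartArgs("hadoop.tar.gz", "someowner", "somehost", "", 3, "nn-1")
    assert args.description == ""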
r5719 - trunk/cumin/python/cumin/gridhadoop
by croberts@fedoraproject.org
Author: croberts
Date: 2013-02-19 14:24:28 +0000 (Tue, 19 Feb 2013)
New Revision: 5719
Modified:
trunk/cumin/python/cumin/gridhadoop/datanode.py
trunk/cumin/python/cumin/gridhadoop/hadoop.py
trunk/cumin/python/cumin/gridhadoop/jobtracker.py
trunk/cumin/python/cumin/gridhadoop/namenode.py
trunk/cumin/python/cumin/gridhadoop/tasktracker.py
Log:
Tweaks to the grid-hadoop object tables to optimize the display a bit.
Modified: trunk/cumin/python/cumin/gridhadoop/datanode.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/datanode.py 2013-02-18 20:11:04 UTC (rev 5718)
+++ trunk/cumin/python/cumin/gridhadoop/datanode.py 2013-02-19 14:24:28 UTC (rev 5719)
@@ -53,17 +53,14 @@
super(DataNodeTable, self).__init__(app, name, cls)
self.id_col = self.DataNodeColumn(app, "idcol", cls.Id, cls.Id, "datanodeframe")
- #self.sub_col = SubmittedColumn(app, "sub", cls.Submitted)
self.state_col = ObjectTableColumn(app, "statecol", cls.State)
self.uptime_col = UptimeColumn(app, "uptimecol", cls.Uptime)
- self.owner_col = ObjectTableColumn(app, "ownercol", cls.Owner)
-
+ self.owner_col = ObjectTableColumn(app, "ownercol", cls.Owner)
# we need to be able to filter by parent_id, but don't need to see it
self.parent_id_col = ObjectTableColumn(app, "parentid", cls.Parent_id)
self.parent_id_col.visible = False
self.add_column(self.id_col)
- #self.add_column(self.sub_col)
self.add_column(self.state_col)
self.add_column(self.uptime_col)
self.add_column(self.owner_col)
Modified: trunk/cumin/python/cumin/gridhadoop/hadoop.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/hadoop.py 2013-02-18 20:11:04 UTC (rev 5718)
+++ trunk/cumin/python/cumin/gridhadoop/hadoop.py 2013-02-19 14:24:28 UTC (rev 5719)
@@ -316,3 +316,16 @@
class HadoopLinkColumnCell(LinkColumnCell):
#needed so we can override display
pass
+
+class HadoopIpcColumn(ObjectTableColumn):
+ def __init__(self, app, name, attr, width):
+ super(HadoopIpcColumn, self).__init__(app, name, attr)
+ self.width = width
+
+ def render_cell_title(self, session, record):
+ return self.field.get_content(session, record)
+
+class HadoopHttpColumn(HadoopObjectLinkColumn):
+ def render_cell_href(self, session, record):
+ link = self.field.get_content(session, record)
+ return link
Modified: trunk/cumin/python/cumin/gridhadoop/jobtracker.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/jobtracker.py 2013-02-18 20:11:04 UTC (rev 5718)
+++ trunk/cumin/python/cumin/gridhadoop/jobtracker.py 2013-02-19 14:24:28 UTC (rev 5719)
@@ -55,17 +55,17 @@
super(JobTrackerTable, self).__init__(app, name, cls)
self.id_col = self.JobTrackerColumn(app, "idcol", cls.Id, cls.Id, ".jobtrackerframe")
- self.ipc_col = ObjectTableColumn(app, "ipc", cls.Ipc)
- self.ipc_col.width = "20%"
- #self.sub_col = SubmittedColumn(app, "sub", cls.Submitted)
- self.http_col = self.HttpColumn(app, "http", cls.Http, cls.Http, None)
+ self.id_col.width = "10%"
+ self.ipc_col = HadoopIpcColumn(app, "ipc", cls.Ipc, "20%")
+ self.http_col = HadoopHttpColumn(app, "http", cls.Http, cls.Http, None)
self.state_col = ObjectTableColumn(app, "statecol", cls.State)
+ self.state_col.width = "10%"
self.uptime_col = UptimeColumn(app, "uptimecol", cls.Uptime)
+ self.uptime_col.width = "10%"
self.owner_col = ObjectTableColumn(app, "ownercol", cls.Owner)
self.add_column(self.id_col)
self.add_column(self.ipc_col)
- #self.add_column(self.sub_col)
self.add_column(self.http_col)
self.add_column(self.state_col)
self.add_column(self.uptime_col)
@@ -76,12 +76,7 @@
id = unescape_entity(record[self.id_field.index])
frame = self.frame.children_by_name[self.frame.name + self.frame_path]
return frame.get_href(session, id, frame.parent.host.get(session))
-
- class HttpColumn(HadoopObjectLinkColumn):
- def render_cell_href(self, session, record):
- link = self.field.get_content(session, record)
- return link
-
+
class JobTrackerDelete(HadoopNodeDeleteTask):
def __init__(self, app, selector, name, module):
super(JobTrackerDelete, self).__init__(app, selector)
Modified: trunk/cumin/python/cumin/gridhadoop/namenode.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/namenode.py 2013-02-18 20:11:04 UTC (rev 5718)
+++ trunk/cumin/python/cumin/gridhadoop/namenode.py 2013-02-19 14:24:28 UTC (rev 5719)
@@ -51,17 +51,17 @@
super(NameNodeTable, self).__init__(app, name, cls)
self.id_col = self.NameNodeColumn(app, "idcol", cls.Id, cls.Id, ".namenodeframe")
- self.ipc_col = ObjectTableColumn(app, "ipc", cls.Ipc)
- self.ipc_col.width = "20%"
- #self.sub_col = SubmittedColumn(app, "sub", cls.Submitted)
- self.http_col = self.HttpColumn(app, "http", cls.Http, cls.Http, None)
+ self.id_col.width = "10%"
+ self.ipc_col = HadoopIpcColumn(app, "ipc", cls.Ipc, "20%")
+ self.http_col = HadoopHttpColumn(app, "http", cls.Http, cls.Http, None)
self.state_col = ObjectTableColumn(app, "statecol", cls.State)
+ self.state_col.width = "10%"
self.uptime_col = UptimeColumn(app, "uptimecol", cls.Uptime)
+ self.uptime_col.width = "10%"
self.owner_col = ObjectTableColumn(app, "ownercol", cls.Owner)
self.add_column(self.id_col)
self.add_column(self.ipc_col)
- #self.add_column(self.sub_col)
self.add_column(self.http_col)
self.add_column(self.state_col)
self.add_column(self.uptime_col)
@@ -73,11 +73,6 @@
frame = self.frame.children_by_name[self.frame.name + self.frame_path]
return frame.get_href(session, id, frame.parent.host.get(session))
- class HttpColumn(HadoopObjectLinkColumn):
- def render_cell_href(self, session, record):
- link = self.field.get_content(session, record)
- return link
-
class NameNodeCreate(Task):
def __init__(self, app, name, module):
super(NameNodeCreate, self).__init__(app, name)
Modified: trunk/cumin/python/cumin/gridhadoop/tasktracker.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/tasktracker.py 2013-02-18 20:11:04 UTC (rev 5718)
+++ trunk/cumin/python/cumin/gridhadoop/tasktracker.py 2013-02-19 14:24:28 UTC (rev 5719)
@@ -53,7 +53,6 @@
super(TaskTrackerTable, self).__init__(app, name, cls)
self.id_col = self.TaskTrackerColumn(app, "idcol", cls.Id, cls.Id, "tasktrackerframe")
- #self.sub_col = SubmittedColumn(app, "sub", cls.Submitted)
self.state_col = ObjectTableColumn(app, "statecol", cls.State)
self.uptime_col = UptimeColumn(app, "uptimecol", cls.Uptime)
self.owner_col = ObjectTableColumn(app, "ownercol", cls.Owner)
@@ -63,7 +62,6 @@
self.parent_id_col.visible = False
self.add_column(self.id_col)
- #self.add_column(self.sub_col)
self.add_column(self.state_col)
self.add_column(self.uptime_col)
self.add_column(self.owner_col)
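The refactor in r5719 is the usual one: the duplicated inner HttpColumn classes are hoisted into shared, parameterized HadoopIpcColumn/HadoopHttpColumn classes at module level. Reduced to its skeleton (Column stands in for wooly's ObjectTableColumn):

    class Column(object):
        def __init__(self, name, attr):
            self.name, self.attr, self.width = name, attr, None

    # Before: each table defined an inner class just to set a width or title.
    # After: one subclass shared by every table, configured per instance.
    class IpcColumn(Column):
        def __init__(self, name, attr, width):
            super(IpcColumn, self).__init__(name, attr)
            self.width = width                  # e.g. "20%" in the tables above

    jobtracker_ipc = IpcColumn("ipc", "Ipc", "20%")
    namenode_ipc = IpcColumn("ipc", "Ipc", "20%")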
r5718 - trunk/sage/rpc-defs/aviary
by croberts@fedoraproject.org
Author: croberts
Date: 2013-02-18 20:11:04 +0000 (Mon, 18 Feb 2013)
New Revision: 5718
Modified:
trunk/sage/rpc-defs/aviary/aviary-hadoop.xsd
Log:
Updating xsd file for running locally.
Modified: trunk/sage/rpc-defs/aviary/aviary-hadoop.xsd
===================================================================
--- trunk/sage/rpc-defs/aviary/aviary-hadoop.xsd 2013-02-18 17:57:31 UTC (rev 5717)
+++ trunk/sage/rpc-defs/aviary/aviary-hadoop.xsd 2013-02-18 20:11:04 UTC (rev 5718)
@@ -37,6 +37,7 @@
<!-- path to a versioned Hadoop tar/zip binary dist file -->
<xs:element name="bin_file" type="xs:string" minOccurs="0"/>
<xs:element name="owner" type="xs:string" minOccurs="0"/>
+ <xs:element name="description" type="xs:string" minOccurs="0"/>
</xs:sequence>
</xs:complexType>
<xs:complexType name="HadoopStart">
@@ -45,6 +46,7 @@
<xs:element name="ref" type="hdp:HadoopID"/>
<!-- path to a versioned Hadoop tar/zip binary dist file -->
<xs:element name="bin_file" type="xs:string" minOccurs="0"/>
+ <xs:element name="description" type="xs:string" minOccurs="0"/>
<xs:element name="owner" type="xs:string" minOccurs="0"/>
<xs:element name="count" type="xs:int" minOccurs="0" default="1"/>
</xs:sequence>
@@ -82,6 +84,7 @@
<xs:element name="ref" type="hdp:HadoopID"/>
<xs:element name="parent" type="hdp:HadoopID"/>
<xs:element name="owner" type="xs:string"/>
+ <xs:element name="description" type="xs:string"/>
<xs:element name="submitted" type="xs:int"/>
<xs:element name="uptime" type="xs:int"/>
<xs:element name="state" type="hdp:HadoopStateType"/>
r5717 - branches/tmckay/sage/python/sage/wallaby
by tmckay@fedoraproject.org
Author: tmckay
Date: 2013-02-18 17:57:31 +0000 (Mon, 18 Feb 2013)
New Revision: 5717
Modified:
branches/tmckay/sage/python/sage/wallaby/wallabyoperations.py
Log:
Undo intermediate change for now (5712)
Modified: branches/tmckay/sage/python/sage/wallaby/wallabyoperations.py
===================================================================
--- branches/tmckay/sage/python/sage/wallaby/wallabyoperations.py 2013-02-18 17:52:26 UTC (rev 5716)
+++ branches/tmckay/sage/python/sage/wallaby/wallabyoperations.py 2013-02-18 17:57:31 UTC (rev 5717)
@@ -145,57 +145,44 @@
self._condition.wait(retry_secs)
self._condition.release()
- last_wallaroo_tag = None
+ # Init remaining time til next update to 0 for each
+ # cached item in case the thread was restarted
+ for attr, val in self._cache.iteritems():
+ val.remaining = 0
# Okay, now we're ready to retrieve data
while not self._stop:
start_processing = time.time()
+ for attr, val in self._cache.iteritems():
+ if self._stop:
+ break
- # Get the current tag from wallaroo and see if the store
- # has been changed since we last saw it. If so, refresh
- # the cache with current values, potentially moving our
- # branch to current first and following up with a replay
- # of unactivated changes.
- # This represents other entities outside of cumin modifying the
- # store. We want to see those changes.
- wallaroo_current_tag = self._get_wallaroo_current_tag()
- if not self._compare_tags(wallaroo_current_tag,
- last_wallaroo_tag):
+ # val.remaining is the number of seconds left before
+ # the next update of this data item. None is "forever".
+ # Synthetic items are not retrieved from the store.
+ if not val.synthetic and \
+ val.remaining is not None and val.remaining <= 0:
+ val.get_values(attr, self)
- # Before we update the cache, change to the current tag
- # if we are working on a local branch. If we are already
- # current then we can skip this step.
- need_replay = False
- if not self._my_branch_is_current():
- # Okay, we are moving to the current branch.
- # After we rebuild the cache, we need to replay
- # unactivated changes.
- need_replay = True
- self._update_branch_to_current()
-
- for attr, val in self._cache.iteritems():
- if self._stop:
- break
- if not val.synthetic:
- val.get_values(attr, self)
+ # Now handle the synthetics. val.synthetic generates
+ # and stores its own results.
+ for attr, val in self._cache.iteritems():
+ if self._stop:
+ break
- # Now handle the synthetics. val.synthetic generates
- # and stores its own results.
- for attr, val in self._cache.iteritems():
- if self._stop:
- break
- if val.synthetic:
- val.get_values(attr, self)
+ if val.synthetic and \
+ val.remaining is not None and val.remaining <= 0:
+ val.get_values(attr, self)
+
+ log.debug("WallabyOperations: total refresh processing time %s" \
+ % (time.time() - start_processing))
- last_wallaroo_tag = wallaroo_current_tag
- log.debug("WallabyOperations: total refresh processing time %s" \
- % (time.time() - start_processing))
+ # Find out how long we should sleep for.
+ # Based on min remaining times for all items
+ # If minimum is 0 because we have items waiting
+ # for a retry, we fall back on retry_secs as a minimum.
+ sleep_time = self._find_min_remaining(min=retry_secs)
- # Replay our unactivated changes
- if need_replay:
- pass
-
- sleep_time = 30
self._condition.acquire()
if not self._stop:
# Could be signaled, so track the actual sleep time
@@ -206,14 +193,39 @@
slept = time.time() - bed_time
log.debug("WallabyOperations: cache thread slept for"\
" %s seconds" % slept)
+
+ # When we wake up from sleep here, we already
+ # have the lock so we might as well check refresh
+ # and adjust the "remaining" values
+ for attr, val in self._cache.iteritems():
+ if val.refresh: # Force an update
+ val.remaining = 0
+ val.refresh = False
+ elif val.remaining is not None:
+ val.remaining -= slept
self._condition.release()
# Clear cache if we have been stopped....
for attr in self._cache:
self._set_cache(attr, [])
self._store = None
+
#end maintain_cache
+ def get_values(attr, call, *args):
+ log.debug("WallabyOperations: refreshing %s" % attr)
+ try:
+ # Wallaby API uses extensions to __getattr__ on
+ # the Store to retrieve objects from the Broker
+ # and return a list of proxy objects.
+ start = time.time()
+ d = call(*args)
+ except:
+ d = []
+ delta = time.time() - start
+ log.debug("WallabyOperations: %s seconds to refresh %s" % (delta, attr))
+ return d
+
# Wrap the entire cache thread with an exception handler
def wrap_maintain_cache():
try:
@@ -583,23 +595,6 @@
# Super secret private implementation stuff. Don't look!
- def _get_current_wallaroo_tag(self):
- return self._store.cm.fetch_json_resource("/tags/current")
-
- def _compare_tags(self, first, second):
- try:
- return first["commit"] == second["commit"]
- except:
- pass
- return False
-
- def _my_branch_is_current(self):
- my_branch = self._store.cm.how.to_q()
- return 'tag' in my_branch and my_branch['tag'] == 'current'
-
- def _update_branch_to_current(self):
- self._store.cm.how = wallaroo.client.cmeta.mk_how({"tag": "current"})
-
def _find_min_remaining(self, min):
# None indicates forever, the biggest value
# Note though that None < int is True in Python!
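The loop restored here is a per-item countdown scheduler: every cached item carries a remaining deadline, the thread sleeps until the soonest one (falling back to retry_secs when items are merely awaiting a retry), and on waking charges the time actually slept against each countdown, treating refresh as force-an-update and None as never. A condensed runnable sketch under those assumptions; Item, maintain, and the update method are illustrative names, and condition is a threading.Condition:

    import time

    class Item(object):
        """Stand-in for a cache entry with a refresh period in seconds."""
        def __init__(self, period):
            self.period = period      # None means "never update"
            self.remaining = 0        # countdown until the next update
            self.refresh = False      # set externally to force an update
        def update(self):             # stand-in for val.get_values(attr, self)
            self.remaining = self.period

    def maintain(cache, condition, retry_secs=10, stop=lambda: False):
        for item in cache.values():   # thread (re)started: refresh everything
            item.remaining = 0
        while not stop():
            for name, item in cache.items():
                if item.remaining is not None and item.remaining <= 0:
                    item.update()
            # Sleep until the soonest deadline; anything still <= 0 is waiting
            # on a retry, so clamp to retry_secs rather than spinning.
            deadlines = [i.remaining for i in cache.values()
                         if i.remaining is not None]
            sleep_time = min(deadlines) if deadlines else retry_secs
            if sleep_time <= 0:
                sleep_time = retry_secs
            condition.acquire()
            bed_time = time.time()
            condition.wait(sleep_time)          # may be signaled early
            slept = time.time() - bed_time
            for item in cache.values():
                if item.refresh:                # forced update: due immediately
                    item.remaining, item.refresh = 0, False
                elif item.remaining is not None:
                    item.remaining -= slept     # charge actual, not nominal, sleep
            condition.release()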
r5716 - in branches/tmckay: mint/python/mint/aviary sage/python/sage/aviary
by tmckay@fedoraproject.org
Author: tmckay
Date: 2013-02-18 17:52:26 +0000 (Mon, 18 Feb 2013)
New Revision: 5716
Modified:
branches/tmckay/mint/python/mint/aviary/endpoints.py
branches/tmckay/sage/python/sage/aviary/aviaryoperations.py
Log:
Fix up collector endpoint resource types
Modified: branches/tmckay/mint/python/mint/aviary/endpoints.py
===================================================================
--- branches/tmckay/mint/python/mint/aviary/endpoints.py 2013-02-18 16:14:23 UTC (rev 5715)
+++ branches/tmckay/mint/python/mint/aviary/endpoints.py 2013-02-18 17:52:26 UTC (rev 5716)
@@ -115,7 +115,7 @@
self.type_map = \
{("CUSTOM","QUERY_SERVER"): self._make_submission_endpoint,
- ("COLLECTOR", ""): self._make_collector_endpoint}
+ ("COLLECTOR", "POOL"): self._make_collector_endpoint}
def get_endpoint_types(self):
# This routine maps Rosemary class names to Aviary
@@ -123,11 +123,11 @@
# what types of endpoints to report
classes_to_aviary = \
- {"Collector": ("COLLECTOR", ""),
- "Negotiator": ("COLLECTOR", ""),
- "Scheduler": ("COLLECTOR", ""),
- "Submitter": ("COLLECTOR", ""),
- "Slot": ("COLLECTOR", ""),
+ {"Collector": ("COLLECTOR", "POOL"),
+ "Negotiator": ("COLLECTOR", "POOL"),
+ "Scheduler": ("COLLECTOR", "POOL"),
+ "Submitter": ("COLLECTOR", "POOL"),
+ "Slot": ("COLLECTOR", "POOL"),
"Submission": ("CUSTOM", "QUERY_SERVER")}
endpoints = set()
Modified: branches/tmckay/sage/python/sage/aviary/aviaryoperations.py
===================================================================
--- branches/tmckay/sage/python/sage/aviary/aviaryoperations.py 2013-02-18 16:14:23 UTC (rev 5715)
+++ branches/tmckay/sage/python/sage/aviary/aviaryoperations.py 2013-02-18 17:52:26 UTC (rev 5716)
@@ -77,7 +77,7 @@
_nice = {("SCHEDULER","JOB"): "job service",
("CUSTOM","QUERY_SERVER"): "query service",
("SCHEDULER","HADOOP"): "hadoop service",
- ("COLLECTOR",""): "collector service"}
+ ("COLLECTOR","POOL"): "collector service"}
def __init__(self, resource, subtype):
self.servers = None
@@ -1259,7 +1259,7 @@
"9000",
"/services/collector/",
"COLLECTOR",
- "")
+ "POOL")
# Equivalence? We don't want no stinking QMF structural equivalence!
self.use_MethodResult_for_sync_calls = False
@@ -1448,9 +1448,8 @@
datadir, hadoop_servers))
if collector_servers:
- # we never use locator for collector (for now)
- self.mechs.append(_AviaryCollectorMethods(None,
- transports,
+ self.mechs.append(_AviaryCollectorMethods(self.locator,
+ transports,
datadir, collector_servers))
self.add_mechanisms(self.mechs)
@@ -1467,8 +1466,7 @@
# id location
# [ ( (name,pool,resource,sub_type), [full urls] ) ]
- # For now collector never uses locator
- if self.locator and resource != "COLLECTOR":
+ if self.locator:
ep = self.locator.get_endpoints(resource, subtype)
if ep.status.code in ("OK", "NO_MATCH"):
if hasattr(ep, "resources"):
r5714 - branches/elephant/cumin/model
by croberts@fedoraproject.org
Author: croberts
Date: 2013-02-18 16:11:43 +0000 (Mon, 18 Feb 2013)
New Revision: 5714
Modified:
branches/elephant/cumin/model/rosemary.xml
Log:
Additional format tweaks
Modified: branches/elephant/cumin/model/rosemary.xml
===================================================================
--- branches/elephant/cumin/model/rosemary.xml 2013-02-18 16:00:42 UTC (rev 5713)
+++ branches/elephant/cumin/model/rosemary.xml 2013-02-18 16:11:43 UTC (rev 5714)
@@ -78,6 +78,7 @@
</property>
<property name="Submitted">
<title>Submitted</title>
+ <formatter>fmt_timestamp</formatter>
</property>
<property name="State">
<title>State</title>
@@ -107,6 +108,7 @@
</property>
<property name="Submitted">
<title>Submitted</title>
+ <formatter>fmt_timestamp</formatter>
</property>
<property name="State">
<title>State</title>
@@ -130,6 +132,7 @@
</property>
<property name="Submitted">
<title>Submitted</title>
+ <formatter>fmt_timestamp</formatter>
</property>
<property name="State">
<title>State</title>
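The fmt_timestamp formatter being attached to the Submitted properties is not shown in this diff; presumably it renders the stored epoch value as a readable time. A purely hypothetical stand-in, only to show the shape such a formatter would take (the real one lives in cumin and may differ):

    import time

    def fmt_timestamp(value):
        # Hypothetical: render epoch seconds as a readable local time,
        # leaving unset values alone.
        if value in (None, 0):
            return None
        return time.strftime("%d %b %Y %H:%M:%S", time.localtime(value))

    # fmt_timestamp(1361203201) -> e.g. "18 Feb 2013 16:00:01" (local time)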
r5713 - branches/elephant/cumin/python/cumin/gridhadoop
by croberts@fedoraproject.org
Author: croberts
Date: 2013-02-18 16:00:42 +0000 (Mon, 18 Feb 2013)
New Revision: 5713
Modified:
branches/elephant/cumin/python/cumin/gridhadoop/hadoop.py
branches/elephant/cumin/python/cumin/gridhadoop/hadoop.strings
branches/elephant/cumin/python/cumin/gridhadoop/jobtracker.py
branches/elephant/cumin/python/cumin/gridhadoop/namenode.py
Log:
Links to name node/job tracker http interface now open in new tab/window.
Modified: branches/elephant/cumin/python/cumin/gridhadoop/hadoop.py
===================================================================
--- branches/elephant/cumin/python/cumin/gridhadoop/hadoop.py 2013-02-18 15:07:28 UTC (rev 5712)
+++ branches/elephant/cumin/python/cumin/gridhadoop/hadoop.py 2013-02-18 16:00:42 UTC (rev 5713)
@@ -13,6 +13,7 @@
from wooly import Session, Widget
from wooly.datatable import *
+from wooly.table import LinkColumnCell
from wooly.util import StringCatalog, xml_escape
from rosemary.model import RosemaryObject
@@ -301,3 +302,17 @@
invoc.exception = e
invoc.status = invoc.FAILED
invoc.end()
+
+class HadoopObjectLinkColumn(ObjectLinkColumn):
+ def __init__(self, app, name, attr, id_attr, frame_path):
+ super(HadoopObjectLinkColumn, self).__init__(app, name, attr, id_attr, frame_path)
+
+ self.id_attr = id_attr
+ self.frame_path = frame_path
+
+ self.cell = HadoopLinkColumnCell(app, "cell")
+ self.replace_child(self.cell)
+
+class HadoopLinkColumnCell(LinkColumnCell):
+ #needed so we can override display
+ pass
Modified: branches/elephant/cumin/python/cumin/gridhadoop/hadoop.strings
===================================================================
--- branches/elephant/cumin/python/cumin/gridhadoop/hadoop.strings 2013-02-18 15:07:28 UTC (rev 5712)
+++ branches/elephant/cumin/python/cumin/gridhadoop/hadoop.strings 2013-02-18 16:00:42 UTC (rev 5713)
@@ -1,2 +1,7 @@
[HadoopObjectDetails.html]
-{props}
\ No newline at end of file
+{props}
+
+
+
+[HadoopLinkColumnCell.html]
+<td class="{class}"><a href="{href}" title="{title}" target="_blank">{content}</a></td>
\ No newline at end of file
Modified: branches/elephant/cumin/python/cumin/gridhadoop/jobtracker.py
===================================================================
--- branches/elephant/cumin/python/cumin/gridhadoop/jobtracker.py 2013-02-18 15:07:28 UTC (rev 5712)
+++ branches/elephant/cumin/python/cumin/gridhadoop/jobtracker.py 2013-02-18 16:00:42 UTC (rev 5713)
@@ -77,7 +77,7 @@
frame = self.frame.children_by_name[self.frame.name + self.frame_path]
return frame.get_href(session, id, frame.parent.host.get(session))
- class HttpColumn(ObjectLinkColumn):
+ class HttpColumn(HadoopObjectLinkColumn):
def render_cell_href(self, session, record):
link = self.field.get_content(session, record)
return link
Modified: branches/elephant/cumin/python/cumin/gridhadoop/namenode.py
===================================================================
--- branches/elephant/cumin/python/cumin/gridhadoop/namenode.py 2013-02-18 15:07:28 UTC (rev 5712)
+++ branches/elephant/cumin/python/cumin/gridhadoop/namenode.py 2013-02-18 16:00:42 UTC (rev 5713)
@@ -73,7 +73,7 @@
frame = self.frame.children_by_name[self.frame.name + self.frame_path]
return frame.get_href(session, id, frame.parent.host.get(session))
- class HttpColumn(ObjectLinkColumn):
+ class HttpColumn(HadoopObjectLinkColumn):
def render_cell_href(self, session, record):
link = self.field.get_content(session, record)
return link
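The technique here: rather than change any link logic, the column subclass swaps in a cell widget whose HTML template adds target="_blank" (the HadoopLinkColumnCell.html string in this diff). A reduced sketch of that template-swap pattern; wooly's templating uses {class}, which is not a valid Python keyword argument, so the stand-in renames it cls:

    class LinkColumnCell(object):
        TEMPLATE = ('<td class="{cls}"><a href="{href}" '
                    'title="{title}">{content}</a></td>')
        def render(self, **kw):
            return self.TEMPLATE.format(**kw)

    class NewWindowLinkColumnCell(LinkColumnCell):
        # Same data, different markup: only the template changes.
        TEMPLATE = ('<td class="{cls}"><a href="{href}" title="{title}" '
                    'target="_blank">{content}</a></td>')

    cell = NewWindowLinkColumnCell()
    html = cell.render(cls="idcol", href="http://namenode:50070",  # illustrative
                       title="name node UI", content="http")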
r5712 - branches/tmckay/sage/python/sage/wallaby
by tmckay@fedoraproject.org
Author: tmckay
Date: 2013-02-18 15:07:28 +0000 (Mon, 18 Feb 2013)
New Revision: 5712
Modified:
branches/tmckay/sage/python/sage/wallaby/wallabyoperations.py
Log:
Intermediate change
Modified: branches/tmckay/sage/python/sage/wallaby/wallabyoperations.py
===================================================================
--- branches/tmckay/sage/python/sage/wallaby/wallabyoperations.py 2013-02-18 15:00:53 UTC (rev 5711)
+++ branches/tmckay/sage/python/sage/wallaby/wallabyoperations.py 2013-02-18 15:07:28 UTC (rev 5712)
@@ -145,44 +145,57 @@
self._condition.wait(retry_secs)
self._condition.release()
- # Init remaining time til next update to 0 for each
- # cached item in case the thread was restarted
- for attr, val in self._cache.iteritems():
- val.remaining = 0
+ last_wallaroo_tag = None
# Okay, now we're ready to retrieve data
while not self._stop:
start_processing = time.time()
- for attr, val in self._cache.iteritems():
- if self._stop:
- break
- # val.remaining is the number of seconds left before
- # the next update of this data item. None is "forever".
- # Synthetic items are not retrieved from the store.
- if not val.synthetic and \
- val.remaining is not None and val.remaining <= 0:
- val.get_values(attr, self)
+ # Get the current tag from wallaroo and see if the store
+ # has been changed since we last saw it. If so, refresh
+ # the cache with current values, potentially moving our
+ # branch to current first and following up with a replay
+ # of unactivated changes.
+ # This represents other entities outside of cumin modifying the
+ # store. We want to see those changes.
+ wallaroo_current_tag = self._get_wallaroo_current_tag()
+ if not self._compare_tags(wallaroo_current_tag,
+ last_wallaroo_tag):
- # Now handle the synthetics. val.synthetic generates
- # and stores its own results.
- for attr, val in self._cache.iteritems():
- if self._stop:
- break
+ # Before we update the cache, change to the current tag
+ # if we are working on a local branch. If we are already
+ # current then we can skip this step.
+ need_replay = False
+ if not self._my_branch_is_current():
+ # Okay, we are moving to the current branch.
+ # After we rebuild the cache, we need to replay
+ # unactivated changes.
+ need_replay = True
+ self._update_branch_to_current()
+
+ for attr, val in self._cache.iteritems():
+ if self._stop:
+ break
+ if not val.synthetic:
+ val.get_values(attr, self)
- if val.synthetic and \
- val.remaining is not None and val.remaining <= 0:
- val.get_values(attr, self)
-
- log.debug("WallabyOperations: total refresh processing time %s" \
- % (time.time() - start_processing))
+ # Now handle the synthetics. val.synthetic generates
+ # and stores its own results.
+ for attr, val in self._cache.iteritems():
+ if self._stop:
+ break
+ if val.synthetic:
+ val.get_values(attr, self)
- # Find out how long we should sleep for.
- # Based on min remaining times for all items
- # If minimum is 0 because we have items waiting
- # for a retry, we fall back on retry_secs as a minimum.
- sleep_time = self._find_min_remaining(min=retry_secs)
+ last_wallaroo_tag = wallaroo_current_tag
+ log.debug("WallabyOperations: total refresh processing time %s" \
+ % (time.time() - start_processing))
+ # Replay our unactivated changes
+ if need_replay:
+ pass
+
+ sleep_time = 30
self._condition.acquire()
if not self._stop:
# Could be signaled, so track the actual sleep time
@@ -193,39 +206,14 @@
slept = time.time() - bed_time
log.debug("WallabyOperations: cache thread slept for"\
" %s seconds" % slept)
-
- # When we wake up from sleep here, we already
- # have the lock so we might as well check refresh
- # and adjust the "remaining" values
- for attr, val in self._cache.iteritems():
- if val.refresh: # Force an update
- val.remaining = 0
- val.refresh = False
- elif val.remaining is not None:
- val.remaining -= slept
self._condition.release()
# Clear cache if we have been stopped....
for attr in self._cache:
self._set_cache(attr, [])
self._store = None
-
#end maintain_cache
- def get_values(attr, call, *args):
- log.debug("WallabyOperations: refreshing %s" % attr)
- try:
- # Wallaby API uses extensions to __getattr__ on
- # the Store to retrieve objects from the Broker
- # and return a list of proxy objects.
- start = time.time()
- d = call(*args)
- except:
- d = []
- delta = time.time() - start
- log.debug("WallabyOperations: %s seconds to refresh %s" % (delta, attr))
- return d
-
# Wrap the entire cache thread with an exception handler
def wrap_maintain_cache():
try:
@@ -595,6 +583,23 @@
# Super secret private implementation stuff. Don't look!
+ def _get_current_wallaroo_tag(self):
+ return self._store.cm.fetch_json_resource("/tags/current")
+
+ def _compare_tags(self, first, second):
+ try:
+ return first["commit"] == second["commit"]
+ except:
+ pass
+ return False
+
+ def _my_branch_is_current(self):
+ my_branch = self._store.cm.how.to_q()
+ return 'tag' in my_branch and my_branch['tag'] == 'current'
+
+ def _update_branch_to_current(self):
+ self._store.cm.how = wallaroo.client.cmeta.mk_how({"tag": "current"})
+
def _find_min_remaining(self, min):
# None indicates forever, the biggest value
# Note though that None < int is True in Python!
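This intermediate change swaps the fixed-period refresh for change detection: poll wallaroo for the store's current tag and rebuild the cache only when the commit id differs from the last one seen. Note the added helper is defined as _get_current_wallaroo_tag while the loop calls self._get_wallaroo_current_tag(), a name mismatch that would raise AttributeError when the loop ran, and perhaps part of why r5717 backed this out. A standalone sketch of the polling idea (function names are illustrative):

    import time

    def poll_for_changes(fetch_tag, refresh, period=30, stop=lambda: False):
        """Rebuild caches only when the store's current commit changes.
        fetch_tag() returns something like {"commit": "<sha>"}; refresh()
        rebuilds the cached data."""
        last = None
        while not stop():
            current = fetch_tag()
            try:
                unchanged = current["commit"] == last["commit"]
            except (TypeError, KeyError):
                unchanged = False     # first pass, or a malformed tag
            if not unchanged:
                refresh()
                last = current
            time.sleep(period)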