Branch 'cloudfsd' - scripts/cfs_add_directory.py scripts/cfs_add_volume.py scripts/cfs_start_volume.py scripts/cfs_utils.py scripts/cloudfsd.py scripts/styles scripts/views ToDo
by Jeff Darcy
ToDo | 3 +-
scripts/cfs_add_directory.py | 120 ++++++++++++++++++++++++++++++++++++++++
scripts/cfs_add_volume.py | 36 +++++++-----
scripts/cfs_start_volume.py | 3 +
scripts/cfs_utils.py | 13 ++++
scripts/cloudfsd.py | 15 +++++
scripts/styles/cfgmain.css | 7 +-
scripts/styles/provlist.css | 16 -----
scripts/views/add_dir_done.html | 15 +++++
scripts/views/add_vol_done.html | 8 ++
scripts/views/tenants.html | 2 +-
scripts/views/volumes.html | 18 ++++--
12 files changed, 218 insertions(+), 38 deletions(-)
New commits:
commit 70633f02811345a63e67f6767b1be444248678a3
Author: Jeff Darcy <jdarcy@redhat.com>
Date: Thu May 12 12:59:00 2011 -0400
Allow user to add non-mountpoint directories.
diff --git a/ToDo b/ToDo
index e51d2bd..db4cf21 100644
--- a/ToDo
+++ b/ToDo
@@ -1,5 +1,4 @@
= High Priority =
-Use arbitrary directories, not just mountpoints
Create log/pid directories, move port links to /var/run
Interface to remove volumes
Make sure CLI equivalents work
@@ -7,6 +6,7 @@ Make sure CLI equivalents work
= Medium Priority =
SSL
Sanitize volume/tenant names etc. to avoid XSS/injection
+Add/document code to generate brick list for volume creation
Handle IPv6, multi-homed hosts, localhost in scan_gfs_volfiles
Add "bread crumbs" everywhere for navigation
Interface to remove servers
@@ -16,6 +16,7 @@ Sanity checking for volume configs
Handle zero-length tenant list without alice/bob filler
= Low Priority =
+Interface to remove non-mountpoint directories
Import GlusterFS volume to CloudFS
Eliminate clean_and_run in favor of cfs_utils.run_cmd
diff --git a/scripts/cfs_add_directory.py b/scripts/cfs_add_directory.py
new file mode 100644
index 0000000..508f501
--- /dev/null
+++ b/scripts/cfs_add_directory.py
@@ -0,0 +1,120 @@
+
+import os
+import re
+import socket
+import string
+import sys
+import urllib
+import urllib2
+
+from bottle import request, template
+import cfs_utils
+import cfs_paths
+
+c_range_re = re.compile(r"\[(?P<first>.)-(?P<last>.)\]")
+n_range_re = re.compile(r"\[(?P<first>[1-9][0-9]*)-(?P<last>[1-9][0-9]*)\]")
+a_range_re = re.compile(r"\{(?P<stuff>[^}]+)\}")
+
+def expand (path):
+ old_paths = [path]
+ # Keep going until there are no more expansions.
+ while True:
+ new_paths = []
+ n_subs = 0
+ for p in old_paths:
+ m = c_range_re.search(p)
+ if m:
+ pfx = p[:m.start()]
+ sfx = p[m.end():]
+ first = m.group("first")
+ last = m.group("last")
+ cur = first
+ while True:
+ munged = pfx + cur + sfx
+ new_paths.append(munged)
+ if cur == last:
+ break
+ cur = chr(ord(cur)+1)
+ n_subs += 1
+ continue
+ m = n_range_re.search(p)
+ if m:
+ pfx = p[:m.start()]
+ sfx = p[m.end():]
+ first = int(m.group("first"))
+ last = int(m.group("last"))
+ cur = first
+ while True:
+ munged = "%s%d%s" % (pfx, cur, sfx)
+ new_paths.append(munged)
+ if cur == last:
+ break
+ cur += 1
+ n_subs += 1
+ continue
+ m = a_range_re.search(p)
+ if m:
+ pfx = p[:m.start()]
+ sfx = p[m.end():]
+ for alt in m.group("stuff").split(","):
+ munged = pfx + alt + sfx
+ new_paths.append(munged)
+ n_subs += 1
+ continue
+ new_paths.append(p)
+ if not n_subs:
+ return new_paths
+ old_paths = new_paths
+ # Go back and do it again.
+
+def add_local (path):
+ # TBD: all sorts of input-validity checking
+ db_obj = cfs_utils.open_db()
+ # Get the current list of extra paths.
+ paths_by_node = {}
+ for node in [k for k in db_obj.keys() if k.startswith("xp_")]:
+ xp_list = db_obj[node].split(",")
+ if xp_list == [""]:
+ xp_list = []
+ paths_by_node[node[3:]] = set(xp_list)
+ # Add the user's paths.
+ for p in expand(path):
+ parts = string.split(p,":")
+ if len(parts) != 2:
+ return "add_local(%s) rejected %s on %s" % (
+ path, p, socket.gethostname())
+ node, dir = parts
+ if paths_by_node.has_key(node):
+ paths_by_node[node].add(dir)
+ else:
+ paths_by_node[node] = set([dir])
+ # Store back the results.
+ for key, value in paths_by_node.iteritems():
+ db_obj["xp_"+key] = string.join(value,",")
+ return "add_local(%s) OK on %s" % (path, socket.gethostname())
+
+def run_common (path):
+ node_list = cfs_utils.get_members()
+ blob = []
+ for node in node_list:
+ scratch = [node, []]
+ if socket.gethostbyname(node) in cfs_utils.local_addrs:
+ url_obj = [add_local(path)]
+ else:
+ url = "http://%s:%d/volumes/add_dir_local" % (
+ node, cfs_paths.CLOUDFSD_PORT)
+ data = urllib.urlencode([("path",path)])
+ url_obj = urllib2.urlopen(url,data=data)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ return template("add_dir_done.html",path=path,blob=blob)
+
+def run_www ():
+ path = request.forms.get("path")
+ return run_common(path)
+
+if __name__ == "__main__":
+ path = sys.argv[1]
+ #run_common(path)
+ print add_local(path)
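For reference, a quick sketch of what expand() yields for each wildcard form. The host and brick names here are hypothetical, and importing the module assumes its dependencies (bottle etc.) are available:

>>> from cfs_add_directory import expand
>>> expand("server1:/bricks/brick[a-c]")
['server1:/bricks/bricka', 'server1:/bricks/brickb', 'server1:/bricks/brickc']
>>> expand("server1:/bricks/brick[9-10]")
['server1:/bricks/brick9', 'server1:/bricks/brick10']
>>> expand("{alpha,bravo}:/bricks/brick20")
['alpha:/bricks/brick20', 'bravo:/bricks/brick20']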
diff --git a/scripts/cfs_add_volume.py b/scripts/cfs_add_volume.py
index 1835b51..4af663b 100644
--- a/scripts/cfs_add_volume.py
+++ b/scripts/cfs_add_volume.py
@@ -1,14 +1,22 @@
import os
import re
+import socket
import string
import sys
+import urllib
+import urllib2
from bottle import request, template
+import cfs_paths
import cfs_utils
-def run_common (vname, vtype, vcount, bricks):
+def add_local (vname):
+ db_obj = cfs_utils.open_db()
+ db_obj["vt_"+vname] = ""
+ return "add_local(%s) OK on %s" % (vname, socket.gethostname())
+def run_common (vname, vtype, vcount, bricks):
# TBD: all sorts of input-validity checking
# TBD: construct sane brick list
cmd = "volume create %s" % vname
@@ -19,18 +27,20 @@ def run_common (vname, vtype, vcount, bricks):
if sts:
return template("add_vol_fail.html", name=vname,
action="gluster", status=sts)
-
- db_obj = cfs_utils.open_db()
- db_obj["vt_"+vname] = ""
-
- # TBD: generating the cloudfs volfiles (client+server) and starting the
- # glusterfsd daemons should be part of a separate "start" action, so
- # that it's done with a tenant list that's as up-to-date as possible.
- # It should also be done using the cloudfsd fetch/map infrastructure so
- # that cfs_mount.py/mount.cloudfs can actually work. Doing it as a
- # hack on top of Gluster's fetching/portmapping mess, even if it seems
- # to work in some bogus test environment, is a total waste of time.
- return template("add_vol_done.html",name=vname);
+ blob = []
+ for node in cfs_utils.get_nodes_for_vol(vname):
+ scratch = [node,[]]
+ if socket.gethostbyname(node) in cfs_utils.local_addrs:
+ url_obj = [add_local(vname)]
+ else:
+ url = "http://%s:%d/volumes/add_vol_local" % (
+ node, cfs_paths.CLOUDFSD_PORT)
+ data = urllib.urlencode([("vname",vname)])
+ url_obj = urllib2.urlopen(url,data=data)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ return template("add_vol_done.html",name=vname,blob=blob)
def run_www ():
volume_id = request.forms.get("VOLUMEID")
diff --git a/scripts/cfs_start_volume.py b/scripts/cfs_start_volume.py
index 26f0980..c0b8ce7 100644
--- a/scripts/cfs_start_volume.py
+++ b/scripts/cfs_start_volume.py
@@ -122,6 +122,9 @@ def create_tenant_dirs(vol_file):
for opt_dir_line in opt_dir_lines:
tokens = re.split(' ', string.lstrip(opt_dir_line))
path = string.rstrip(tokens[2])
+ ppath = os.path.dirname(path)
+ if not os.path.exists(ppath):
+ os.mkdir(ppath)
if not os.path.exists(path):
os.mkdir(path)
opt_dir_lines.close()
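The hunk above creates only the immediate parent of each tenant directory. If brick paths could ever be more than one level below an existing directory, a more defensive variant would use os.makedirs; a sketch, not what the commit does:

import errno
import os

def ensure_dir(path):
    # Create path and any missing parents; tolerate "already exists".
    try:
        os.makedirs(path)
    except OSError, e:
        if e.errno != errno.EEXIST:
            raise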
diff --git a/scripts/cfs_utils.py b/scripts/cfs_utils.py
index 94be076..9655b58 100644
--- a/scripts/cfs_utils.py
+++ b/scripts/cfs_utils.py
@@ -128,8 +128,19 @@ def get_mounts (brick_list):
if bits[4] not in good_fs_types:
continue
mount = "%s:%s" % (node_ip, bits[2])
- scratch.append((bits[2],dir_in_use(brick_list,mount)))
+ inuse = dir_in_use(brick_list,mount)
+ scratch.append((bits[2],inuse))
volumes_on_nodes[node_ip] = scratch
+ # Add user-defined non-mountpoint directories.
+ db_obj = open_db()
+ for node in [k[3:] for k in db_obj.keys() if k.startswith("xp_")]:
+ dirs = db_obj["xp_"+node].split(",")
+ if dirs == [""]:
+ dirs = []
+ for dir in dirs:
+ mount = "%s:%s" % (node, dir)
+ inuse = dir_in_use(brick_list,mount)
+ volumes_on_nodes.setdefault(node,[]).append((dir,inuse))
return volumes_on_nodes
# Get the list of nodes that are serving (any part of) a specific volume.
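The xp_ entries used above live in the same dbm config database that open_db() returns. A minimal sketch of the key convention, with hypothetical node and path values:

import dbm

db = dbm.open("/var/lib/cloudfs/config.db", "c")    # same path open_db() uses
db["xp_server1"] = "/bricks/extra1,/bricks/extra2"  # hypothetical directories
for key in [k for k in db.keys() if k.startswith("xp_")]:
    node = key[3:]
    dirs = [d for d in db[key].split(",") if d]     # "" splits to [""], drop it
    print node, dirs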
diff --git a/scripts/cloudfsd.py b/scripts/cloudfsd.py
index 99e715a..2cc7259 100755
--- a/scripts/cloudfsd.py
+++ b/scripts/cloudfsd.py
@@ -11,6 +11,7 @@ import cfs_utils
import volmap
import cfs_utils
import cfs_add_node
+import cfs_add_directory
import cfs_add_volume
import cfs_start_volume
import cfs_stop_volume
@@ -49,10 +50,24 @@ def show_volumes():
# TBD: allow adding arbitrary directories instead of just mountpoints
return dict(bricks=brick_list,mounts=mount_list)
+@post("/volumes/add_directory")
+def add_directory ():
+ return cfs_add_directory.run_www()
+
+@post("/volumes/add_dir_local")
+def add_dir_local ():
+ path = request.forms.get("path")
+ return cfs_add_directory.add_local(path)
+
@post("/volumes/add_volume")
def add_volume():
return cfs_add_volume.run_www()
+@post("/volumes/add_vol_local")
+def add_vol_local ():
+ vname = request.forms.get("vname")
+ return cfs_add_volume.add_local(vname)
+
@route("/volumes/:vol_name/start")
def start_volume(vol_name):
return cfs_start_volume.run_www(vol_name)
diff --git a/scripts/styles/cfgmain.css b/scripts/styles/cfgmain.css
index dc79620..82043e8 100644
--- a/scripts/styles/cfgmain.css
+++ b/scripts/styles/cfgmain.css
@@ -25,16 +25,19 @@ body {
table {
border: 2px black solid;
+ background-color: #cccccc;
}
th {
+ text-align: left;
border: 1px black solid;
- padding: 3px;
+ padding: 5px;
background-color: #b0b0ff;
}
td {
+ text-align: left;
border: 1px black solid;
- padding: 2px;
+ padding: 5px;
background-color: #d0d0ff;
}
diff --git a/scripts/styles/provlist.css b/scripts/styles/provlist.css
index 70132a4..2bd771f 100644
--- a/scripts/styles/provlist.css
+++ b/scripts/styles/provlist.css
@@ -1,25 +1,9 @@
-.header {
- float: left;
- width: 100%;
- background-color: #f4f4f4;
-}
-
-.wrapper {
- position: relative;
- float: left;
- left: 0.00%;
- width: 100.00%;
- background-color: #cccccc;
-}
-
tr.d0 td {
background-color: #CC9999;
- color: black;
}
tr.d1 td {
background-color: #9999CC;
- color: black;
}
.footer {
diff --git a/scripts/views/add_dir_done.html b/scripts/views/add_dir_done.html
new file mode 100644
index 0000000..122e4e5
--- /dev/null
+++ b/scripts/views/add_dir_done.html
@@ -0,0 +1,15 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+<p>Directory {{path}} added.</p>
+%for node, output in blob:
+ <p><b>{{node}}</b></p>
+ <pre>
+ %for line in output:
+ {{line}}
+ %end
+ </pre>
+%end
+<p><a href="/volumes">Back to volume configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
diff --git a/scripts/views/add_vol_done.html b/scripts/views/add_vol_done.html
index d6ddf58..6044fa6 100644
--- a/scripts/views/add_vol_done.html
+++ b/scripts/views/add_vol_done.html
@@ -1,6 +1,14 @@
<html><head>
<meta http-equiv="pragma" content="no-cache">
</head><body>
+%for node, output in blob:
+ <p><b>{{node}}</b></p>
+ <pre>
+ %for line in output:
+ {{line}}
+ %end
+ </pre>
+%end
<p>Volume {{name}} created.</p>
<p><a href="/volumes">Back to volume configuration</a></p>
<p><a href="/cfgmain">Back to main menu</a></p>
diff --git a/scripts/views/tenants.html b/scripts/views/tenants.html
index c55dc0a..6b5d069 100644
--- a/scripts/views/tenants.html
+++ b/scripts/views/tenants.html
@@ -10,7 +10,7 @@
</div>
<div class="content">
<h2>Existing Tenants:</h2>
- <table cellspacing="0">
+ <table>
<tr><th>Name</th><th>Password</th><th></th></tr>
%tn_list = tenants.keys()
%tn_list.sort()
diff --git a/scripts/views/volumes.html b/scripts/views/volumes.html
index 489778f..7595052 100644
--- a/scripts/views/volumes.html
+++ b/scripts/views/volumes.html
@@ -22,10 +22,21 @@
</ul>
%end
<hr>
-<h2>Provision a Volume From Available Bricks:</h2>
+<h2>Add Directories</h2>
+<p>Add non-mountpoint directories here. The format is host:path, with wildcard support in the following forms:</p>
+<ul>
+<li>character ranges, e.g. server1:/bricks/brick[a-c]</li>
+<li>number ranges, e.g. server1:/bricks/brick[9-10]</li>
+<li>alternatives, e.g. {alpha,bravo}:/bricks/brick20</li>
+</ul>
+<p>Expansions are attempted in the order shown, and the resulting directories are stored persistently as alternatives to the server mount points that are offered by default.</p>
+<form method="post" action="/volumes/add_directory">
+ <input type="text" name="path" size="40" />
+ <input type="submit" value="Add" />
+</form>
+<hr />
+<h2>Provision a Volume From Available Directories:</h2>
<form method="post" name="provision" action="/volumes/add_volume">
-<div class="header"><hr></div>
-<div class="wrapper">
<table>
%color_index = 0
%for node, mount_list in mounts.iteritems():
@@ -45,7 +56,6 @@
</tr>
%end
</table>
-</div>
<div class="footer"><hr></div>
Volume Type: <input type="radio" name="TYPE" value="plain" checked />Plain
<input type="radio" name="TYPE" value="replica" />Replicated
Branch 'cloudfsd' - scripts/cfs_enable_tenant.py scripts/cfs_start_volume.py ToDo
by Jeff Darcy
ToDo | 1 -
scripts/cfs_enable_tenant.py | 10 +++++++---
scripts/cfs_start_volume.py | 28 +++++++++++-----------------
3 files changed, 18 insertions(+), 21 deletions(-)
New commits:
commit 399e471f61b471d0531f8b1cf06502304ec035a3
Author: Jeff Darcy <jdarcy@redhat.com>
Date: Wed May 11 17:13:54 2011 -0400
Make volume-start code use same DB as tenant add/remove/enable.
diff --git a/ToDo b/ToDo
index 8ed0469..e51d2bd 100644
--- a/ToDo
+++ b/ToDo
@@ -1,5 +1,4 @@
= High Priority =
-Use real tenant list for start/mount (no default, handle zero)
Use arbitrary directories, not just mountpoints
Create log/pid directories, move port links to /var/run
Interface to remove volumes
diff --git a/scripts/cfs_enable_tenant.py b/scripts/cfs_enable_tenant.py
index 26ca099..3855546 100644
--- a/scripts/cfs_enable_tenant.py
+++ b/scripts/cfs_enable_tenant.py
@@ -17,13 +17,17 @@ def enable_local (tn_name, vol_list):
db_obj = cfs_utils.open_db()
db_obj["tv_"+tn_name] = vol_list
for vol in [v[3:] for v in db_obj.keys() if v.startswith("vt_")]:
- vt_list = set(db_obj["vt_"+vol].split(","))
+ vt_list = db_obj["vt_"+vol].split(",")
+ if vt_list == ['']:
+ # This is one of the stupider split() behaviors.
+ vt_list = []
+ vt_list = set(vt_list)
if vol in vol_list.split(","):
print "enabling %s" % vol
- vt_list.add(vol)
+ vt_list.add(tn_name)
else:
print "disabling %s" % vol
- vt_list.discard(vol)
+ vt_list.discard(tn_name)
db_obj["vt_"+vol] = string.join(vt_list,",")
return "Volumes enabled for %s on %s" % (tn_name, socket.gethostname())
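The split() behavior the new guard works around, for reference:

>>> "".split(",")      # an empty DB value yields [''], not []
['']
>>> "a,b".split(",")
['a', 'b']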
diff --git a/scripts/cfs_start_volume.py b/scripts/cfs_start_volume.py
index 4c4e7bb..26f0980 100644
--- a/scripts/cfs_start_volume.py
+++ b/scripts/cfs_start_volume.py
@@ -18,12 +18,6 @@ import cfs_utils
def check_volume_directory(vol_name):
if not os.path.exists(cfs_paths.info_dir):
os.mkdir(cfs_paths.info_dir)
- user_file = open("%s/%s" % (cfs_paths.info_dir, "default_users"), "w")
- # TBD: big gaping security hole until other code can deal
- # with having zero users defined.
- user_file.write("alice password1\nbob password2\n")
- user_file.flush()
- user_file.close()
os.mkdir(cfs_paths.idle_subdir)
for i in range(24010, 24030):
fp = open("%s/%d" % (cfs_paths.idle_subdir, i), "w")
@@ -79,18 +73,16 @@ def allocate_port(vol_file):
# everything that uses this is in Python we could just make it a pickle/shelf
# or whatever, but it would all go away with a real volume database so it's not
# worth the trouble to re-do it now.
-def parse_user_file(vol_name):
- try:
- user_file = open("%s/%s/users" % (cfs_paths.info_dir, vol_name), "r")
- except IOError:
- user_file = open("%s/default_users" % cfs_paths.info_dir, "r")
-
+def parse_user_file (vol_name):
+ db_obj = cfs_utils.open_db()
users = []
- for line in user_file.readlines():
- space = line.find(" ")
- if space == -1:
- print >> sys.stderr, "Bad line in userfile: %s" % line
- users.append([line[:space],line[space+1:-1]])
+ user_list = db_obj["vt_"+vol_name].split(",")
+ if user_list == ['']:
+ user_list = []
+ for name in user_list:
+ print "found user %s" % name
+ pw = db_obj["tp_"+name]
+ users.append([name,pw])
return users
def cloudify_server (input, output, users, port):
@@ -137,6 +129,8 @@ def create_tenant_dirs(vol_file):
def start_local (vol_name):
vol_base = check_volume_directory(vol_name)
users = parse_user_file(vol_name)
+ if not len(users):
+ return "no users for %s on %s" % (vol_name, socket.gethostname())
# TBD: deal with more than one brick on the same server
vf = scan_gfs_volfiles(vol_name)
new_vf = "%s/%s" % (vol_base, os.path.basename(vf))
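With this change the tenant data all lives in the config DB: tp_<tenant> holds the password and vt_<volume> the comma-separated tenant list. A sketch with hypothetical names:

import dbm

db = dbm.open("/var/lib/cloudfs/config.db", "c")
db["tp_alice"] = "password1"   # hypothetical tenant and password
db["vt_myvol"] = "alice"       # tenants enabled on volume "myvol"
# parse_user_file("myvol") would now return [["alice", "password1"]]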
Branch 'cloudfsd' - scripts/cfs_add_tenant.py scripts/cfs_add_volume.py scripts/cfs_delete_tenant.py scripts/cfs_enable_tenant.py scripts/cfs_mount.py scripts/cfs_start_volume.py scripts/cfs_stop_volume.py scripts/cfs_utils.py scripts/cloudfsd.py scripts/styles scripts/views ToDo
by Jeff Darcy
ToDo | 15 +++---
scripts/cfs_add_tenant.py | 48 +++++++++++++++++++
scripts/cfs_add_volume.py | 34 +-------------
scripts/cfs_delete_tenant.py | 42 +++++++++++++++++
scripts/cfs_enable_tenant.py | 60 ++++++++++++++++++++++++
scripts/cfs_mount.py | 11 ++++
scripts/cfs_start_volume.py | 14 ++---
scripts/cfs_stop_volume.py | 12 ++--
scripts/cfs_utils.py | 29 ++++++++++-
scripts/cloudfsd.py | 92 ++++++++++++++++++++++++--------------
scripts/styles/cfgmain.css | 17 +++++++
scripts/views/cfgmain.html | 2 +-
scripts/views/tenant_volumes.html | 25 ++++++++++
scripts/views/tenants.html | 33 +++++++++++++
scripts/views/tn_act_done.html | 15 ++++++
scripts/views/volumes.html | 1 +
16 files changed, 361 insertions(+), 89 deletions(-)
New commits:
commit 6713c3178f4ca54f7959eb663e7f0b3795ad77e7
Author: Jeff Darcy <jdarcy@redhat.com>
Date: Wed May 11 16:21:01 2011 -0400
More progress on volume and tenant interfaces.
Volume interfaces now work across nodes, without inducing reentrancy, and
also actually start the glusterfs daemons. Tenant interfaces include list,
add, remove, and enable volumes.
diff --git a/ToDo b/ToDo
index c769c94..8ed0469 100644
--- a/ToDo
+++ b/ToDo
@@ -1,19 +1,20 @@
= High Priority =
-Interface to start/stop volumes (includes volfile generation and mkdir)
-Get mount.cloudfs to work
-Interface to add/remove tenants
-Interface to map between tenants and volumes
+Use real tenant list for start/mount (no default, handle zero)
Use arbitrary directories, not just mountpoints
+Create log/pid directories, move port links to /var/run
Interface to remove volumes
-Handle IPv6, multi-homed hosts, localhost in scan_gfs_volfiles
-SSL
+Make sure CLI equivalents work
= Medium Priority =
+SSL
+Sanitize volume/tenant names etc. to avoid XSS/injection
+Handle IPv6, multi-homed hosts, localhost in scan_gfs_volfiles
+Add "bread crumbs" everywhere for navigation
Interface to remove servers
+Add mapping from volumes to tenants (already have vice versa)
Deal with glusterd startup consistently
Sanity checking for volume configs
Handle zero-length tenant list without alice/bob filler
-Handle multiple bricks per server when generating volfiles
= Low Priority =
Import GlusterFS volume to CloudFS
diff --git a/scripts/cfs_add_tenant.py b/scripts/cfs_add_tenant.py
new file mode 100644
index 0000000..d36837e
--- /dev/null
+++ b/scripts/cfs_add_tenant.py
@@ -0,0 +1,48 @@
+
+import os
+import re
+import socket
+import string
+import sys
+import urllib
+import urllib2
+
+from bottle import request, template
+import cfs_utils
+import cfs_paths
+
+def add_local (tn_name, tn_pw):
+ # TBD: all sorts of input-validity checking
+ db_obj = cfs_utils.open_db()
+ db_obj["tp_"+tn_name] = tn_pw
+ db_obj["tv_"+tn_name] = ""
+ return "add_local(%s) OK on %s" % (tn_name, socket.gethostname())
+
+def run_common (tn_name, tn_pw):
+ node_list = cfs_utils.get_members()
+ blob = []
+ for node in node_list:
+ scratch = [node, []]
+ if socket.gethostbyname(node) in cfs_utils.local_addrs:
+ url_obj = [add_local(tn_name,tn_pw)]
+ else:
+ url = "http://%s:%d/tenants/add_local" % (
+ node, cfs_paths.CLOUDFSD_PORT)
+ data = urllib.urlencode([("tn_name",tn_name),
+ ("tn_pw",tn_pw)])
+ url_obj = urllib2.urlopen(url,data=data)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ return template("tn_act_done.html",name=tn_name,action="added",
+ blob=blob)
+
+def run_www ():
+ tn_name = request.forms.get("tn_name")
+ tn_pw = request.forms.get("tn_pw")
+ return run_common(tn_name,tn_pw)
+
+if __name__ == "__main__":
+ tn_name = sys.argv[1]
+ tn_pw = sys.argv[2]
+ run_common(tn_name,tn_pw)
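This local-versus-remote fan-out appears in nearly identical form in the volume, tenant, and directory scripts. A condensed sketch of the shared pattern (fan_out itself is hypothetical, not in the tree):

import socket
import urllib
import urllib2

import cfs_paths
import cfs_utils

def fan_out(rel_url, form_fields, local_fn):
    # Run local_fn on this node, POST the same request to every peer.
    blob = []
    for node in cfs_utils.get_members():
        if socket.gethostbyname(node) in cfs_utils.local_addrs:
            lines = [local_fn()]
        else:
            url = "http://%s:%d/%s" % (node, cfs_paths.CLOUDFSD_PORT, rel_url)
            lines = list(urllib2.urlopen(url, data=urllib.urlencode(form_fields)))
        blob.append([node, lines])
    return blob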
diff --git a/scripts/cfs_add_volume.py b/scripts/cfs_add_volume.py
index a9e7617..1835b51 100644
--- a/scripts/cfs_add_volume.py
+++ b/scripts/cfs_add_volume.py
@@ -1,6 +1,6 @@
-import re
import os
+import re
import string
import sys
@@ -20,36 +20,8 @@ def run_common (vname, vtype, vcount, bricks):
return template("add_vol_fail.html", name=vname,
action="gluster", status=sts)
- ## make the cloudfs vol files with cloudfs
- # TBD: this should go away in favor of generating these at start
- # time; see big TBD near the end of the function
- cmd = "init %s /var/lib/glusterd/cloudfs.tenants" % vname
- for node in cfs_utils.get_members()[1:]:
- sts = cfs_utils.run_cmd("cloudfs",cmd,host=node).wait()
- if sts:
- act = "cloudfs on %s" % node
- return template("add_vol_fail.html", name=vname,
- action=act, status=sts)
-
- ## make the dirs on each node/volume
- ## first get all the tenants
- tenants = []
- tenants.append("junk")
- tenant_file = open("/var/lib/glusterd/cloudfs.tenants","r")
- for tenantline in tenant_file:
- scratch = re.split(' ', tenantline)
- tenants.append(scratch[0])
- ## now make the dirs on every volume
- for b in bricks:
- scratch = re.split(':', b)
- for tenant in tenants :
- node = scratch[0]
- cmd = "%s/%s" % (scratch[1], tenant)
- sts = cfs_utils.run_cmd("mkdir",cmd,host=node).wait()
- if sts:
- act = "mkdir on %s" % node
- return template("add_vol_fail.html", name=vname,
- action=act, status=sts)
+ db_obj = cfs_utils.open_db()
+ db_obj["vt_"+vname] = ""
# TBD: generating the cloudfs volfiles (client+server) and starting the
# glusterfsd daemons should be part of a separate "start" action, so
diff --git a/scripts/cfs_delete_tenant.py b/scripts/cfs_delete_tenant.py
new file mode 100644
index 0000000..ebbd35d
--- /dev/null
+++ b/scripts/cfs_delete_tenant.py
@@ -0,0 +1,42 @@
+
+import os
+import re
+import socket
+import string
+import sys
+import urllib2
+
+from bottle import request, template
+import cfs_utils
+import cfs_paths
+
+def delete_local (tn_name):
+ # TBD: all sorts of input-validity checking
+ db_obj = cfs_utils.open_db()
+ del db_obj["tp_"+tn_name]
+ del db_obj["tv_"+tn_name]
+ return "delete_local(%s) OK on %s" % (tn_name, socket.gethostname())
+
+def run_common (tn_name):
+ node_list = cfs_utils.get_members()
+ blob = []
+ for node in node_list:
+ scratch = [node, []]
+ if socket.gethostbyname(node) in cfs_utils.local_addrs:
+ url_obj = [delete_local(tn_name)]
+ else:
+ url = "http://%s:%d/tenants/%s/delete_local" % (
+ node, cfs_paths.CLOUDFSD_PORT, tn_name)
+ url_obj = urllib2.urlopen(url)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ return template("tn_act_done.html",name=tn_name,action="deleted",
+ blob=blob)
+
+def run_www (tn_name):
+ return run_common(tn_name)
+
+if __name__ == "__main__":
+ tn_name = sys.argv[1]
+ run_common(tn_name)
diff --git a/scripts/cfs_enable_tenant.py b/scripts/cfs_enable_tenant.py
new file mode 100644
index 0000000..26ca099
--- /dev/null
+++ b/scripts/cfs_enable_tenant.py
@@ -0,0 +1,60 @@
+
+import os
+import re
+import socket
+import string
+import sys
+import urllib
+import urllib2
+
+from bottle import request, template
+import cfs_paths
+import cfs_utils
+
+def enable_local (tn_name, vol_list):
+
+ # TBD: all sorts of input-validity checking
+ db_obj = cfs_utils.open_db()
+ db_obj["tv_"+tn_name] = vol_list
+ for vol in [v[3:] for v in db_obj.keys() if v.startswith("vt_")]:
+ vt_list = set(db_obj["vt_"+vol].split(","))
+ if vol in vol_list.split(","):
+ print "enabling %s" % vol
+ vt_list.add(vol)
+ else:
+ print "disabling %s" % vol
+ vt_list.discard(vol)
+ db_obj["vt_"+vol] = string.join(vt_list,",")
+ return "Volumes enabled for %s on %s" % (tn_name, socket.gethostname())
+
+def run_common (tn_name, vol_list):
+ vol_list = string.join(vol_list,",")
+ node_list = cfs_utils.get_members()
+ blob = []
+ for node in node_list:
+ scratch = [node, []]
+ if socket.gethostbyname(node) in cfs_utils.local_addrs:
+ url_obj = [enable_local(tn_name,vol_list)]
+ else:
+ url = "http://%s:%d/tenants/enable_local" % (
+ node, cfs_paths.CLOUDFSD_PORT)
+ data = urllib.urlencode([("tn_name",tn_name),
+ ("vol_list",vol_list)])
+ url_obj = urllib2.urlopen(url,data=data)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ return template("tn_act_done.html",name=tn_name,action="enabled",
+ blob=blob)
+
+def run_www (tn_name):
+ vol_list = []
+ for prop in request.forms.iterkeys():
+ print prop
+ if prop.startswith("VOLUME_"):
+ vol_list.append(prop[7:])
+ return run_common(tn_name,vol_list)
+
+if __name__ == "__main__":
+ run_common(sys.argv[1],sys.argv[2:])
+
diff --git a/scripts/cfs_mount.py b/scripts/cfs_mount.py
index b6d64d1..af4ad06 100755
--- a/scripts/cfs_mount.py
+++ b/scripts/cfs_mount.py
@@ -1,7 +1,12 @@
+#!/usr/bin/env python
+
import json
+import os
import sys
import urllib2
+import cfs_paths
+import cfs_utils
import volfilter
# Simple cache of brick-to-port mappings, so we don't have to keep re-fetching
@@ -56,7 +61,11 @@ if __name__ == "__main__":
xl.opts["username"] = username
xl.opts["password"] = password
- volfilter.generate(graph,last,sys.stdout)
+ outfile = os.path.join(cfs_paths.info_dir,"%s.vol"%volume)
+ volfilter.generate(graph,last,open(outfile,"w"))
+ cmd = "-f %s %s" % (outfile, mount)
+ print "running glusterfs %s" % cmd
+ cfs_utils.run_cmd("glusterfs",cmd).wait()
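A hypothetical invocation, matching the sys.argv[1:6] unpacking above:

# python cfs_mount.py server1 myvol alice password1 /mnt/myvol
#   fetches the client volfile from server1:8080, rewrites it to
#   /var/lib/cloudfs/myvol.vol, then runs glusterfs -f on it at /mnt/myvol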
diff --git a/scripts/cfs_start_volume.py b/scripts/cfs_start_volume.py
index 989bff9..4c4e7bb 100644
--- a/scripts/cfs_start_volume.py
+++ b/scripts/cfs_start_volume.py
@@ -69,8 +69,8 @@ def allocate_port(vol_file):
for pf in glob.iglob("%s/*" % cfs_paths.idle_subdir):
base = os.path.basename(pf)
new_name = "%s/%s" % (cfs_paths.used_subdir, base)
- os.symlink(vol_file, new_name)
os.remove(pf)
+ os.symlink(vol_file, new_name)
return base
else:
raise RuntimeError, "no ports available"
@@ -168,16 +168,16 @@ def run_www (vol_name):
node_list = cfs_utils.get_nodes_for_vol(vol_name)
blob = []
for node in node_list:
- url = "http://%s:%d/volumes/%s/start_local" % (
- node, cfs_paths.CLOUDFSD_PORT, vol_name)
scratch = [node, []]
- print "opening %s" % url
- url_obj = urllib2.urlopen(url)
+ if socket.gethostbyname(node) in cfs_utils.local_addrs:
+ url_obj = [start_local(vol_name)]
+ else:
+ url = "http://%s:%d/volumes/%s/start_local" % (
+ node, cfs_paths.CLOUDFSD_PORT, vol_name)
+ url_obj = urllib2.urlopen(url)
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
- print "done with %s" % url
- print blob
return template("start_done.html",name=vol_name,blob=blob)
if __name__ == "__main__":
diff --git a/scripts/cfs_stop_volume.py b/scripts/cfs_stop_volume.py
index ebfa47a..0851d6b 100644
--- a/scripts/cfs_stop_volume.py
+++ b/scripts/cfs_stop_volume.py
@@ -46,16 +46,16 @@ def run_www (vol_name):
node_list = cfs_utils.get_nodes_for_vol(vol_name)
blob = []
for node in node_list:
- url = "http://%s:%d/volumes/%s/stop_local" % (
- node, cfs_paths.CLOUDFSD_PORT, vol_name)
scratch = [node, []]
- print "opening %s" % url
- url_obj = urllib2.urlopen(url)
+ if socket.gethostbyname(node) in cfs_utils.local_addrs:
+ url_obj = [stop_local(vol_name)]
+ else:
+ url = "http://%s:%d/volumes/%s/stop_local" % (
+ node, cfs_paths.CLOUDFSD_PORT, vol_name)
+ url_obj = urllib2.urlopen(url)
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
- print "done with %s" % url
- print blob
return template("stop_done.html",name=vol_name,blob=blob)
if __name__ == "__main__":
diff --git a/scripts/cfs_utils.py b/scripts/cfs_utils.py
index fd981cd..94be076 100644
--- a/scripts/cfs_utils.py
+++ b/scripts/cfs_utils.py
@@ -1,3 +1,4 @@
+import dbm
import glob
import os
import socket
@@ -43,16 +44,28 @@ def run_cmd (program, args, host=None, filters=[]):
if host:
cmd = make_remote(host,cmd)
if len(filters):
- print "executing %s using shell" % cmd
- child = subprocess.Popen(cmd,shell=True,
+ #print "executing %s using shell" % cmd
+ child = subprocess.Popen(cmd,close_fds=True,shell=True,
stdout=subprocess.PIPE)
else:
- print "executing %s without shell" % cmd
- child = subprocess.Popen(cmd.split(" "),shell=False,
+ #print "executing %s without shell" % cmd
+ cmd = cmd.split()
+ child = subprocess.Popen(cmd,close_fds=True,shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
return child
+# Figure out what our local IP addresses are, for future reference.
+# TBD: IPv6 yadda yadda
+local_addrs = []
+for line in run_cmd("ip","addr").stdout:
+ parts = line.lstrip().split(" ")
+ if parts[0] == "inet":
+ addr = parts[1]
+ if not addr.startswith("127."):
+ local_addrs.append(addr.split("/")[0])
+#print local_addrs
+
# NB this node is always first in the list
def get_members ():
peer_ips = run_cmd("gluster","peer status",
@@ -129,3 +142,11 @@ def get_nodes_for_vol (vol_name):
if m:
node_list.add(m.groups(1)[0])
return node_list
+
+# Open our configuration database.
+# We use dbm explicitly because we don't want to get into a situation where
+# a DB was created using some more "advanced" library and then restored/moved
+# without that library present.
+def open_db ():
+ db_path = os.path.join(cfs_paths.info_dir,"config.db")
+ return dbm.open(db_path,"c",0600)
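local_addrs above is computed once by scraping `ip addr`. An equivalent standalone check, as a sketch (the daemon itself uses the precomputed list):

import socket

def is_local(host):
    # Resolve host and compare against this machine's own addresses.
    try:
        addr = socket.gethostbyname(host)
    except socket.error:
        return False
    my_addrs = set(info[4][0]
                   for info in socket.getaddrinfo(socket.gethostname(), None))
    return addr.startswith("127.") or addr in my_addrs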
diff --git a/scripts/cloudfsd.py b/scripts/cloudfsd.py
index 15d0a5c..99e715a 100755
--- a/scripts/cloudfsd.py
+++ b/scripts/cloudfsd.py
@@ -1,39 +1,22 @@
#!/usr/bin/python
-from bottle import route, post, run, view, debug, CherryPyServer
+from bottle import route, post, run, view, debug, request
import os
import socket
import string
import cfs_paths
+import cfs_utils
import volmap
import cfs_utils
import cfs_add_node
import cfs_add_volume
import cfs_start_volume
import cfs_stop_volume
-
-@route("/:vol_name/fetch")
-def fetch_client_vf(vol_name):
- vf_path = "%s/vols/%s/%s-fuse.vol" % (cfs_paths.gfs_dir, vol_name, vol_name)
- return open(vf_path,"r")
-
-@route("/:vol_name/map")
-def map_paths(vol_name):
- return volmap.vol_map(vol_name)
-
-@route("/:user_name/adduser")
-def add_user(user_name):
- print "add user: " + user_name
-
-@route("/:user_name/deleteuser")
-def delete_user(user_name):
- print "delete user: " + user_name
-
-@route("/listusers")
-def list_users():
- print "list users"
+import cfs_add_tenant
+import cfs_delete_tenant
+import cfs_enable_tenant
@route("/")
@route("/cfg")
@@ -86,17 +69,61 @@ def stop_volume(vol_name):
def stop_local (vol_name):
return cfs_stop_volume.stop_local(vol_name)
-@route("/wwwaddtenant")
-def www_addtenant():
- print "www addtenant"
+# Used by mount.cloudfs
+@route("/:vol_name/fetch")
+def fetch_client_vf(vol_name):
+ vf_path = "%s/vols/%s/%s-fuse.vol" % (cfs_paths.gfs_dir, vol_name, vol_name)
+ return open(vf_path,"r")
-@post("/wwwdoaddtenant")
-def www_doaddtenant():
- print "www doaddtenant"
+# Used by mount.cloudfs
+@route("/:vol_name/map")
+def map_paths(vol_name):
+ return volmap.vol_map(vol_name)
-@route("/wwwlisttenants")
-def www_listtenants():
- print "www listtenants"
+@route("/tenants")
+@view("tenants.html")
+def show_tenants ():
+ db_obj = cfs_utils.open_db()
+ return dict(tenants=db_obj)
+
+@post("/tenants/add")
+def add_tenant():
+ return cfs_add_tenant.run_www()
+
+@post("/tenants/add_local")
+def add_tenant_local ():
+ tn_name = request.forms.get("tn_name")
+ tn_pw = request.forms.get("tn_pw")
+ return cfs_add_tenant.add_local(tn_name,tn_pw)
+
+@route("/tenants/:name/delete")
+def delete_tenant (name):
+ return cfs_delete_tenant.run_www(name)
+
+@route("/tenants/:tn_name/delete_local")
+def delete_tenant_local (tn_name):
+ return cfs_delete_tenant.delete_local(tn_name)
+
+@route("/tenants/:tn_name/volumes")
+@view("tenant_volumes.html")
+def show_tenant_volumes (tn_name):
+ db_obj = cfs_utils.open_db()
+ all_vols = [v[3:] for v in db_obj.keys() if v.startswith("vt_")]
+ print all_vols
+ active = db_obj["tv_"+tn_name].split(",")
+ active.sort()
+ print active
+ return dict(tn_name=tn_name,all_vols=all_vols,active=active)
+
+@post("/tenants/:tn_name/enable")
+def enable_tenant_volumes (tn_name):
+ return cfs_enable_tenant.run_www(tn_name)
+
+@post("/tenants/enable_local")
+def enable_tenant_local ():
+ tn_name = request.forms.get("tn_name")
+ vol_list = request.forms.get("vol_list")
+ return cfs_enable_tenant.enable_local(tn_name,vol_list)
@route("/styles/:sheet")
def get_style (sheet):
@@ -104,5 +131,6 @@ def get_style (sheet):
if __name__ == "__main__":
debug(True)
- run(host='0.0.0.0',port=cfs_paths.CLOUDFSD_PORT,server=CherryPyServer)
+ #run(host='0.0.0.0',port=cfs_paths.CLOUDFSD_PORT,server=CherryPyServer)
+ run(host='0.0.0.0',port=cfs_paths.CLOUDFSD_PORT)
diff --git a/scripts/styles/cfgmain.css b/scripts/styles/cfgmain.css
index 364d88b..dc79620 100644
--- a/scripts/styles/cfgmain.css
+++ b/scripts/styles/cfgmain.css
@@ -21,3 +21,20 @@ body {
.content {
padding: 1em 5em;
}
+
+
+table {
+ border: 2px black solid;
+}
+
+th {
+ border: 1px black solid;
+ padding: 3px;
+ background-color: #b0b0ff;
+}
+
+td {
+ border: 1px black solid;
+ padding: 2px;
+ background-color: #d0d0ff;
+}
diff --git a/scripts/views/cfgmain.html b/scripts/views/cfgmain.html
index 3f46b78..059a8f9 100644
--- a/scripts/views/cfgmain.html
+++ b/scripts/views/cfgmain.html
@@ -10,7 +10,7 @@
<div class="content">
<p><a href="/cluster">Manage Servers</a></p>
<p><a href="/volumes">Manage Volumes</a></p>
- <p><a href="/wwwlisttenants">Manage Tenants</a></p>
+ <p><a href="/tenants">Manage Tenants</a></p>
</div>
</body></html>
diff --git a/scripts/views/tenant_volumes.html b/scripts/views/tenant_volumes.html
new file mode 100644
index 0000000..dd4eb09
--- /dev/null
+++ b/scripts/views/tenant_volumes.html
@@ -0,0 +1,25 @@
+<html><head>
+<title>_CloudFS Tenant Access_</title>
+<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
+<meta http-equiv="Pragma" content="no-cache" />
+<link href="/styles/cfgmain.css" rel="stylesheet" type="text/css" />
+</head><body>
+<div class="banner">
+<h1>Configure Volumes for {{tn_name}}</h1>
+</div>
+<div class="content">
+<h2>Volume List</h2>
+ <form method="post" action="/tenants/{{tn_name}}/enable">
+ <table cellspacing="0">
+ <tr><th>Volume Name</th><th></th></tr>
+ %for vol in all_vols:
+ <tr>
+ <td>{{vol}}</td>
+ %value = "checked" if (vol in active) else ""
+ <td><input type="checkbox" name="VOLUME_{{vol}}" {{value}} /></td>
+ </tr>
+ %end
+ </table>
+ <input type="submit" value="Update" />
+ </form>
+</div>
+</body></html>
diff --git a/scripts/views/tenants.html b/scripts/views/tenants.html
new file mode 100644
index 0000000..c55dc0a
--- /dev/null
+++ b/scripts/views/tenants.html
@@ -0,0 +1,33 @@
+<html><head>
+<title>_CloudFS Tenant Management_</title>
+<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
+<meta http-equiv="Pragma" content="no-cache" />
+<link href="/styles/cfgmain.css" rel="stylesheet" type="text/css" />
+<link href="/styles/provlist.css" rel="stylesheet" type="text/css" />
+</head><body>
+<div class="banner">
+<h1>CloudFS Tenant Management</h1>
+</div>
+<div class="content">
+<h2>Existing Tenants:</h2>
+ <table cellspacing="0">
+ <tr><th>Name</th><th>Password</th><th></th></tr>
+ %tn_list = tenants.keys()
+ %tn_list.sort()
+ %for tn_name in [t[3:] for t in tn_list if t.startswith("tp_")]:
+ %tn_pw = tenants["tp_"+tn_name]
+ <tr><td>{{tn_name}}</td><td>{{tn_pw}}</td>
+ <td><a href="/tenants/{{tn_name}}/volumes">volumes</a>
+ <a href="/tenants/{{tn_name}}/delete">delete</a></td>
+ </tr>
+ %end
+ </table>
+<hr>
+<h2>Add a Tenant:</h2>
+<form method="post" action="/tenants/add">
+ Tenant Name: <input type="text" name="tn_name" size="20" />
+ Tenant Password: <input type="text" name="tn_pw" size="20" />
+ <input type="submit" name="PROVISION" value="Add" />
+</form>
+</div>
+</body></html>
diff --git a/scripts/views/tn_act_done.html b/scripts/views/tn_act_done.html
new file mode 100644
index 0000000..5fb27ae
--- /dev/null
+++ b/scripts/views/tn_act_done.html
@@ -0,0 +1,15 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+<p>Tenant {{name}} {{action}}.</p>
+%for node, output in blob:
+ <p><b>{{node}}</b></p>
+ <pre>
+ %for line in output:
+ {{line}}
+ %end
+ </pre>
+%end
+<p><a href="/tenants">Back to tenant configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
diff --git a/scripts/views/volumes.html b/scripts/views/volumes.html
index f4d7436..489778f 100644
--- a/scripts/views/volumes.html
+++ b/scripts/views/volumes.html
@@ -12,6 +12,7 @@
<h2>Existing Volumes:</h2>
%for vol_name, brick_list in bricks.iteritems():
<p><b>{{vol_name}}</b>
+ <a href="/volumes/{{vol_name}}/tenants">tenants</a>
<a href="/volumes/{{vol_name}}/start">start</a>
<a href="/volumes/{{vol_name}}/stop">stop</a></p>
<ul>
Branch 'cloudfsd' - scripts/cfs_mount.py scripts/cfs_paths.py scripts/cfs_start_volume.py scripts/cfs_stop_volume.py scripts/cfs_utils.py scripts/cloudfsd.py scripts/paths.py scripts/views scripts/volmap.py scripts/volstart.py scripts/volstop.py ToDo
by Jeff Darcy
ToDo | 21 ++++
scripts/cfs_mount.py | 6 +-
scripts/cfs_paths.py | 13 ++
scripts/cfs_start_volume.py | 184 ++++++++++++++++++++++++++++++++++++++++++
scripts/cfs_stop_volume.py | 62 ++++++++++++++
scripts/cfs_utils.py | 13 ++
scripts/cloudfsd.py | 39 +++++---
scripts/paths.py | 10 --
scripts/views/start_done.html | 15 +++
scripts/views/stop_done.html | 15 +++
scripts/views/volumes.html | 9 +-
scripts/volmap.py | 6 +-
scripts/volstart.py | 173 ---------------------------------------
scripts/volstop.py | 45 ----------
14 files changed, 357 insertions(+), 254 deletions(-)
New commits:
commit 491da1780e466cfd60c3df0c0b063094f39bd104
Author: Jeff Darcy <jdarcy@redhat.com>
Date: Tue May 10 13:57:17 2011 -0400
Got volume start/stop interfaces to work.
diff --git a/ToDo b/ToDo
new file mode 100644
index 0000000..c769c94
--- /dev/null
+++ b/ToDo
@@ -0,0 +1,21 @@
+= High Priority =
+Interface to start/stop volumes (includes volfile generation and mkdir)
+Get mount.cloudfs to work
+Interface to add/remove tenants
+Interface to map between tenants and volumes
+Use arbitrary directories, not just mountpoints
+Interface to remove volumes
+Handle IPv6, multi-homed hosts, localhost in scan_gfs_volfiles
+SSL
+
+= Medium Priority =
+Interface to remove servers
+Deal with glusterd startup consistently
+Sanity checking for volume configs
+Handle zero-length tenant list without alice/bob filler
+Handle multiple bricks per server when generating volfiles
+
+= Low Priority =
+Import GlusterFS volume to CloudFS
+Eliminate clean_and_run in favor of cfs_utils.run_cmd
+
diff --git a/scripts/cfs_mount.py b/scripts/cfs_mount.py
index af0715d..b6d64d1 100755
--- a/scripts/cfs_mount.py
+++ b/scripts/cfs_mount.py
@@ -4,8 +4,6 @@ import urllib2
import volfilter
-CLOUDFSD_PORT = 8080
-
# Simple cache of brick-to-port mappings, so we don't have to keep re-fetching
# the maps from the same host if it has multiple bricks.
class mapper:
@@ -16,7 +14,7 @@ class mapper:
mydict = self.cache[host]
else:
url = "http://%s:%d/%s/map" % \
- (host, CLOUDFSD_PORT, volume)
+ (host, cfs_paths.CLOUDFSD_PORT, volume)
mydict = json.load(urllib2.urlopen(url))
self.cache[host] = mydict
if mydict.has_key(subv):
@@ -34,7 +32,7 @@ if __name__ == "__main__":
(host, volume, username, password, mount) = sys.argv[1:6]
# Fetch the GlusterFS client-side volfile.
- url = "http://%s:%d/%s/fetch" % (host, CLOUDFSD_PORT, volume)
+ url = "http://%s:%d/%s/fetch" % (host, cfs_paths.CLOUDFSD_PORT, volume)
vol_file = urllib2.urlopen(url)
# Load the volfile and clean out some of the crud.
diff --git a/scripts/cfs_paths.py b/scripts/cfs_paths.py
new file mode 100644
index 0000000..27e11b0
--- /dev/null
+++ b/scripts/cfs_paths.py
@@ -0,0 +1,13 @@
+
+import re
+import os
+
+gfs_dir = "/var/lib/glusterd"
+info_dir = "/var/lib/cloudfs"
+idle_subdir = os.path.join(info_dir,".idle_ports")
+used_subdir = os.path.join(info_dir,".used_ports")
+log_dir = "/var/log/cloudfs"
+pid_dir = "/var/run/cloudfs"
+volfile_re = re.compile("[^.]+\.(.*)\.bricks-")
+CLOUDFSD_PORT = 8080
+
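For reference, what volfile_re extracts from a GlusterFS brick volfile name (the filename here is hypothetical):

>>> import re
>>> volfile_re = re.compile("[^.]+\.(.*)\.bricks-")
>>> volfile_re.match("myvol.server1.bricks-brick1.vol").group(1)
'server1'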
diff --git a/scripts/cfs_start_volume.py b/scripts/cfs_start_volume.py
new file mode 100644
index 0000000..989bff9
--- /dev/null
+++ b/scripts/cfs_start_volume.py
@@ -0,0 +1,184 @@
+
+import glob
+import os
+import re
+import socket
+import string
+import subprocess
+import sys
+import urllib2
+
+from bottle import template
+
+import volfilter
+import cfs_paths
+import cfs_utils
+
+# Make sure the volume directory exists and has the right stuff in it.
+def check_volume_directory(vol_name):
+ if not os.path.exists(cfs_paths.info_dir):
+ os.mkdir(cfs_paths.info_dir)
+ user_file = open("%s/%s" % (cfs_paths.info_dir, "default_users"), "w")
+ # TBD: big gaping security hole until other code can deal
+ # with having zero users defined.
+ user_file.write("alice password1\nbob password2\n")
+ user_file.flush()
+ user_file.close()
+ os.mkdir(cfs_paths.idle_subdir)
+ for i in range(24010, 24030):
+ fp = open("%s/%d" % (cfs_paths.idle_subdir, i), "w")
+ fp.close()
+ os.mkdir(cfs_paths.used_subdir)
+
+ vol_dir = "%s/%s" % (cfs_paths.info_dir, vol_name)
+ if not os.path.exists(vol_dir):
+ os.mkdir(vol_dir)
+ return vol_dir
+
+# Figure out which of the GlusterFS volfiles belong to us. Volfiles might have
+# names based on partial host names, fully qualified names, or addresses, or
+# even a mix thanks to "gluster peer probe" silliness. To deal with all of
+# these possibilities, we resolve everything to addresses and compare those.
+# ### bear in mind that depending on how a machine is set up, the IP addrs
+# ### for a node might include 127.0.0.1 and ::1 first
+def scan_gfs_volfiles(vol_name):
+ ret = ""
+ my_name = os.uname()[1]
+ # Getaddrinfo returns a list of tuples, each:
+ # family, socktype, proto, canonname, sockaddr
+ # We extract the sockaddr of the first item, and the IP addr from that
+ # TBD: handle IPv6, multi-homed hosts, etc.
+ # TBD: skip loopback addresses based on note above
+ my_addrs = socket.getaddrinfo(my_name, 0)
+ my_glob = "%s/vols/%s/%s.*.vol" % (cfs_paths.gfs_dir, vol_name, vol_name)
+ for vf in glob.iglob(my_glob):
+ m = cfs_paths.volfile_re.match(os.path.basename(vf))
+ if m:
+ this_host = m.groups(1)[0]
+ this_addr = socket.getaddrinfo(this_host, 0)[0][4][0]
+ for addr in my_addrs:
+ if this_addr == addr[4][0]:
+ ret = vf
+ break
+ return ret
+
+# Allocate a port for a server to run on. Right now we do this in a very
+# "clever" way, by creating files to match ports and then grabbing a file here.
+# When we have a real volume database such games will be unnecessary.
+def allocate_port(vol_file):
+ for pf in glob.iglob("%s/*" % cfs_paths.idle_subdir):
+ base = os.path.basename(pf)
+ new_name = "%s/%s" % (cfs_paths.used_subdir, base)
+ os.symlink(vol_file, new_name)
+ os.remove(pf)
+ return base
+ else:
+ raise RuntimeError, "no ports available"
+
+# Parse the user file into a list of [name,password] sub-lists. Since
+# everything that uses this is in Python we could just make it a pickle/shelf
+# or whatever, but it would all go away with a real volume database so it's not
+# worth the trouble to re-do it now.
+def parse_user_file(vol_name):
+ try:
+ user_file = open("%s/%s/users" % (cfs_paths.info_dir, vol_name), "r")
+ except IOError:
+ user_file = open("%s/default_users" % cfs_paths.info_dir, "r")
+
+ users = []
+ for line in user_file.readlines():
+ space = line.find(" ")
+ if space == -1:
+ print >> sys.stderr, "Bad line in userfile: %s" % line
+ users.append([line[:space],line[space+1:-1]])
+ return users
+
+def cloudify_server (input, output, users, port):
+ print "# Cloudifying server %s" % input
+ graph, last = volfilter.load(input)
+ last = volfilter.cleanup(last,graph)
+
+ if last.type != "protocol/server":
+ print >> sys.stderr, "Top translator must be protocol/server"
+ sys.exit(1)
+ old_stack = last.subvols[0]
+
+ bad_opts = []
+ for opt in last.opts.iterkeys():
+ if opt[:9] == "auth.addr":
+ bad_opts.append(opt)
+ elif opt[:10] == "auth.login":
+ bad_opts.append(opt)
+ for opt in bad_opts:
+ print "# stripping auth option %s = %s" % (opt, last.opts[opt])
+ del last.opts[opt]
+
+ last.subvols = []
+ for user, pw in users:
+ new_stack = volfilter.copy_stack(old_stack,user)
+ last.subvols.append(new_stack)
+ last.opts["auth.login.%s.allow"%new_stack.name] = user
+ last.opts["auth.login.%s.password"%new_stack.name] = pw
+
+ last.opts["transport.socket.listen-port"] = port
+ volfilter.generate(graph,last,output)
+
+def create_tenant_dirs(vol_file):
+ cmd = "/bin/grep \"option directory\" %s" % vol_file
+ path = ""
+ opt_dir_lines = os.popen(cmd)
+ for opt_dir_line in opt_dir_lines:
+ tokens = re.split(' ', string.lstrip(opt_dir_line))
+ path = string.rstrip(tokens[2])
+ if not os.path.exists(path):
+ os.mkdir(path)
+ opt_dir_lines.close()
+
+def start_local (vol_name):
+ vol_base = check_volume_directory(vol_name)
+ users = parse_user_file(vol_name)
+ # TBD: deal with more than one brick on the same server
+ vf = scan_gfs_volfiles(vol_name)
+ new_vf = "%s/%s" % (vol_base, os.path.basename(vf))
+ outfile = open(new_vf, "w")
+ port = allocate_port(new_vf)
+ cloudify_server(vf, outfile, users, port)
+ outfile.flush()
+ outfile.close()
+ v_key = string.replace(os.path.basename(new_vf), ".vol", "")
+ # print "v_key: %s" % v_key
+ # make dirs for each of the users
+ create_tenant_dirs(new_vf)
+ # actually start the server
+ # note: pid file in /var/lib/glusterd/vols/<vol_name>/... so that
+ # gluster can find it
+ logfile = os.path.join(cfs_paths.log_dir,"%s.log"%v_key)
+ pidfile = os.path.join(cfs_paths.pid_dir,"%s.pid"%v_key)
+ cmd = "--volfile %s" % new_vf
+ cmd += (" --log-file %s" % logfile)
+ cmd += (" --pid-file %s" % pidfile)
+ cmd += (" --xlator-option %s-server.transport.socket.listen-port=%s" % (
+ vol_name, port))
+ print "whole command = glusterfsd %s" % cmd
+ retcode = cfs_utils.run_cmd("glusterfsd",cmd).wait()
+ return "start_local(%s) returned %d on %s\n" % (
+ vol_name, retcode, socket.gethostname())
+
+def run_www (vol_name):
+ node_list = cfs_utils.get_nodes_for_vol(vol_name)
+ blob = []
+ for node in node_list:
+ url = "http://%s:%d/volumes/%s/start_local" % (
+ node, cfs_paths.CLOUDFSD_PORT, vol_name)
+ scratch = [node, []]
+ print "opening %s" % url
+ url_obj = urllib2.urlopen(url)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ print "done with %s" % url
+ print blob
+ return template("start_done.html",name=vol_name,blob=blob)
+
+if __name__ == "__main__":
+ run_www(sys.argv[1])
diff --git a/scripts/cfs_stop_volume.py b/scripts/cfs_stop_volume.py
new file mode 100644
index 0000000..ebfa47a
--- /dev/null
+++ b/scripts/cfs_stop_volume.py
@@ -0,0 +1,62 @@
+
+import fileinput
+import glob
+import os
+import socket
+import subprocess
+import sys
+import urllib2
+
+from bottle import template
+
+import cfs_paths
+import cfs_utils
+
+def kill_daemon (vol_name):
+ myglob = os.path.join(cfs_paths.pid_dir,"%s.*"%vol_name)
+ for f in glob.iglob(myglob):
+ fp = open(f,"r")
+ pid = fp.read()[:-1]
+ print "killing %s" % pid
+ cfs_utils.run_cmd("kill",pid).wait()
+ break
+ return 0
+
+def recycle_port (path):
+ print "recycling %s" % path
+ port_num = os.path.basename(path)
+ os.unlink(path)
+ fp = open("%s/%s" % (cfs_paths.idle_subdir, port_num), "w")
+ fp.close()
+
+def stop_local (vol_name):
+ retcode = kill_daemon(vol_name)
+ for symlink in glob.glob(cfs_paths.used_subdir + "/*"):
+ vol_link = os.readlink(symlink)
+ vol = os.path.basename(vol_link)
+ tokens = vol.split(".")
+ if vol_name == tokens[0]:
+ recycle_port(symlink)
+ break
+ return "stop_local(%s) returned %d on %s\n" % (
+ vol_name, retcode, socket.gethostname())
+
+
+def run_www (vol_name):
+ node_list = cfs_utils.get_nodes_for_vol(vol_name)
+ blob = []
+ for node in node_list:
+ url = "http://%s:%d/volumes/%s/stop_local" % (
+ node, cfs_paths.CLOUDFSD_PORT, vol_name)
+ scratch = [node, []]
+ print "opening %s" % url
+ url_obj = urllib2.urlopen(url)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ print "done with %s" % url
+ print blob
+ return template("stop_done.html",name=vol_name,blob=blob)
+
+if __name__ == "__main__":
+ run_www(sys.argv[1])
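Together with allocate_port in cfs_start_volume.py, this gives the port files a simple lifecycle. A sketch, with a hypothetical port and volfile path:

import os

import cfs_paths

port = "24010"
volfile = "/var/lib/cloudfs/myvol/myvol.server1.bricks-brick1.vol"
# allocate: claim an idle port by symlinking it to the volfile
os.symlink(volfile, os.path.join(cfs_paths.used_subdir, port))
os.remove(os.path.join(cfs_paths.idle_subdir, port))
# recycle: return the port to the idle pool when the volume stops
os.unlink(os.path.join(cfs_paths.used_subdir, port))
open(os.path.join(cfs_paths.idle_subdir, port), "w").close()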
diff --git a/scripts/cfs_utils.py b/scripts/cfs_utils.py
index 09e5ace..fd981cd 100644
--- a/scripts/cfs_utils.py
+++ b/scripts/cfs_utils.py
@@ -1,7 +1,10 @@
+import glob
import os
import socket
import subprocess
+import cfs_paths
+
# The list of filesystems that work is much shorter than the list of
# filesystems that don't support xattrs, already-remote filesystems (e.g. NFS),
# pseudo-filesystems (e.g. devfs) or other things that won't work for one
@@ -116,3 +119,13 @@ def get_mounts (brick_list):
volumes_on_nodes[node_ip] = scratch
return volumes_on_nodes
+# Get the list of nodes that are serving (any part of) a specific volume.
+def get_nodes_for_vol (vol_name):
+ node_list = set()
+ my_glob = "%s/vols/%s/%s.*.vol" % (cfs_paths.gfs_dir,
+ vol_name, vol_name)
+ for vf in glob.iglob(my_glob):
+ m = cfs_paths.volfile_re.match(os.path.basename(vf))
+ if m:
+ node_list.add(m.groups(1)[0])
+ return node_list
diff --git a/scripts/cloudfsd.py b/scripts/cloudfsd.py
index e510823..15d0a5c 100755
--- a/scripts/cloudfsd.py
+++ b/scripts/cloudfsd.py
@@ -1,32 +1,22 @@
#!/usr/bin/python
-from bottle import route, post, run, view, debug
+from bottle import route, post, run, view, debug, CherryPyServer
import os
import socket
import string
-import paths
-import volstart
-import volstop
+import cfs_paths
import volmap
import cfs_utils
import cfs_add_node
import cfs_add_volume
-
-CLOUDFSD_PORT = 8080
-
-@route("/:vol_name/start")
-def start_server(vol_name):
- volstart.vol_start(vol_name)
-
-@route("/:vol_name/stop")
-def stop_server(vol_name):
- volstop.vol_stop(vol_name)
+import cfs_start_volume
+import cfs_stop_volume
@route("/:vol_name/fetch")
def fetch_client_vf(vol_name):
- vf_path = "%s/vols/%s/%s-fuse.vol" % (paths.gfs_dir, vol_name, vol_name)
+ vf_path = "%s/vols/%s/%s-fuse.vol" % (cfs_paths.gfs_dir, vol_name, vol_name)
return open(vf_path,"r")
@route("/:vol_name/map")
@@ -80,6 +70,22 @@ def show_volumes():
def add_volume():
return cfs_add_volume.run_www()
+@route("/volumes/:vol_name/start")
+def start_volume(vol_name):
+ return cfs_start_volume.run_www(vol_name)
+
+@route("/volumes/:vol_name/start_local")
+def start_local (vol_name):
+ return cfs_start_volume.start_local(vol_name)
+
+@route("/volumes/:vol_name/stop")
+def stop_volume(vol_name):
+ return cfs_stop_volume.run_www(vol_name)
+
+@route("/volumes/:vol_name/stop_local")
+def stop_local (vol_name):
+ return cfs_stop_volume.stop_local(vol_name)
+
@route("/wwwaddtenant")
def www_addtenant():
print "www addtenant"
@@ -98,4 +104,5 @@ def get_style (sheet):
if __name__ == "__main__":
debug(True)
- run(host='',port=CLOUDFSD_PORT)
+ run(host='0.0.0.0',port=cfs_paths.CLOUDFSD_PORT,server=CherryPyServer)
+
diff --git a/scripts/paths.py b/scripts/paths.py
deleted file mode 100644
index 51bdafd..0000000
--- a/scripts/paths.py
+++ /dev/null
@@ -1,10 +0,0 @@
-
-import re
-import os
-
-gfs_dir = "/var/lib/glusterd"
-info_dir = "/var/lib/cloudfs"
-idle_subdir = "/var/lib/cloudfs/.idle_ports"
-used_subdir = "/var/lib/cloudfs/.used_ports"
-volfile_re = re.compile("[^.]+\.(.*)\.bricks-")
-
diff --git a/scripts/views/start_done.html b/scripts/views/start_done.html
new file mode 100644
index 0000000..5e35b6c
--- /dev/null
+++ b/scripts/views/start_done.html
@@ -0,0 +1,15 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+<p>Volume {{name}} started.</p>
+%for node, output in blob:
+ <p><b>{{node}}</b></p>
+ <pre>
+ %for line in output:
+ {{line}}
+ %end
+ </pre>
+%end
+<p><a href="/volumes">Back to volume configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
diff --git a/scripts/views/stop_done.html b/scripts/views/stop_done.html
new file mode 100644
index 0000000..69616c0
--- /dev/null
+++ b/scripts/views/stop_done.html
@@ -0,0 +1,15 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+<p>Volume {{name}} stopped.</p>
+%for node, output in blob:
+ <p><b>{{node}}</b></p>
+ <pre>
+ %for line in output:
+ {{line}}
+ %end
+ </pre>
+%end
+<p><a href="/volumes">Back to volume configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
diff --git a/scripts/views/volumes.html b/scripts/views/volumes.html
index 11c964c..f4d7436 100644
--- a/scripts/views/volumes.html
+++ b/scripts/views/volumes.html
@@ -11,11 +11,14 @@
<div class="content">
<h2>Existing Volumes:</h2>
%for vol_name, brick_list in bricks.iteritems():
- <p>{{vol_name}}
+ <p><b>{{vol_name}}</b>
+ <a href="/volumes/{{vol_name}}/start">start</a>
+ <a href="/volumes/{{vol_name}}/stop">stop</a></p>
+ <ul>
%for brick in brick_list:
- <br /> {{brick}}
+ <li>{{brick}}</li>
%end
- </p>
+ </ul>
%end
<hr>
<h2>Provision a Volume From Available Bricks:</h2>
diff --git a/scripts/volmap.py b/scripts/volmap.py
index 92b77e9..f5a81b2 100644
--- a/scripts/volmap.py
+++ b/scripts/volmap.py
@@ -4,13 +4,13 @@ import os
import re
import volfilter
-import paths
+import cfs_paths
def vol_map (vol_name):
- vol_dir = "%s/%s" % (paths.info_dir, vol_name)
+ vol_dir = "%s/%s" % (cfs_paths.info_dir, vol_name)
vol_re = re.compile(vol_dir+"/")
map = {}
- for link in glob.iglob("%s/*" % paths.used_subdir):
+ for link in glob.iglob("%s/*" % cfs_paths.used_subdir):
real_file = os.readlink(link)
m = vol_re.match(real_file)
if not m:
diff --git a/scripts/volstart.py b/scripts/volstart.py
deleted file mode 100644
index 3c699f7..0000000
--- a/scripts/volstart.py
+++ /dev/null
@@ -1,173 +0,0 @@
-
-import glob
-import os
-import re
-import socket
-import string
-import subprocess
-import sys
-
-import volfilter
-import paths
-
-# Make sure the volume directory exists and has the right stuff in it.
-def check_volume_directory(vol_name):
- if not os.path.exists(paths.info_dir):
- os.mkdir(paths.info_dir)
- user_file = open("%s/%s" % (paths.info_dir, "default_users"), "w")
- # TBD: big gaping security hole until other code can deal
- # with having zero users defined.
- user_file.write("alice password1\nbob password2\n")
- user_file.flush()
- user_file.close()
- os.mkdir(paths.idle_subdir)
- for i in range(24010, 24030):
- fp = open("%s/%d" % (paths.idle_subdir, i), "w")
- fp.close()
- os.mkdir(paths.used_subdir)
-
- vol_dir = "%s/%s" % (paths.info_dir, vol_name)
- if not os.path.exists(vol_dir):
- os.mkdir(vol_dir)
- return vol_dir
-
-# Figure out which of the GlusterFS volfiles belong to us. Volfiles might have
-# names based on partial host names, fully qualified names, or addresses, or
-# even a mix thanks to "gluster peer probe" silliness. To deal with all of
-# these possibilities, we resolve everything to addresses and compare those.
-# ### bear in mind that depending on how a machine is set up, the IP addrs
-# ### for a node might include 127.0.0.1 and ::1 first
-def scan_gfs_volfiles(vol_name):
- ret = ""
- my_name = os.uname()[1]
- # Getaddrinfo returns a list of tuples, each:
- # family, socktype, proto, canonname, sockaddr
- # We extract the sockaddr of the first item, and the IP addr from that
- # TBD: handle IPv6, multi-homed hosts, etc.
- # TBD: skip loopback addresses based on note above
- my_addrs = socket.getaddrinfo(my_name, 0)
- my_glob = "%s/vols/%s/%s.*.vol" % (paths.gfs_dir, vol_name, vol_name)
- for vf in glob.iglob(my_glob):
- m = paths.volfile_re.match(os.path.basename(vf))
- if m:
- this_host = m.groups(1)[0]
- this_addr = socket.getaddrinfo(this_host, 0)[0][4][0]
- for addr in my_addrs:
- if this_addr == addr[4][0]:
- ret = vf
- break
- return ret
-
-# Allocate a port for a server to run on. Right now we do this in a very
-# "clever" way, by creating files to match ports and then grabbing a file here.
-# When we have a real volume database such games will be unnecessary.
-def allocate_port(vol_file):
- for pf in glob.iglob("%s/*" % paths.idle_subdir):
- base = os.path.basename(pf)
- new_name = "%s/%s" % (paths.used_subdir, base)
- os.symlink(vol_file, new_name)
- os.remove(pf)
- return base
- else:
- raise RuntimeError, "no ports available"
-
-# Parse the user file into a list of [name,password] sub-lists. Since
-# everything that uses this is in Python we could just make it a pickle/shelf
-# or whatever, but it would all go away with a real volume database so it's not
-# worth the trouble to re-do it now.
-def parse_user_file(vol_name):
- try:
- user_file = open("%s/%s/users" % (paths.info_dir, vol_name), "r")
- except IOError:
- user_file = open("%s/default_users" % paths.info_dir, "r")
-
- users = []
- for line in user_file.readlines():
- space = line.find(" ")
- if space == -1:
- print >> sys.stderr, "Bad line in userfile: %s" % line
- users.append([line[:space],line[space+1:-1]])
- return users
-
-# Convert a single GlusterFS server volfile to its CloudFS form, with one
-# translator stack per tenant and "evil" translators stripped out. Some day
-# this will also involve adding translators (e.g. UID mapping) at the top of
-# each stack.
-def cloudify_volfile(input, output, users, port):
- graph, last = volfilter.load(input)
- last = volfilter.cleanup(last, graph)
-
- if last.type != "protocol/server":
- print >> sys.stderr, "Top translator must be protocol/server"
- sys.exit(1)
- old_stack = last.subvols[0]
-
- bad_opts = []
- for opt in last.opts.iterkeys():
- if opt[:9] == "auth.addr":
- bad_opts.append(opt)
- elif opt[:10] == "auth.login":
- bad_opts.append(opt)
- for opt in bad_opts:
- print "# stripping auth option %s = %s" % (opt, last.opts[opt])
- del last.opts[opt]
-
- last.subvols = []
- for user, pw in users:
- new_stack = volfilter.copy_stack(old_stack, user)
- last.subvols.append(new_stack)
- last.opts["auth.login.%s.allow" % new_stack.name] = user
- last.opts["auth.login.%s.password" % new_stack.name] = pw
-
- last.opts["transport.socket.listen-port"] = port
- volfilter.generate(graph, last, output)
-
-def create_tenant_dirs(vol_file):
- cmd = "/bin/grep \"option directory\" %s" % vol_file
- path = ""
- opt_dir_lines = os.popen(cmd)
- for opt_dir_line in opt_dir_lines:
- tokens = re.split(' ', string.lstrip(opt_dir_line))
- path = string.rstrip(tokens[2])
- if not os.path.exists(path):
- os.mkdir(path)
- opt_dir_lines.close()
- junkdir = os.path.dirname(path) + "/junk"
- if not os.path.exists(junkdir):
- os.mkdir(junkdir)
-
-def vol_start(vol_name):
- vol_base = check_volume_directory(vol_name)
- users = parse_user_file(vol_name)
- # TBD: deal with more than one brick on the same server
- vf = scan_gfs_volfiles(vol_name)
- new_vf = "%s/%s" % (vol_base, os.path.basename(vf))
- outfile = open(new_vf, "w")
- port = allocate_port(new_vf)
- cloudify_volfile(vf, outfile, users, port)
- outfile.flush()
- outfile.close()
- v_key = string.replace(os.path.basename(new_vf), ".vol", "")
- # print "v_key: %s" % v_key
- # make dirs for each of the users
- create_tenant_dirs(new_vf)
- # actually start the server
- # note: pid file in /var/lib/glusterd/vols/<vol_name>/... so that
- # gluster can find it
- cmd = "/usr/sbin/glusterfsd --volfile=%s --xlator-option %s-server.listen-port=%s --pid-file=/var/lib/glusterd/vols/%s/run/%s.pid --socket-file=/tmp/%s.socket --log-file=/var/log/glusterfs/bricks/%s.log" % (new_vf, vol_name, port, vol_name, v_key, v_key, vol_name)
- # before 3.1.4 there were --brick-name and --brick-port for use by
- # the gluster port mapper. These were secret/hidden cmdline options.
- try:
- p = subprocess.Popen(cmd, close_fds=True, shell=True)
- retcode = os.waitpid(p.pid, 0)[1]
- if retcode < 0:
- print >>sys.stderr, "killed ", -retcode
- except ValueError, v:
- print >>sys.stderr, "value error: ", v
- except OSError, o:
- print >>sys.stderr, "os error: ", o
- except NameError, n:
- print >>sys.stderr, "name error: ", n
- except:
- print >>sys.stderr, "unknown error: ", sys.exc_info()[0]
-
diff --git a/scripts/volstop.py b/scripts/volstop.py
deleted file mode 100644
index 068baeb..0000000
--- a/scripts/volstop.py
+++ /dev/null
@@ -1,45 +0,0 @@
-
-import fileinput
-import glob
-import os
-import subprocess
-import sys
-
-import paths
-
-def kill_daemon(vol_name, vol_id):
- cooked = vol_id.rsplit(".", 1)
- pid_file_name = paths.gfs_dir + "/vols/" + vol_name + "/run/" + cooked[0] + ".pid"
- for pid in fileinput.input(pid_file_name):
- cmd = "/bin/kill " + pid
- try:
- p = subprocess.Popen(cmd, close_fds=True, shell=True);
- retcode = os.waitpid(p.pid, 0)[1]
- if retcode < 0:
- print >>sys.stderr, "killed ", -retcode
- except ValueError, v:
- print >>sys.stderr, "value error: ", v
- except OSError, o:
- print >>sys.stderr, "os error: ", o
- except NameError, n:
- print >>sys.stderr, "name error: ", n
- except:
- print >>sys.stderr, "unknown error: ", sys.exc_info()[0]
- fileinput.close()
-
-def recycle_port(path):
- port_num = os.path.basename(path)
- os.unlink(path)
- fp = open("%s/%s" % (paths.idle_subdir, port_num), "w")
- fp.close()
-
-def vol_stop(vol_name):
- for symlink in glob.glob(paths.used_subdir + "/*"):
- vol_link = os.readlink(symlink)
- vol = os.path.basename(vol_link)
- tokens = vol.split(".")
- if vol_name == tokens[0]:
- kill_daemon(vol_name, vol)
- recycle_port(symlink)
- break
-
Branch 'cloudfsd' - scripts/cfs_add_node.py scripts/cfs_add_volume.py scripts/cfs_utils.py scripts/cloudfsd.py scripts/styles scripts/views scripts/volstart.py scripts/wwwcfgmain.py scripts/wwwcfgroot.py scripts/wwwconfirmprovision.py scripts/wwwcss.py scripts/wwwdoinitcluster.py scripts/wwwdoprovision.py scripts/wwwinitcluster.py scripts/wwwprovision.py scripts/wwwroot.py
by Jeff Darcy
scripts/cfs_add_node.py | 53 +++++++++++++++++
scripts/cfs_add_volume.py | 79 +++++++++++++++++++++++++
scripts/cfs_utils.py | 118 ++++++++++++++++++++++++++++++++++++++
scripts/cloudfsd.py | 84 ++++++++++++++-------------
scripts/styles/cfgmain.css | 23 +++++++
scripts/styles/provlist.css | 29 +++++++++
scripts/views/add_node_done.html | 7 ++
scripts/views/add_vol_done.html | 7 ++
scripts/views/add_vol_fail.html | 7 ++
scripts/views/cfgmain.html | 16 +++++
scripts/views/cluster.html | 23 +++++++
scripts/views/volumes.html | 54 +++++++++++++++++
scripts/volstart.py | 2
scripts/wwwcfgmain.py | 19 ------
scripts/wwwcfgroot.py | 34 -----------
scripts/wwwconfirmprovision.py | 59 -------------------
scripts/wwwcss.py | 79 -------------------------
scripts/wwwdoinitcluster.py | 59 -------------------
scripts/wwwdoprovision.py | 114 -------------------------------------
scripts/wwwinitcluster.py | 40 -------------
scripts/wwwprovision.py | 119 ---------------------------------------
scripts/wwwroot.py | 10 ---
22 files changed, 462 insertions(+), 573 deletions(-)
New commits:
commit 152e08b73f136a05c26fd90190d0654c9f0993ea
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue May 10 12:40:29 2011 -0400
The main task was to use routing and templates instead of commingling logic
with presentation, and to coalesce common functionality into a library
instead of duplicating it, all for maintainability. Many other fixes and
changes have been rolled in, including:
form handling that actually works (i.e. not parsing stdin)
consistently include self when getting cluster member list
include last brick in each volume
add missing parameters (e.g. brick_in_use could never have worked)
use "mount" and explicit list of valid fs types
don't let glusterd start our daemons, which it will do the wrong way
Still a long way to go, but it's a start.
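For readers unfamiliar with the routing-plus-templates pattern this commit adopts, here is a minimal Bottle sketch; the route, template names, and form field are illustrative assumptions, not part of the commit:

    # Minimal sketch: handlers return data, templates render it.
    from bottle import route, post, view, request, template, run

    @route("/hello")
    @view("hello.html")              # presentation lives in views/hello.html
    def hello():
        return dict(name="world")    # handler returns data only

    @post("/hello")
    def hello_post():
        # Form handling via request.forms instead of parsing stdin by hand.
        name = request.forms.get("NAME")
        return template("hello.html", name=name)

    if __name__ == "__main__":
        run(host="", port=8080)

The same split shows up in the diff below: cloudfsd.py holds only routes, the cfs_*.py modules hold the logic, and views/*.html hold the markup.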
diff --git a/scripts/cfs_add_node.py b/scripts/cfs_add_node.py
new file mode 100644
index 0000000..497db36
--- /dev/null
+++ b/scripts/cfs_add_node.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+import fileinput
+import re
+import os
+import socket
+import sys
+
+from bottle import request, template
+
+# TBD: move this into a library if we're going to re-use it elsewhere
+# (or remove it entirely since it seems rather pointless)
+def clean_and_run (cmd):
+ clean_cmd = cmd.replace("|", "").replace("&", "").replace(">", " ")
+ return os.system(clean_cmd + " > /dev/null 2>&1")
+
+def run_common (node_name):
+ host_name = socket.gethostname()
+
+ # Derive the "real" node addr: if the user enters, e.g.,
+ # 192.168.122.55 and that is the IP of this node, then when we're
+ # done can_node_addr will be either 192.168.122.55 or 127.0.0.1.
+ # Similarly, if the user enters <principalnode>.foo.bar.baz.com,
+ # the result will be the same.
+ host_addr = socket.gethostbyname(host_name)
+ node_addr = socket.gethostbyname(node_name)
+ can_node_name = socket.gethostbyaddr(node_addr)
+ can_node_addr = socket.gethostbyname(can_node_name[0])
+
+
+ # now we can do the right thing if we're on the principal node or not
+ if node_addr == host_addr or "127.0.0.1" == can_node_addr :
+ sts = os.system("/usr/bin/sudo /sbin/chkconfig --add glusterd; \
+ /usr/bin/sudo /sbin/chkconfig glusterd on; \
+ /usr/bin/sudo /sbin/service glusterd start")
+ else :
+ # Remote actions.
+ pfx = "/usr/bin/sudo /usr/bin/ssh %s " % can_node_addr
+ clean_and_run(pfx+"/sbin/chkconfig --add glusterd")
+ clean_and_run(pfx+"/sbin/chkconfig glusterd on")
+ clean_and_run(pfx+"/sbin/service glusterd start")
+ # Local actions.
+ pfx = "/usr/bin/sudo "
+ clean_and_run(pfx+"/usr/sbin/gluster peer probe %s"%node_name)
+
+ return template("add_node_done.html",node_name=node_name)
+
+def run_www ():
+ node_name = request.forms.get("NODENAME")
+ return run_common(node_name)
+
+if __name__ == "__main__":
+ run_common(sys.argv[1])
diff --git a/scripts/cfs_add_volume.py b/scripts/cfs_add_volume.py
new file mode 100644
index 0000000..a9e7617
--- /dev/null
+++ b/scripts/cfs_add_volume.py
@@ -0,0 +1,79 @@
+
+import re
+import os
+import string
+import sys
+
+from bottle import request, template
+import cfs_utils
+
+def run_common (vname, vtype, vcount, bricks):
+
+ # TBD: all sorts of input-validity checking
+ # TBD: construct sane brick list
+ cmd = "volume create %s" % vname
+ if vtype != "plain":
+ cmd = "%s %s %s" % (cmd, vtype, vcount)
+ cmd = "%s %s" % (cmd, string.join(bricks))
+ sts = cfs_utils.run_cmd("gluster",cmd).wait()
+ if sts:
+ return template("add_vol_fail.html", name=vname,
+ action="gluster", status=sts)
+
+ ## make the cloudfs vol files with cloudfs
+ # TBD: this should go away in favor of generating these at start
+ # time; see big TBD near the end of the function
+ cmd = "init %s /var/lib/glusterd/cloudfs.tenants" % vname
+ for node in cfs_utils.get_members()[1:]:
+ sts = cfs_utils.run_cmd("cloudfs",cmd,host=node).wait()
+ if sts:
+ act = "cloudfs on %s" % node
+ return template("add_vol_fail.html", name=vname,
+ action=act, status=sts)
+
+ ## make the dirs on each node/volume
+ ## first get all the tenants
+ tenants = []
+ tenants.append("junk")
+ tenant_file = open("/var/lib/glusterd/cloudfs.tenants","r")
+ for tenantline in tenant_file:
+ scratch = re.split(' ', tenantline)
+ tenants.append(scratch[0])
+ ## now make the dirs on every volume
+ for b in bricks:
+ scratch = re.split(':', b)
+ for tenant in tenants :
+ node = scratch[0]
+ cmd = "%s/%s" % (scratch[1], tenant)
+ sts = cfs_utils.run_cmd("mkdir",cmd,host=node).wait()
+ if sts:
+ act = "mkdir on %s" % node
+ return template("add_vol_fail.html", name=vname,
+ action=act, status=sts)
+
+ # TBD: generating the cloudfs volfiles (client+server) and starting the
+ # glusterfsd daemons should be part of a separate "start" action, so
+ # that it's done with a tenant list that's as up-to-date as possible.
+ # It should also be done using the cloudfsd fetch/map infrastructure so
+ # that cfs_mount.py/mount.cloudfs can actually work. Doing it as a
+ # hack on top of Gluster's fetching/portmapping mess, even if it seems
+ # to work in some bogus test environment, is a total waste of time.
+ return template("add_vol_done.html",name=vname);
+
+def run_www ():
+ volume_id = request.forms.get("VOLUMEID")
+ volume_type = request.forms.get("TYPE")
+ replica_or_stripe_count = request.forms.get("COUNT")
+ brick_list = []
+ for prop in request.forms.iterkeys():
+ if prop.startswith("VOLUME_"):
+ brick_list.append(prop[7:])
+ return run_common(volume_id,volume_type,replica_or_stripe_count,
+ brick_list)
+
+if __name__ == "__main__":
+ volume_id = sys.argv[1]
+ volume_type = sys.argv[2]
+ replica_or_stripe_count = sys.argv[3]
+ brick_list = sys.argv[4:]
+ run_common(volume_id,volume_type,replica_or_stripe_count,brick_list)
diff --git a/scripts/cfs_utils.py b/scripts/cfs_utils.py
new file mode 100644
index 0000000..09e5ace
--- /dev/null
+++ b/scripts/cfs_utils.py
@@ -0,0 +1,118 @@
+import os
+import socket
+import subprocess
+
+# The list of filesystems that work is much shorter than the list of
+# filesystems that don't support xattrs, already-remote filesystems (e.g. NFS),
+# pseudo-filesystems (e.g. devfs) or other things that won't work for one
+# reason or another.
+good_fs_types = [ "ext2", "ext3", "ext4", "xfs", "btrfs" ]
+
+# Sudo is kind of pointless if we're already running as root.
+use_sudo = False
+
+# We use the class cache to avoid having to call through the shell to find
+# executables on $PATH every single time. To do this, we use the shell *once*
+# to find the local path and assume all nodes are configured similarly.
+class_cache = {}
+
+def get_path (program):
+ if not class_cache.has_key(program):
+ for dir in os.getenv("PATH").split(":"):
+ maybe = os.path.join(dir,program)
+ if os.access(maybe,os.X_OK):
+ class_cache[program] = maybe
+ break
+ # KeyError is as good as any exception we might throw ourselves.
+ return class_cache[program]
+
+# This lets users change access methods (e.g. pdsh, slurm, or some future
+# method that proxies the command through cloudfsd's own secure connection).
+def make_remote (host, cmd):
+ return "%s %s %s" % (get_path("ssh"), host, cmd)
+
+def run_cmd (program, args, host=None, filters=[]):
+ cmd = "%s %s" % (get_path(program), args)
+ for prog2, args2 in filters:
+ cmd = "%s | %s %s" % (cmd, get_path(prog2), args2)
+ if use_sudo:
+ cmd = "%s %s" % cmd
+ if host:
+ cmd = make_remote(host,cmd)
+ if len(filters):
+ print "executing %s using shell" % cmd
+ child = subprocess.Popen(cmd,shell=True,
+ stdout=subprocess.PIPE)
+ else:
+ print "executing %s without shell" % cmd
+ child = subprocess.Popen(cmd.split(" "),shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ return child
+
+# NB this node is always first in the list
+def get_members ():
+ peer_ips = run_cmd("gluster","peer status",
+ filters=[("grep","Hostname:")]).stdout
+ my_ip = socket.gethostbyname(socket.gethostname())
+ node_list = [socket.gethostbyaddr(my_ip)[0]]
+ # For each line: strip off the trailing newline, split at colon, take
+ # item #1
+ for peer in [line[:-1].split(": ")[1] for line in peer_ips]:
+ node_list.append(peer)
+ return node_list
+
+# A brick list looks something like this:
+# {
+# "vol_A": [ "server1:/bricks/A", "server2:/bricks/A" ],
+# "vol_B": [ "server2:/bricks/B", "server3:/bricks/B: ]
+# }
+def get_bricks ():
+ vol_list = {}
+ brick_list = []
+ for line in run_cmd("gluster","volume info all").stdout:
+ parts = line[:-1].split(": ")
+ if len(parts) != 2:
+ continue
+ if parts[0] == "Volume Name":
+ if brick_list:
+ vol_list[vol_name] = brick_list
+ vol_name = parts[1]
+ brick_list = []
+ elif parts[0].startswith("Brick"):
+ brick_list.append(parts[1])
+ if brick_list:
+ vol_list[vol_name] = brick_list
+ return vol_list
+
+def dir_in_use (brick_list, dir):
+ for vol_bricks in brick_list.itervalues():
+ for brick in vol_bricks:
+ if brick == dir:
+ return True
+ return False
+
+# A mount list looks something like this:
+# {
+# "server1": [ ( "/bricks/A", True ),
+# ( "/some/other/mount", False) ]
+# "server2": [ ( "/bricks/A", True ),
+# ( "/bricks/B", True ) ],
+# "server3": [ ( "/bricks/B", True ),
+# ( "/what/ever", False ) ]
+# }
+# The second part of each tuple is the "in-use" flag, true iff the mount is
+# part of a volume.
+def get_mounts (brick_list):
+ volumes_on_nodes = {}
+ for node_ip in get_members() :
+ scratch = []
+ for line in run_cmd("mount","",host=node_ip).stdout:
+ bits = line.split(" ")
+ if bits[4] not in good_fs_types:
+ continue
+ mount = "%s:%s" % (node_ip, bits[2])
+ scratch.append((bits[2],dir_in_use(brick_list,mount)))
+ volumes_on_nodes[node_ip] = scratch
+ return volumes_on_nodes
+
diff --git a/scripts/cloudfsd.py b/scripts/cloudfsd.py
index f68d76e..e510823 100755
--- a/scripts/cloudfsd.py
+++ b/scripts/cloudfsd.py
@@ -1,19 +1,18 @@
#!/usr/bin/python
-from bottle import route, post, run, request
+from bottle import route, post, run, view, debug
+
+import os
+import socket
+import string
import paths
import volstart
import volstop
import volmap
-import wwwroot
-import wwwcfgroot
-import wwwcfgmain
-import wwwinitcluster
-import wwwdoinitcluster
-import wwwprovision
-import wwwdoprovision
-import wwwconfirmprovision
+import cfs_utils
+import cfs_add_node
+import cfs_add_volume
CLOUDFSD_PORT = 8080
@@ -47,39 +46,39 @@ def list_users():
print "list users"
@route("/")
-def www_root():
- return wwwroot.www_root()
-
@route("/cfg")
-def www_cfgroot():
- return wwwcfgroot.www_cfgroot()
-
@route("/cfgmain")
-def www_cfgmain():
- return wwwcfgmain.www_cfgmain()
-
-@route("/wwwprovision")
-def www_provision():
- return wwwprovision.www_provision()
-
-@post("/wwwconfirmprovision")
-def www_confirmprovision():
- return wwwconfirmprovision.www_confirmprovision(request.body)
-
-@post("/wwwdoprovision")
-def www_doprovision():
- print "www doprovision"
- return wwwdoprovision.www_doprovision(request.body)
-
-@route("/wwwinitcluster")
-def www_initcluster():
- print "www initcluster"
- return wwwinitcluster.www_initcluster()
-
-@post("/wwwdoinitcluster")
-def www_doinitcluster():
- print "www doinitcluster"
- return wwwdoinitcluster.www_doinitcluster()
+@view("cfgmain.html")
+def cfg_main():
+ return dict()
+
+@route("/cluster")
+@view("cluster.html")
+def show_cluster():
+ # TBD: handle glusterd presence/startup check sanely
+ cfs_utils.run_cmd("chkconfig","--add glusterd")
+ cfs_utils.run_cmd("chkconfig","glusterd on")
+ cfs_utils.run_cmd("service","glusterd start")
+ node_list = cfs_utils.get_members()
+ return dict(node_list=string.join(node_list,"<br />"))
+
+@post("/cluster/add_node")
+def add_node():
+ return cfs_add_node.run_www()
+
+# TBD: implement remove_node
+
+@route("/volumes")
+@view("volumes.html")
+def show_volumes():
+ brick_list = cfs_utils.get_bricks()
+ mount_list = cfs_utils.get_mounts(brick_list)
+ # TBD: allow adding arbitrary directories instead of just mountpoints
+ return dict(bricks=brick_list,mounts=mount_list)
+
+@post("/volumes/add_volume")
+def add_volume():
+ return cfs_add_volume.run_www()
@route("/wwwaddtenant")
def www_addtenant():
@@ -93,5 +92,10 @@ def www_doaddtenant():
def www_listtenants():
print "www listtenants"
+@route("/styles/:sheet")
+def get_style (sheet):
+ return file("styles/%s"%sheet,"r")
+
if __name__ == "__main__":
+ debug(True)
run(host='',port=CLOUDFSD_PORT)
diff --git a/scripts/styles/cfgmain.css b/scripts/styles/cfgmain.css
new file mode 100644
index 0000000..364d88b
--- /dev/null
+++ b/scripts/styles/cfgmain.css
@@ -0,0 +1,23 @@
+body {
+ background-color: #fff;
+ color: #000;
+ font-size: 0.9em;
+ font-family: sans-serif,helvetica;
+ margin: 0;
+ padding: 0;
+}
+
+.banner {
+ text-align: center;
+ margin: 0;
+ padding: 0.6em 2em 0.4em;
+ background-color: #900;
+ color: #fff;
+ font-weight: bold;
+ font-size: 1.75em;
+ border-bottom: 2px solid #000;
+}
+
+.content {
+ padding: 1em 5em;
+}
diff --git a/scripts/styles/provlist.css b/scripts/styles/provlist.css
new file mode 100644
index 0000000..70132a4
--- /dev/null
+++ b/scripts/styles/provlist.css
@@ -0,0 +1,29 @@
+.header {
+ float: left;
+ width: 100%;
+ background-color: #f4f4f4;
+}
+
+.wrapper {
+ position: relative;
+ float: left;
+ left: 0.00%;
+ width: 100.00%;
+ background-color: #cccccc;
+}
+
+tr.d0 td {
+ background-color: #CC9999;
+ color: black;
+}
+
+tr.d1 td {
+ background-color: #9999CC;
+ color: black;
+}
+
+.footer {
+ float: left;
+ width: 100%;
+ background-color: #f4f4f4;
+}
diff --git a/scripts/views/add_node_done.html b/scripts/views/add_node_done.html
new file mode 100644
index 0000000..23308e3
--- /dev/null
+++ b/scripts/views/add_node_done.html
@@ -0,0 +1,7 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+<p>Node {{node_name}} added.</p>
+<p><a href="/cluster">Back to cluster configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
diff --git a/scripts/views/add_vol_done.html b/scripts/views/add_vol_done.html
new file mode 100644
index 0000000..d6ddf58
--- /dev/null
+++ b/scripts/views/add_vol_done.html
@@ -0,0 +1,7 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+<p>Volume {{name}} created.</p>
+<p><a href="/volumes">Back to volume configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
diff --git a/scripts/views/add_vol_fail.html b/scripts/views/add_vol_fail.html
new file mode 100644
index 0000000..5ecf1c5
--- /dev/null
+++ b/scripts/views/add_vol_fail.html
@@ -0,0 +1,7 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+<p>Creating volume {{name}} failed (stage {{action}}, status {{status}}).</p>
+<p><a href="/volumes">Back to volume configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
diff --git a/scripts/views/cfgmain.html b/scripts/views/cfgmain.html
new file mode 100644
index 0000000..3f46b78
--- /dev/null
+++ b/scripts/views/cfgmain.html
@@ -0,0 +1,16 @@
+<html><head>
+<title>_Red Hat CloudFS Configuration_</title>
+<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
+<meta http-equiv="Pragma" content="no-cache" />
+<link href="/styles/cfgmain.css" rel="stylesheet" type="text/css" />
+</head><body>
+<div class="banner">
+ <h1>Red Hat CloudFS Configuration Main</h1>
+</div>
+<div class="content">
+ <p><a href="/cluster">Manage Servers</a></p>
+ <p><a href="/volumes">Manage Volumes</a></p>
+ <p><a href="/wwwlisttenants">Manage Tenants</a></p>
+</div>
+</body></html>
+
diff --git a/scripts/views/cluster.html b/scripts/views/cluster.html
new file mode 100644
index 0000000..58e83d2
--- /dev/null
+++ b/scripts/views/cluster.html
@@ -0,0 +1,23 @@
+<html><head>
+<title>Initialize CloudFS Cluster</title>
+<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
+<meta http-equiv="pragma" content="no-cache">
+<link rel="stylesheet" type="text/css" href="/styles/cfgmain.css" />
+</head><body>
+<div class="banner">
+ <h1>Initialize CloudFS Cluster</h1>
+</div>
+<div class="content">
+<h2>Cluster Nodes</h2>
+<p>{{!node_list}}</p>
+<br />
+Enter the hostname of a node to add to the cluster
+<form method="post" action="cluster/add_node">
+ Node Name: <input type="text" name="NODENAME" />
+ <input type="submit" name="ACTION" value="Add" />
+</form>
+<br />
+<form method="get" action="/cfgmain">
+ <input type="submit" value="Done" />
+</form>
+</body></html>
diff --git a/scripts/views/volumes.html b/scripts/views/volumes.html
new file mode 100644
index 0000000..11c964c
--- /dev/null
+++ b/scripts/views/volumes.html
@@ -0,0 +1,54 @@
+<html><head>
+<title>_Provision CloudFS Volume_</title>
+<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
+<meta http-equiv="Pragma" content="no-cache" />
+<link href="/styles/cfgmain.css" rel="stylesheet" type="text/css" />
+<link href="/styles/provlist.css" rel="stylesheet" type="text/css" />
+</head><body>
+<div class="banner">
+<h1>Provision CloudFS Volume</h1>
+</div>
+<div class="content">
+<h2>Existing Volumes:</h2>
+ %for vol_name, brick_list in bricks.iteritems():
+ <p>{{vol_name}}
+ %for brick in brick_list:
+ <br /> {{brick}}
+ %end
+ </p>
+ %end
+<hr>
+<h2>Provision a Volume From Available Bricks:</h2>
+<form method="post" name="provision" action="/volumes/add_volume">
+<div class="header"><hr></div>
+<div class="wrapper">
+<table>
+ %color_index = 0
+ %for node, mount_list in mounts.iteritems():
+ %color = "d%d" % color_index
+ %color_index = (color_index + 1) % 2
+ <tr class="{{color}}">
+ <td>{{node}}</td>
+ %for mpath, minuse in mount_list:
+ %full_path = "%s:%s" % (node,mpath)
+ %if minuse:
+ <td><input type="checkbox" name="VOLUME" value=0 disabled />{{mpath}} (in use)</td>
+ %else:
+ <td><input type="checkbox" name="VOLUME_{{full_path}}" value=0 />{{mpath}}</td>
+ %end
+
+ %end
+ </tr>
+ %end
+</table>
+</div>
+<div class="footer"><hr></div>
+Volume Type: <input type="radio" name="TYPE" value="plain" checked />Plain
+<input type="radio" name="TYPE" value="replica" />Replicated
+<input type="radio" name="TYPE" value="stripe" />Striped
+<br>Replica or Stripe count: <input type="text" name="COUNT" size="2" />
+<br>Volume ID: <input type="text" name="VOLUMEID" />
+<input type="submit" name="PROVISION" value="Provision" />
+</form>
+</div>
+</body></html>
diff --git a/scripts/volstart.py b/scripts/volstart.py
index a8f0655..3c699f7 100644
--- a/scripts/volstart.py
+++ b/scripts/volstart.py
@@ -44,6 +44,7 @@ def scan_gfs_volfiles(vol_name):
# family, socktype, proto, canonname, sockaddr
# We extract the sockaddr of the first item, and the IP addr from that
# TBD: handle IPv6, multi-homed hosts, etc.
+ # TBD: skip loopback addresses based on note above
my_addrs = socket.getaddrinfo(my_name, 0)
my_glob = "%s/vols/%s/%s.*.vol" % (paths.gfs_dir, vol_name, vol_name)
for vf in glob.iglob(my_glob):
@@ -138,6 +139,7 @@ def create_tenant_dirs(vol_file):
def vol_start(vol_name):
vol_base = check_volume_directory(vol_name)
users = parse_user_file(vol_name)
+ # TBD: deal with more than one brick on the same server
vf = scan_gfs_volfiles(vol_name)
new_vf = "%s/%s" % (vol_base, os.path.basename(vf))
outfile = open(new_vf, "w")
diff --git a/scripts/wwwcfgmain.py b/scripts/wwwcfgmain.py
deleted file mode 100644
index b577a4a..0000000
--- a/scripts/wwwcfgmain.py
+++ /dev/null
@@ -1,19 +0,0 @@
-
-import wwwcss
-
-def www_cfgmain() :
-
- ret = "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" \"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">"
- ret = ret + "<html><head>"
- ret = ret + "<title>_Red Hat CloudFS Configuration_</title>"
- ret = ret + "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />"
- ret = ret + "<meta http-equiv=\"Pragma\" content=\"no-cache\" />"
- ret = ret + wwwcss.css
- ret = ret + "</head><body>"
- ret = ret + "<h1>Red Hat CloudFS Configuration Main<h1/>"
- ret = ret + "<p><a href=\"wwwinitcluster\">Initialize Cluster</a></p>"
- ret = ret + "<p><a href=\"wwwlisttenants\">Tenant Management</a></p>"
- ret = ret + "<p><a href=\"wwwprovision\">Provision Storage</a></p>"
- ret = ret + "</body></html>"
- return ret
-
diff --git a/scripts/wwwcfgroot.py b/scripts/wwwcfgroot.py
deleted file mode 100644
index b9faede..0000000
--- a/scripts/wwwcfgroot.py
+++ /dev/null
@@ -1,34 +0,0 @@
-
-import wwwcss
-
-def www_cfgroot() :
-
- ret = "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" \"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">"
- ret = ret + "<html><head>"
- ret = ret + "<title>_Red Hat CloudFS Configuration_</title>"
- ret = ret + "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />"
- ret = ret + "<meta http-equiv=\"Pragma\" content=\"no-cache\" />"
- ret = ret + wwwcss.css
- ret = ret + "<meta http-equiv=\"Refresh\" content=\"0; url=/cfgmain\" />"
- ret = ret + "</head><body>"
- ret = ret + "<h1>Red Hat CloudFS Configuration<h1/>"
-# authentication disabled for now
-# <h2>Sign On<h2/>
-# <form method="post" action="/cgi-bin/authenticate">
-# <p>
-# <strong>Please enter user ID and password:</strong>
-# <br>
-# <strong>User ID</strong>
-# <input type="text" size="20" name="USERNAME">
-# <strong>Password</strong>
-# <input type="password" size="20" name="PASSWORD">
-# </p>
-# <p>
-# <input type="submit" name="signon" value="Sign On">
-# </p>
-# </form>
- ret = ret + "<p>Please follow <a href=\"/cfgmain\">link</a>!</p>"
- ret = ret + "</body></html>"
-
- return ret
-
diff --git a/scripts/wwwconfirmprovision.py b/scripts/wwwconfirmprovision.py
deleted file mode 100644
index 8b3ccba..0000000
--- a/scripts/wwwconfirmprovision.py
+++ /dev/null
@@ -1,59 +0,0 @@
-
-import fileinput
-import re
-import os
-
-def www_confirmprovision(body):
-
- # f = fileinput.input()
- line = body.readline()
- # fileinput.close()
-
- tokens = re.split('&', line)
-
- volume_id = ""
- volume_type = ""
- replica_or_stripe_count = ""
- command = ""
- volumes = []
-
- # assert PROVISION= will be the last one, thus we may also assert that
- # all options will be extracted before we try to cons up a cmd
- for index in range(len(tokens)) :
- if tokens[index].startswith("TYPE=") :
- scratch = re.split('=', tokens[index])
- volume_type = scratch[1]
- continue
- if tokens[index].startswith("COUNT=") :
- scratch = re.split('=', tokens[index])
- replica_or_stripe_count = scratch[1]
- continue
- elif tokens[index].startswith("VOLUMEID=") :
- scratch = re.split('=', tokens[index])
- volume_id = scratch[1]
- continue
- elif tokens[index].startswith("VOLUME=") :
- scratch = re.split('=', tokens[index])
- volumes.append(scratch[1])
- continue
-
- ret = "Content-type: text/html\n"
- ret = ret + "<html><head>"
- ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\">"
- ret = ret + "<title>Confirm Provision Volume</title>"
- ret = ret + "</head><body>"
- ret = ret + "<h2>Confirm Provision Volume</h2>"
- cmd = "/usr/sbin/gluster volume create " + volume_id
- if volume_type != "plain" :
- cmd = cmd + " " + volume_type + " " + replica_or_stripe_count
- cmd = cmd + " transport tcp"
- for volume in volumes :
- cmd = cmd + " " + volume.replace("%3A", ":").replace("%2F", "/")
-
- ret = ret + "<form method=\"post\" name=\"provision\" action=\"wwwdoprovision\">"
- ret = ret + "<input type=\"hidden\" name=\"COMMAND\" value=\"" + cmd + "\" />"
- ret = ret + cmd + " <br><input type=\"submit\" name=\"PROVISION\" value=\"Confirm\" />"
- ret = ret + "</form>"
- ret = ret + "</body></html>"
- return ret
-
diff --git a/scripts/wwwcss.py b/scripts/wwwcss.py
deleted file mode 100644
index af378a8..0000000
--- a/scripts/wwwcss.py
+++ /dev/null
@@ -1,79 +0,0 @@
-
-css = """<style type=\"text/css\">
-/*<![CDATA[*/
-body {
-background-color: #fff;
-color: #000;
-font-size: 0.9em;
-font-family: sans-serif,helvetica;
-margin: 0;
-padding: 0;
-}
-:link {
-color: #fff;" + +
-}
-:visited {
-color: #fff;
-}
-a:hover {
-color: #f50;
-}
-h1 {
-text-align: center;
-margin: 0;
-padding: 0.6em 2em 0.4em;
-background-color: #900;
-color: #fff;
-font-weight: normal;
-font-size: 1.75em;
-border-bottom: 2px solid #000;
-}
-h1 strong {
-font-weight: bold;
-}
-h2 {
-font-size: 1.1em;
-font-weight: bold;
-}
-hr {
-display: none;
-}
-.content {
-padding: 1em 5em;
-}
-.content-columns {
-/* Setting relative positioning allows for
-absolute positioning for sub-classes */
-position: relative;
-padding-top: 1em;
-}
-.content-column-left {
-/* Value for IE/Win; will be overwritten for other browsers */
-width: 47%;
-padding-right: 3%;
-float: left;
-padding-bottom: 2em;
-}
-.content-column-left hr {
-display: none;
-}
-.content-column-right {
-/* Values for IE/Win; will be overwritten for other browsers */
-width: 47%;
-padding-left: 3%;
-float: left;
-padding-bottom: 2em;
-}
-.content-columns>.content-column-left, .content-columns>.content-column-right {
-/* Non-IE/Win */
-}
-img {
-border: 2px solid #fff;
-padding: 2px;
-margin: 2px;
-}
-a:hover img {
-border: 2px solid #f50;
-}
-/*]]>*/
-</style>"""
diff --git a/scripts/wwwdoinitcluster.py b/scripts/wwwdoinitcluster.py
deleted file mode 100644
index 29526a5..0000000
--- a/scripts/wwwdoinitcluster.py
+++ /dev/null
@@ -1,59 +0,0 @@
-
-import fileinput
-import re
-import os
-import socket
-
-def www_doinitcluster():
-
- f = fileinput.input()
- line = f.readline()
- fileinput.close()
-
- host_name = socket.gethostname()
-
- tokens = re.split('&', line)
- node_name = ""
-
- # all options will be extracted before we try to cons up a cmd
- for index in range(len(tokens)) :
- if tokens[index].startswith("NODENAME=") :
- scratch = re.split('=', tokens[index])
- node_name = scratch[1]
-
- # derive the "real" node addr, e.g. if the user enters, e.g., 192.168.122.55
- # and that is this IP for 'this' node then when we're done can_node_addr will
- # either be 192.168.122.55 or 127.0.0.1.
- # similarly if the user enters, e.g., <principalnode>.foo.bar.baz.com, the
- # result will be the same
- host_addr = socket.gethostbyname(host_name)
- node_addr = socket.gethostbyname(node_name)
- can_node_name = socket.gethostbyaddr(node_addr)
- can_node_addr = socket.gethostbyname(can_node_name[0])
-
-
- # now we can do the right thing if we're on the principal node or not
- if node_addr == host_addr or "127.0.0.1" == can_node_addr :
- sts = os.system("/usr/bin/sudo /sbin/chkconfig --add glusterd; /usr/bin/sudo /sbin/chkconfig glusterd on; /usr/bin/sudo /sbin/service glusterd start")
- else :
- cmd = "/usr/bin/sudo /usr/bin/ssh " + can_node_addr + " /sbin/chkconfig --add glusterd"
- clean_cmd = cmd.replace("|", "").replace("&", "").replace(">", "")
- sts = os.system(clean_cmd + " > /dev/null 2>&1");
- cmd = "/usr/bin/sudo /usr/bin/ssh " + can_node_addr + " /sbin/chkconfig glusterd on"
- clean_cmd = cmd.replace("|", "").replace("&", "").replace(">", "")
- sts = os.system(clean_cmd + " > /dev/null 2>&1");
- cmd = "/usr/bin/sudo /usr/bin/ssh " + can_node_addr + " /sbin/service glusterd start"
- clean_cmd = cmd.replace("|", "").replace("&", "").replace(">", "")
- sts = os.system(clean_cmd + " > /dev/null 2>&1");
- cmd = "/usr/bin/sudo /usr/sbin/gluster peer probe " + node_name
- clean_cmd = cmd.replace("|", "").replace("&", "").replace(">", "")
- sts = os.system(clean_cmd + " > /dev/null 2>&1");
-
- ret = "Content-type: text/html\n"
- ret = ret + "<html><head>"
- ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\">"
- ret = ret + "<meta http-equiv=\"Refresh\" content=\"0; url=wwwinitcluster\" />"
- ret = ret + "</head><body>"
- ret = ret + "<p>Please follow <a href=\"wwwinitcluster\">link</a>!</p>"
- ret = ret + "</body></html>"
- return ret
diff --git a/scripts/wwwdoprovision.py b/scripts/wwwdoprovision.py
deleted file mode 100644
index 6535c50..0000000
--- a/scripts/wwwdoprovision.py
+++ /dev/null
@@ -1,114 +0,0 @@
-
-import fileinput
-import re
-import os
-
-def www_doprovision(body):
-
- # f = fileinput.input()
- line = body.readline()
- # fileinput.close()
-
- tokens = re.split('&', line)
-
- volume_id = ""
- volume_type = ""
- replica_or_stripe_count = ""
- command = ""
- volumes = []
-
- # assert PROVISION= will be the only one, thus we may also assert that
- # all options will be extracted before we try to cons up a cmd
- for index in range(len(tokens)) :
- if tokens[index].startswith("COMMAND=") :
- scratch = re.split('=', tokens[index])
- command = scratch[1]
- continue
-
- ret = "Content-type: text/html\n"
- ret = ret + "<html><head>"
- ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\">"
- ret = ret + "<title>Provisioned Volume</title>"
- ret = ret + "</head><body>"
- ret = ret + "<h2>Provisioned Volume</h2>"
- ## make the underlying volume with gluster
- decode_cmd = command.replace("%3A", ":").replace("%2F", "/").replace("+", " ")
- clean_cmd = decode_cmd.replace("|", "").replace("&", "").replace(">", "")
- gluster_sts = os.system("/usr/bin/sudo " + clean_cmd + " > /dev/null 2>&1")
- ## make the cloudfs vol files with cloudfs
- cloudfs_sts = -1
- cmd_tokens = re.split(' ', clean_cmd)
- if gluster_sts != -1 and cmd_tokens[0] == "/usr/sbin/gluster" and cmd_tokens[1] == "volume" and cmd_tokens[2] == "create" :
- cloudfs_cmd = "/usr/bin/sudo /usr/bin/cloudfs init " + cmd_tokens[3] + " /var/lib/glusterd/cloudfs.tenants > /dev/null 2>&1"
- cloudfs_sts = os.system(cloudfs_cmd)
-
- ## make the dirs on each node/volume
- if cloudfs_sts != -1 :
- ## first get all the tenants
- tenants = []
- tenants.append("junk")
- for tenantline in fileinput.input("/var/lib/glusterd/cloudfs.tenants") :
- scratch = re.split(' ', tenantline)
- tenants.append(scratch[0])
- fileinput.close()
- ## now make the dirs on every volume
- first_node = 6
- if cmd_tokens[4] != "transport" :
- first_node = first_node + 2
- nodes = []
- for ii in range(first_node, len(cmd_tokens)) :
- scratch = re.split(':', cmd_tokens[ii])
- nodes.append(scratch[0])
- for tenant in tenants :
- mkdir_cmd = "/usr/bin/sudo /usr/bin/ssh " + scratch[0] + " /bin/mkdir -p " + scratch[1] + "/" + tenant
- clean_cmd = mkdir_cmd.replace("|", "").replace("&", "").replace(">", "")
- mkdir_sts = os.system(clean_cmd + " > /dev/null 2>&1")
- if mkdir_sts != 0 :
- ret = ret + "<br> fail: " + clean_cmd
- # copy the modified vol files to the peers
- unique_nodes = set(nodes)
- this_ip = ""
- ifconfig_pipe = os.popen("/sbin/ifconfig eth0")
- for line in ifconfig_pipe :
- line = line.lstrip()
- tokens = re.split('[: ]', line)
- if tokens[0] == "inet" and tokens[1] == "addr" :
- this_ip = tokens[2]
- scratch = []
- scratch.append(this_ip)
-
- unique_nodes = unique_nodes.difference(scratch)
- ifconfig_pipe.close()
- # ssh and scp (i.e. pull). could just scp
- # (i.e. push) but then we would have to add
- # scp to sudoers-- (Would like to minimize
- # the number of things added to sudoers
- vol_name = cmd_tokens[3].replace("|", "").replace("&", "").replace(">", "")
- for node in unique_nodes :
- scp_cmd = "/usr/bin/sudo /usr/bin/ssh " + node + " 'cd /var/lib/glusterd/vols/" + vol_name + " && /usr/bin/scp -q -r " + this_ip + ":/var/lib/glusterd/vols/" + vol_name + "/* .'"
- scp_sts = os.system(scp_cmd)
- if scp_sts != 0 :
- ret = ret + "<br>fail: " + scp_cmd
-
- # now start the volume
- start_cmd = "/usr/bin/sudo /usr/sbin/gluster volume start " + vol_name
- start_sts = os.system(start_cmd + " > /dev/null 2>&1")
- if start_sts != 0 :
- ret = ret + "<br> fail: " + start_cmd
- else :
- ret = ret + "<br> started " + vol_name
-
- # list FUSE volume specs for download
- mkdir_sts = os.system("/usr/bin/sudo /bin/mkdir -p scratch")
- cp_sts = os.system("/usr/bin/sudo /bin/cp /var/lib/cloudfs/vols/" + vol_name + "/" + vol_name + "-fuse.vol.* /var/www/html/cfg/scratch/")
- ret = ret + "<hr><br> client/tenant volume files (right-click to save-as):"
- for tenant in tenants :
- if tenant != "junk" :
- ret = ret + "<br><a href=\"/cfg/scratch/%s-fuse.vol.%s\">%s</a>" % (vol_name, tenant, tenant)
- ret = ret + "<hr>"
- ret = ret + "<form method=\"post\" action=\"cfgmain\">"
- ret = ret + "<input type=\"submit\" name=\"DONE\" value=\"Done\"></form>"
-
- ret = ret + "</body></html>"
- return ret
-
diff --git a/scripts/wwwinitcluster.py b/scripts/wwwinitcluster.py
deleted file mode 100644
index 2858d6a..0000000
--- a/scripts/wwwinitcluster.py
+++ /dev/null
@@ -1,40 +0,0 @@
-
-import datetime
-import os
-import re
-import string
-import socket
-import sys
-
-def www_initcluster():
-
- hostname = socket.gethostname()
-
- node_ips = []
-
- sts = os.system("/usr/bin/sudo /sbin/chkconfig --add glusterd; /usr/bin/sudo /sbin/chkconfig glusterd on; /usr/bin/sudo /sbin/service glusterd start")
- peer_ips = os.popen("/usr/bin/sudo /usr/sbin/gluster peer status | /bin/grep Hostname:")
- for line in peer_ips :
- tokens = re.split(':', line)
- node_ips.append(socket.gethostbyname(string.strip(tokens[1])))
- peer_ips.close()
-
- ret = "Content-type: text/html\n"
- ret = ret + "<html><head>"
- ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\"><title>Initialize CloudFS Cluster</title>"
- ret = ret + "</head><body>"
- ret = ret + "<h2>Initialize CloudFS Cluster</h2>"
- ret = ret + "<hr>"
- ret = ret + "<h2>Cluster Nodes</h2>"
- for node_ip in node_ips :
- ret = ret + node_ip + "<br>"
- ret = ret + "<hr><br>"
- ret = ret + "Enter the hostname of a node to add to the cluster"
- ret = ret + "<form method=\"post\" action=\"wwwdoinitcluster\">"
- ret = ret + "Node Name: <input type=\"text\" name=\"NODENAME\">"
- ret = ret + "<input type=\"submit\" name=\"ADDNODE\" value=\"Add Node\"></form>"
- ret = ret + "<hr>"
- ret = ret + "<form method=\"post\" action=\"/cfgmain\">"
- ret = ret + "<input type=\"submit\" name=\"DONE\" value=\"Done\"></form>"
- ret = ret + "</body></html>"
- return ret
diff --git a/scripts/wwwprovision.py b/scripts/wwwprovision.py
deleted file mode 100644
index 72dada3..0000000
--- a/scripts/wwwprovision.py
+++ /dev/null
@@ -1,119 +0,0 @@
-
-import datetime
-import os
-import re
-import string
-import socket
-import sys
-
-def brick_used(needle, haystack) :
- for vol in haystack :
- for brick in vol :
- if brick == needle :
- return True
- return False
-
-def www_provision() :
-
- hostname = socket.gethostname()
-
- existing_vols = []
- bricks_by_vol = []
- bbv_index = -1
-
- volinfo_pipe = os.popen("/usr/bin/sudo /usr/sbin/gluster volume info all")
- for line in volinfo_pipe :
- line = line.lstrip()
- if line.startswith("Volume") :
- tokens = re.split(':', line)
- if tokens[0].strip() == "Volume Name" :
- existing_vols.append(tokens[1].strip())
- elif line.startswith("Bricks:") :
- bricks = []
- bricks_by_vol.append(bricks)
- bbv_index = bbv_index + 1
- elif line.startswith("Brick") :
- tokens = re.split(' ', line)
- bricks_by_vol[bbv_index].append(tokens[1].strip())
- volinfo_pipe.close()
-
- node_ips = []
-
- ifconfig_pipe = os.popen("/sbin/ifconfig eth0")
- for line in ifconfig_pipe :
- line = line.lstrip()
- tokens = re.split('[: ]', line)
- if tokens[0] == "inet" and tokens[1] == "addr" :
- node_ips.append(tokens[2])
- ifconfig_pipe.close()
-
- peer_ips = os.popen("/usr/bin/sudo /usr/sbin/gluster peer status | /bin/grep Hostname:")
- for line in peer_ips :
- tokens = re.split(':', line)
- node_ips.append(socket.gethostbyname(string.strip(tokens[1])))
- peer_ips.close()
-
- volumes_on_nodes = []
-
- for node_ip in node_ips :
- # ret = ret +("<p>%s</p>") % (node_ip)
- cmd = "/usr/bin/sudo /usr/bin/ssh " + node_ip + " df -H"
- volumes_on_node = os.popen(cmd)
- scratch = []
- for line in volumes_on_node :
- line = line.rstrip()
- if False == line.startswith("Filesystem") and False == line.startswith("/dev/mapper") and False == line.startswith("tmpfs") and False == line.endswith(" /") and False == line.endswith("/boot") :
- scratch.append(line)
- volumes_on_node.close()
- volumes_on_nodes.append(scratch)
-
- ret = "Content-type: text/html\n"
- ret = ret + "<html><head><meta http-equiv=\"pragma\" content=\"no-cache\"><title>Provision CloudFS Volume</title>"
- ret = ret + "<style type=\"text/css\">"
- ret = ret + ".colmask{ position: relative; overflow: hidden; margin: 0px auto; width: 100%; background-color: #b4d2f7 }"
- ret = ret + ".header{ float: left; width: 100%; background-color: #f4f4f4}"
- ret = ret + ".wrapper{ position: relative; float: left; left: 0.00%; width: 100.00%; background-color: #cccccc}"
- ret = ret + "tr.d0 td { background-color: #CC9999; color: black; }"
- ret = ret + "tr.d1 td { background-color: #9999CC; color: black; }"
- ret = ret + ".footer{ float: left; width: 100%; background-color: #f4f4f4}"
- ret = ret + "body { border-width: 0px; padding: 0px; margin: 0px; font-size: 90%; background-color: #e7e7de}"
- ret = ret + "</style></head><body>"
- ret = ret + "<h1>Provision CloudFS Volume</h1>"
- ret = ret + "<h2>Existing Volumes:</h2>"
- bbv_index = 0
- for existing_vol in existing_vols :
- ret = ret + "<p>" + existing_vol + ":"
- for brick in bricks_by_vol[bbv_index] :
- ret = ret + "<br> " + brick
- bbv_index = bbv_index + 1
- ret = ret + "</p>"
- ret = ret + "<hr>"
- ret = ret + "<h2>Provision a Volume From Available Bricks:</h2>"
- ret = ret + "<form method=\"post\" name=\"provision\" action=\"wwwconfirmprovision\">"
- ret = ret + "<div class=\"header\"><hr></div>"
- ret = ret + "<div class=\"wrapper\">"
- ret = ret + "<table>"
- node_index = 0
- for node_ip in node_ips :
- ret = ret + "<tr class=\"d%d\">" % (node_index % 2)
- ret = ret + "<td>%s</td>" % (node_ip)
- for volumes_on_node in volumes_on_nodes[node_index] :
- tokens = volumes_on_node.rpartition(" ")
- if brick_used(node_ip + ":" + tokens[2], bricks_by_vol) == True :
- ret = ret + "<td><input type=\"checkbox\" name=\"VOLUME\" value=\"%s:%s\" disabled />%s</td>" % (node_ip, tokens[2], tokens[2])
- else :
- ret = ret + "<td><input type=\"checkbox\" name=\"VOLUME\" value=\"%s:%s\" />%s</td>" % (node_ip, tokens[2], tokens[2])
- ret = ret + "</tr>"
- node_index = node_index + 1
- ret = ret + "</table>"
- ret = ret + "</div>"
- ret = ret + "<div class=\"footer\"><hr></div>"
- ret = ret + "Volume Type: <input type=\"radio\" name=\"TYPE\" value=\"plain\" checked />Plain"
- ret = ret + "<input type=\"radio\" name=\"TYPE\" value=\"replica\" />Replicated"
- ret = ret + "<input type=\"radio\" name=\"TYPE\" value=\"stripe\" />Striped"
- ret = ret + "<br>Replica or Stripe count: <input type=\"text\" name=\"COUNT\" size=\"2\" />"
- ret = ret + "<br>Volume ID: <input type=\"text\" name=\"VOLUMEID\" />"
- ret = ret + "<input type=\"submit\" name=\"PROVISION\" value=\"Provision\" />"
- ret = ret + "</form>"
- ret = ret + "</body></html>"
- return ret
diff --git a/scripts/wwwroot.py b/scripts/wwwroot.py
deleted file mode 100644
index 6e9a910..0000000
--- a/scripts/wwwroot.py
+++ /dev/null
@@ -1,10 +0,0 @@
-
-def www_root() :
- ret = "Content-type: text/html\n"
- ret = ret + "<html><head>"
- ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\">"
- ret = ret + "<meta http-equiv=\"Refresh\" content=\"0; url=/cfg\" />"
- ret = ret + "</head><body>"
- ret = ret + "<p>Please follow <a href=\"/cfg\">link</a>!</p>"
- ret = ret + "</body></html>"
- return ret
Branch 'xattr-prefetch' - xlators/performance
by Jeff Darcy
xlators/performance/xattr-prefetch/src/xattr-prefetch.c | 18 +++-------------
1 file changed, 4 insertions(+), 14 deletions(-)
New commits:
commit 7c9f35c267bac0b8f4d2d99612974db3fa591172
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Mon May 9 08:53:55 2011 -0400
Add dict_unrefs, make lookup check fd as well as inode.
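The fd check matters because cache slots are filled per open directory: an entry prefetched through one fd should not satisfy a lookup arriving through another. A rough sketch of the tightened match, as hypothetical Python pseudocode rather than the translator's C:

    # Hypothetical model of the tightened cache match: both fd and inode
    # must agree before a slot counts as a hit; anything else is a miss.
    def find_slot(bucket, fd, inode):
        for slot in bucket:
            if slot.fd is fd and slot.inode is inode:   # both must match
                return slot
        return None                                     # treat as a miss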
diff --git a/xlators/performance/xattr-prefetch/src/xattr-prefetch.c b/xlators/performance/xattr-prefetch/src/xattr-prefetch.c
index 389dc4f..8c6ef54 100644
--- a/xlators/performance/xattr-prefetch/src/xattr-prefetch.c
+++ b/xlators/performance/xattr-prefetch/src/xattr-prefetch.c
@@ -73,7 +73,7 @@ xp_release (xlator_t *this, fd_t *fd)
}
dict_t *
-xp_get_inode_ctx (xlator_t *this, fd_t * fd, inode_t *inode, dict_t *newdata)
+xp_get_inode_ctx (xlator_t *this, fd_t *fd, inode_t *inode, dict_t *newdata)
{
time_t cutoff = time(NULL) - 2;
xp_inode_ctx_t *inode_ctx = NULL;
@@ -88,7 +88,7 @@ xp_get_inode_ctx (xlator_t *this, fd_t * fd, inode_t *inode, dict_t *newdata)
LOCK(&priv->lock);
for (i = 0; i < XP_BUCKET_SIZE; ++i) {
- if (inode_ctx[i].inode == inode) {
+ if ((inode_ctx[i].fd == fd) && (inode_ctx[i].inode == inode)) {
goto found;
}
}
@@ -345,15 +345,6 @@ xp_readdir (call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
return 0;
}
-int32_t
-xp_getxattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *dict)
-{
- XP_STACK_UNWIND (getxattr, frame, op_ret, op_errno, dict);
- return 0;
-}
-
-
gf_boolean_t
xp_is_prefetch_xattr (char *key)
{
@@ -424,15 +415,14 @@ xp_getxattr (call_frame_t *frame, xlator_t *this, loc_t *loc, const char *name)
XP_STACK_UNWIND(getxattr,frame,0,0,new_dict);
return 0;
}
+ dict_unref(new_dict);
}
}
miss:
++(priv->stats.miss);
not_ours:
- STACK_WIND (frame, xp_getxattr_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->getxattr, loc, name);
- return 0;
+ return default_getxattr(frame,this,loc,name);
}
int32_t
Branch 'xattr-prefetch' - pkg/cloudfs.spec.in xlators/performance
by Jeff Darcy
pkg/cloudfs.spec.in | 2
xlators/performance/xattr-prefetch/src/xattr-prefetch-mem-types.h | 7
xlators/performance/xattr-prefetch/src/xattr-prefetch.c | 288 +++++++---
xlators/performance/xattr-prefetch/src/xattr-prefetch.h | 23
4 files changed, 237 insertions(+), 83 deletions(-)
New commits:
commit 600baf631510c6f095cf1f3a03fba19a5f50b87f
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri May 6 10:55:24 2011 -0400
Move cache to xlator, fix leak, add stats.
From review suggestions. The cache is now xlator-global and fixed-size (a
mere 1024 entries, which should be tunable some day). It's managed as a
four-way set-associative cache with LRU. I also fixed a memory leak and
added statistics accessible via getxattr(trusted.xattr.stats). This shows
a hit rate of nearly 99.9% for my 100K-file test despite the small cache
size, with approximately three hits and one eviction per file - exactly as
it should be, since the oldest entries constantly get pushed out.
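As a rough standalone model of that scheme (illustrative names and sizes only, assuming the 1024-entry table split into four-slot buckets; the real translator stores dict_t pointers keyed by inode):

    # Rough model of a four-way set-associative cache with LRU eviction.
    NUM_ENTRIES = 1024
    BUCKET_SIZE = 4                        # four-way set-associative
    NUM_BUCKETS = NUM_ENTRIES // BUCKET_SIZE

    class Slot(object):
        def __init__(self):
            self.key = None
            self.value = None
            self.used = 0                  # LRU clock value at last access

    table = [Slot() for _ in range(NUM_ENTRIES)]
    lru_clock = 0

    def lookup(key, new_value=None):
        global lru_clock
        # A key hashes to exactly one bucket of BUCKET_SIZE slots.
        base = (hash(key) % NUM_BUCKETS) * BUCKET_SIZE
        bucket = table[base:base + BUCKET_SIZE]
        slot = None
        for s in bucket:
            if s.key == key:               # hit within the bucket
                slot = s
                break
        else:
            if new_value is None:
                return None                # miss, nothing to insert
            # Evict the least-recently-used slot in this bucket only.
            slot = min(bucket, key=lambda s: s.used)
            slot.key = key
        if new_value is not None:
            slot.value = new_value
        lru_clock += 1
        slot.used = lru_clock              # mark as most recently used
        return slot.value

The bucket-local LRU is what produces the behavior described above: a hot entry keeps bumping its clock value and stays put, while the stalest slot in its bucket is the one recycled.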
diff --git a/pkg/cloudfs.spec.in b/pkg/cloudfs.spec.in
index 31a8db9..4401777 100644
--- a/pkg/cloudfs.spec.in
+++ b/pkg/cloudfs.spec.in
@@ -46,7 +46,7 @@ with additional authentication/encryption/multi-tenancy features.
# Remove unwanted files from all the shared libraries
find %{buildroot}%{_libdir} -name '*.a' -delete
find %{buildroot}%{_libdir} -name '*.la' -delete
-find %{buildroot}%{_libdir} -name '*.so.0.0.0' | xargs strip
+#find %{buildroot}%{_libdir} -name '*.so.0.0.0' | xargs strip
%clean
%{__rm} -rf %{buildroot}
diff --git a/xlators/performance/xattr-prefetch/src/xattr-prefetch-mem-types.h b/xlators/performance/xattr-prefetch/src/xattr-prefetch-mem-types.h
index 046b782..89a0a31 100644
--- a/xlators/performance/xattr-prefetch/src/xattr-prefetch-mem-types.h
+++ b/xlators/performance/xattr-prefetch/src/xattr-prefetch-mem-types.h
@@ -23,9 +23,10 @@
#include "mem-types.h"
enum gf_xp_mem_types_ {
- gf_xp_mt_xp_local_t = gf_common_mt_end + 1,
- gf_xp_mt_xp_inode_ctx_t,
- gf_xp_mt_xp_private_t,
+ gf_mt_xp_local_t = gf_common_mt_end + 1,
+ gf_mt_xp_inode_ctx_t,
+ gf_mt_xp_fd_ctx_t,
+ gf_mt_xp_private_t,
gf_xp_mt_loc_t,
gf_xp_mt_end
};
diff --git a/xlators/performance/xattr-prefetch/src/xattr-prefetch.c b/xlators/performance/xattr-prefetch/src/xattr-prefetch.c
index 8aad401..389dc4f 100644
--- a/xlators/performance/xattr-prefetch/src/xattr-prefetch.c
+++ b/xlators/performance/xattr-prefetch/src/xattr-prefetch.c
@@ -1,4 +1,6 @@
/*
+* }
+* }
* Copyright (c) 2011 Red Hat, Inc.
*
* This file is part of CloudFS.
@@ -22,19 +24,121 @@
int32_t
xp_forget (xlator_t *this, inode_t *inode)
{
- struct iatt *buf = NULL;
- uint64_t value = 0;
+ xp_inode_ctx_t *inode_ctx = NULL;
+ uint32_t i = 0;
+ xp_private_t *priv = this->private;
+
+ inode_ctx = priv->table
+ + ((inode->ino >> 8) % XP_NUM_BUCKETS) * XP_BUCKET_SIZE;
+
+ LOCK(&priv->lock);
+ for (i = 0; i < XP_BUCKET_SIZE; ++i) {
+ if (inode_ctx[i].inode == inode) {
+ if (inode_ctx[i].attrs) {
+ dict_unref(inode_ctx[i].attrs);
+ inode_ctx[i].attrs = NULL;
+ }
+ inode_ctx[i].fd = NULL;
+ inode_ctx[i].inode = NULL;
+ break;
+ }
+ }
+ UNLOCK(&priv->lock);
- inode_ctx_del (inode, this, &value);
+ return 0;
+}
- if (value) {
- buf = (void *)(long)value;
- GF_FREE (buf);
- }
+int32_t
+xp_release (xlator_t *this, fd_t *fd)
+{
+ uint32_t i = 0;
+ xp_private_t *priv = this->private;
+ xp_inode_ctx_t *inode_ctx = priv->table;
+
+ LOCK(&priv->lock);
+ for (i = 0; i < XP_BUCKET_SIZE; ++i) {
+ if (inode_ctx[i].fd == fd) {
+ if (inode_ctx[i].attrs) {
+ dict_unref(inode_ctx[i].attrs);
+ inode_ctx[i].attrs = NULL;
+ }
+ inode_ctx[i].fd = NULL;
+ inode_ctx[i].inode = NULL;
+ break;
+ }
+ }
+ UNLOCK(&priv->lock);
return 0;
}
+dict_t *
+xp_get_inode_ctx (xlator_t *this, fd_t * fd, inode_t *inode, dict_t *newdata)
+{
+ time_t cutoff = time(NULL) - 2;
+ xp_inode_ctx_t *inode_ctx = NULL;
+ uint32_t i = 0;
+ dict_t *dict = NULL;
+ xp_private_t *priv = this->private;
+ uint32_t e_index = 0;
+ uint64_t e_clock = 0;
+
+ inode_ctx = priv->table
+ + ((inode->ino >> 8) % XP_NUM_BUCKETS) * XP_BUCKET_SIZE;
+ LOCK(&priv->lock);
+
+ for (i = 0; i < XP_BUCKET_SIZE; ++i) {
+ if (inode_ctx[i].inode == inode) {
+ goto found;
+ }
+ }
+
+ if (newdata) {
+ for (i = 0; i < XP_BUCKET_SIZE; ++i) {
+ if (!inode_ctx[i].inode) {
+ break;
+ }
+ if (inode_ctx[i].created <= cutoff) {
+ break;
+ }
+ }
+ if (i >= XP_BUCKET_SIZE) {
+ e_clock = inode_ctx[0].used;
+ for (i = 1; i < XP_BUCKET_SIZE; ++i) {
+ if (inode_ctx[i].used < e_clock) {
+ e_clock = inode_ctx[i].used;
+ e_index = i;
+ }
+ }
+ ++(priv->stats.evict);
+ i = e_index;
+ }
+ inode_ctx[i].fd = fd;
+ inode_ctx[i].inode = inode;
+ inode_ctx[i].created = time(NULL);
+ }
+
+found:
+ if (i < XP_BUCKET_SIZE) {
+ dict = inode_ctx[i].attrs;
+ if (newdata) {
+ if (dict) {
+ dict_unref(dict);
+ }
+ dict = newdata;
+ inode_ctx[i].attrs = dict;
+ }
+ if (dict) {
+ /* Keep one reference, give one away. */
+ dict_ref(dict);
+ }
+ inode_ctx[i].used = ++(priv->lru_clock);
+ }
+
+ UNLOCK(&priv->lock);
+ return dict;
+}
+
void
xp_local_free (xp_local_t *local)
{
@@ -54,54 +158,51 @@ xp_pre_lookup_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
int32_t op_ret, int32_t op_errno, inode_t *inode,
struct iatt *buf, dict_t *dict, struct iatt *postparent)
{
- uint64_t value = 0;
- xp_inode_ctx_t *inode_ctx = NULL;
+ loc_t *loc = cookie;
+ fd_t *fd = NULL;
/* The cookie is the loc we built in xp_issue_one_lookup. */
/* TBD: free inode/buf/dict/postparent? */
- gf_log(this->name,GF_LOG_DEBUG,"finished pre-lookup for %s",
- ((loc_t *)cookie)->path);
+ gf_log(this->name,GF_LOG_DEBUG,"finished pre-lookup for %s",loc->path);
if (!dict) {
goto done;
}
dict_foreach(dict,xp_show_dict_val,NULL);
- if (inode_ctx_get (inode, this, &value) == 0) {
- inode_ctx = (xp_inode_ctx_t *)(long)value;
- }
- else {
- inode_ctx = GF_CALLOC(1,sizeof(*inode_ctx),
- gf_xp_mt_xp_inode_ctx_t);
- if (!inode_ctx) {
- goto done;
- }
- value = (uint64_t)inode_ctx;
- if (inode_ctx_put(inode,this,value) != 0) {
- GF_FREE(inode_ctx);
- goto done;
- }
+ fd = fd_lookup(loc->parent,frame->root->pid);
+ if (!fd) {
+ goto done;
}
- dict_ref(dict);
- inode_ctx->stbuf = *buf;
- inode_ctx->attrs = dict;
- inode_ctx->when = time(NULL);
+ (void)xp_get_inode_ctx(this,fd,inode,dict);
+ dict_unref(dict);
done:
- GF_FREE(cookie);
+ /*
+ * This was allocated along with the structure itself, but there's
+ * no flag to tell loc_wipe that so clear the pointer first.
+ */
+ loc->path = NULL;
+ loc_wipe(loc);
+ GF_FREE(loc);
STACK_DESTROY(frame->root);
return 0;
}
loc_t *
-xp_issue_one_lookup (xlator_t *this, xp_local_t *local,
+xp_issue_one_lookup (xlator_t *this, xp_local_t *local, pid_t pid,
char *path, size_t path_len, gf_dirent_t *entry)
{
loc_t *loc = NULL;
call_frame_t *frame = NULL;
xp_private_t *priv = this->private;
dict_t *dict = NULL;
+ int junk = 0; /* just to shut up warn_unused_result */
+ /*
+ * Allocate the path along with the structure to avoid the waste of
+ * calling malloc twice when once will do.
+ */
loc = GF_MALLOC(sizeof(*loc)+path_len+strlen(entry->d_name)+2,
gf_xp_mt_loc_t);
if (!loc) {
@@ -128,13 +229,15 @@ xp_issue_one_lookup (xlator_t *this, xp_local_t *local,
"could not allocate frame for %s", this->name);
goto err_no_frame;
}
+ /* Make sure these match so fd_lookup will find the right fd. */
+ frame->root->pid = pid;
dict = dict_new();
if (dict) {
- (void)dict_set_uint32(dict,"security.capability",100);
- (void)dict_set_uint32(dict,"security.selinux",100);
- (void)dict_set_uint32(dict,"system.posix_acl_access",100);
- (void)dict_set_uint32(dict,"system.posix_acl_default",100);
+ junk = dict_set_uint32(dict,"security.capability",100);
+ junk = dict_set_uint32(dict,"security.selinux",100);
+ junk = dict_set_uint32(dict,"system.posix_acl_access",100);
+ junk = dict_set_uint32(dict,"system.posix_acl_default",100);
}
STACK_WIND_COOKIE(frame, xp_pre_lookup_cbk, loc,
@@ -150,7 +253,7 @@ err_no_inode:
}
void
-xp_issue_lookups (xlator_t *this, xp_local_t *local,
+xp_issue_lookups (xlator_t *this, xp_local_t *local, pid_t pid,
gf_dirent_t *entries, int32_t n_entries)
{
gf_dirent_t *entry = NULL;
@@ -190,7 +293,7 @@ xp_issue_lookups (xlator_t *this, xp_local_t *local,
}
gf_log(this->name,GF_LOG_DEBUG,"prefetching d_name = %s",
entry->d_name);
- loc = xp_issue_one_lookup(this,local,path,path_len,entry);
+ loc = xp_issue_one_lookup(this,local,pid,path,path_len,entry);
if (!loc) {
break;
}
@@ -216,7 +319,7 @@ xp_readdir_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
}
if (op_ret >= 0) {
- xp_issue_lookups(this,local,entries,op_ret);
+ xp_issue_lookups(this,local,frame->root->pid,entries,op_ret);
}
XP_STACK_UNWIND (readdir, frame, op_ret, op_errno, entries);
@@ -230,7 +333,7 @@ xp_readdir (call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
{
xp_local_t *local = NULL;
- local = GF_CALLOC (1, sizeof (*local), gf_xp_mt_xp_local_t);
+ local = GF_CALLOC (1, sizeof (*local), gf_mt_xp_local_t);
if (local) {
local->fd = fd;
frame->local = local;
@@ -272,41 +375,61 @@ xp_is_prefetch_xattr (char *key)
int32_t
xp_getxattr (call_frame_t *frame, xlator_t *this, loc_t *loc, const char *name)
{
- uint64_t value = 0;
- xp_inode_ctx_t *inode_ctx = NULL;
data_t *data = NULL;
- dict_t *the_dict = NULL;
+ dict_t *old_dict = NULL;
+ dict_t *new_dict = NULL;
+ xp_private_t *priv = this->private;
+ fd_t *fd = NULL;
+ int junk = 0; /* just to shut up warn_unused_result */
gf_log(this->name,GF_LOG_DEBUG,"looking up %s on %p",name,loc->inode);
- if (__inode_ctx_get(loc->inode, this, &value) == 0) {
- inode_ctx = (xp_inode_ctx_t *)(long)value;
+ if (!strcmp(name,"trusted.xattr.stats")) {
+ new_dict = dict_new();
+ if (!new_dict) {
+ XP_STACK_UNWIND(getxattr,frame,-1,ENOENT,NULL);
+ return 0;
+ }
+ junk = dict_set_static_bin(new_dict,(char *)name,
+ &priv->stats,sizeof(priv->stats));
+ XP_STACK_UNWIND(getxattr,frame,0,0,new_dict);
+ return 0;
}
- if (inode_ctx && ((time(NULL) - inode_ctx->when) >= 2)) {
- gf_log(this->name,GF_LOG_DEBUG,"context too old");
- (void)xp_forget(this,loc->inode);
- inode_ctx = NULL;
+ if (!xp_is_prefetch_xattr((char *)name)) {
+ goto not_ours;
+ }
+
+ fd = fd_lookup(loc->parent,frame->root->pid);
+ if (!fd) {
+ goto miss;
}
- if (inode_ctx && inode_ctx->attrs) {
- the_dict = dict_new();
- if (the_dict) {
- data = dict_get(inode_ctx->attrs,(char *)name);
+
+ old_dict = xp_get_inode_ctx(this,fd,loc->inode,NULL);
+ if (old_dict) {
+ new_dict = dict_new();
+ if (new_dict) {
+ data = dict_get(old_dict,(char *)name);
if (data) {
gf_log(this->name,GF_LOG_DEBUG,
"filled from cache!");
+ ++(priv->stats.data_hit);
data_ref(data);
- dict_set(the_dict,(char *)name,data);
- XP_STACK_UNWIND(getxattr,frame,0,0,the_dict);
+ dict_set(new_dict,(char *)name,data);
+ XP_STACK_UNWIND(getxattr,frame,0,0,new_dict);
return 0;
}
- else if (xp_is_prefetch_xattr((char *)name)) {
+ else {
gf_log(this->name,GF_LOG_DEBUG,
"filled (null) from cache!");
- XP_STACK_UNWIND(getxattr,frame,0,0,the_dict);
+ ++(priv->stats.null_hit);
+ XP_STACK_UNWIND(getxattr,frame,0,0,new_dict);
return 0;
}
}
}
+miss:
+ ++(priv->stats.miss);
+not_ours:
STACK_WIND (frame, xp_getxattr_cbk, FIRST_CHILD(this),
FIRST_CHILD(this)->fops->getxattr, loc, name);
return 0;
@@ -383,16 +506,16 @@ init (xlator_t *this)
goto out;
}
- priv = GF_CALLOC (1, sizeof(xp_private_t), gf_xp_mt_xp_private_t);
+ priv = GF_CALLOC (1, sizeof(xp_private_t), gf_mt_xp_private_t);
- priv->pool.stack_mem_pool = mem_pool_new(call_stack_t,100);
+ priv->pool.stack_mem_pool = mem_pool_new(call_stack_t,1000);
if (!priv->pool.stack_mem_pool) {
gf_log(this->name,GF_LOG_ERROR,
"could not allocate call stacks");
goto err_no_stacks;
}
- priv->pool.frame_mem_pool = mem_pool_new(call_frame_t,100);
+ priv->pool.frame_mem_pool = mem_pool_new(call_frame_t,1000);
if (!priv->pool.frame_mem_pool) {
gf_log(this->name,GF_LOG_ERROR,
"could not allocate call frames");
@@ -401,6 +524,7 @@ init (xlator_t *this)
INIT_LIST_HEAD(&priv->pool.all_frames);
LOCK_INIT(&priv->pool.lock);
+ LOCK_INIT(&priv->lock);
this->private = priv;
@@ -418,24 +542,35 @@ void
fini (xlator_t *this)
{
xp_private_t *priv = NULL;
+ uint32_t i = 0;
- if (!this)
- goto out;
- else {
- priv = this->private;
- if (priv) {
- if (priv->pool.stack_mem_pool) {
- mem_pool_destroy (priv->pool.stack_mem_pool);
- }
- if (priv->pool.frame_mem_pool) {
- mem_pool_destroy (priv->pool.frame_mem_pool);
- }
- GF_FREE (priv);
- this->private = NULL;
- }
- }
-out:
- return;
+ if (!this) {
+ /* Does this ever really happen? Seems crazy. */
+ return;
+ }
+
+ priv = this->private;
+ if (!priv) {
+ return;
+ }
+
+ if (priv->pool.stack_mem_pool) {
+ mem_pool_destroy (priv->pool.stack_mem_pool);
+ }
+ if (priv->pool.frame_mem_pool) {
+ mem_pool_destroy (priv->pool.frame_mem_pool);
+ }
+
+ LOCK(&priv->lock);
+ for (i = 0; i < XP_NUM_ENTRIES; ++i) {
+ if (priv->table[i].attrs) {
+ dict_unref(priv->table[i].attrs);
+ }
+ }
+ UNLOCK(&priv->lock);
+
+ GF_FREE (priv);
+ this->private = NULL;
}
struct xlator_fops fops = {
@@ -449,5 +584,6 @@ struct xlator_fops fops = {
struct xlator_cbks cbks = {
.forget = xp_forget,
+ .release = xp_release,
};
diff --git a/xlators/performance/xattr-prefetch/src/xattr-prefetch.h b/xlators/performance/xattr-prefetch/src/xattr-prefetch.h
index 6e98888..c0567fc 100644
--- a/xlators/performance/xattr-prefetch/src/xattr-prefetch.h
+++ b/xlators/performance/xattr-prefetch/src/xattr-prefetch.h
@@ -40,16 +40,33 @@ struct xp_local {
typedef struct xp_local xp_local_t;
struct xp_inode_ctx {
- struct iatt stbuf;
+ fd_t *fd;
+ inode_t *inode;
dict_t *attrs;
- time_t when;
+ time_t created;
+ time_t used;
};
typedef struct xp_inode_ctx xp_inode_ctx_t;
+struct xp_stats {
+ uint64_t miss;
+ uint64_t data_hit;
+ uint64_t null_hit;
+ uint64_t evict;
+};
+typedef struct xp_stats xp_stats_t;
+
+#define XP_BUCKET_SIZE 4
+#define XP_NUM_BUCKETS 256
+#define XP_NUM_ENTRIES (XP_BUCKET_SIZE * XP_NUM_BUCKETS)
+
struct xp_private {
call_pool_t pool;
uint32_t entries;
gf_lock_t lock;
+ xp_inode_ctx_t table[XP_NUM_ENTRIES];
+ uint64_t lru_clock;
+ xp_stats_t stats;
};
typedef struct xp_private xp_private_t;
@@ -58,7 +75,7 @@ void xp_local_free (xp_local_t *local);
#define XP_STACK_UNWIND(op, frame, params ...) do { \
xp_local_t *__local = frame->local; \
frame->local = NULL; \
- STACK_UNWIND_STRICT (op, frame, params); \
+ STACK_UNWIND_STRICT (op, frame, ##params); \
xp_local_free (__local); \
} while (0)
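The header changes above replace the old per-inode context with a fixed-size,
4-way set-associative table (XP_BUCKET_SIZE slots in each of XP_NUM_BUCKETS
buckets) plus an LRU clock and hit/miss/evict counters. Below is a minimal
Python sketch of how such a table behaves; the hash function, the stored
payload, and the within-bucket eviction policy are illustrative assumptions,
not the translator's C code.

# Sketch of a BUCKET_SIZE-way set-associative cache with a global LRU
# clock, mirroring the table/lru_clock/stats fields added to xp_private.
BUCKET_SIZE = 4
NUM_BUCKETS = 256

class XattrCache(object):
    def __init__(self):
        # One flat table, addressed as NUM_BUCKETS buckets of BUCKET_SIZE.
        self.table = [None] * (BUCKET_SIZE * NUM_BUCKETS)
        self.lru_clock = 0
        self.stats = {"miss": 0, "data_hit": 0, "evict": 0}

    def _slots(self, key):
        # Assumed hash: any stable key -> bucket mapping will do here.
        start = (hash(key) % NUM_BUCKETS) * BUCKET_SIZE
        return range(start, start + BUCKET_SIZE)

    def get(self, key):
        for i in self._slots(key):
            entry = self.table[i]
            if entry is not None and entry[0] == key:
                self.lru_clock += 1
                self.table[i] = (key, entry[1], self.lru_clock)  # refresh "used"
                self.stats["data_hit"] += 1
                return entry[1]
        self.stats["miss"] += 1
        return None

    def put(self, key, attrs):
        self.lru_clock += 1
        victim = None
        for i in self._slots(key):
            if self.table[i] is None:
                victim = i
                break
            if victim is None or self.table[i][2] < self.table[victim][2]:
                victim = i  # least recently used slot in this bucket
        if self.table[victim] is not None:
            self.stats["evict"] += 1  # assumed policy: evict the bucket's LRU entry
        self.table[victim] = (key, attrs, self.lru_clock)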
Branch 'uidmap' - xlators/features
by Jeff Darcy
xlators/features/uidmap/src/uidmap.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
New commits:
commit 2b238901a7cbf9f0f458bb2b796e6ff5dd0a5a45
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Apr 19 14:08:57 2011 -0400
Print parameters at end of init.
diff --git a/xlators/features/uidmap/src/uidmap.c b/xlators/features/uidmap/src/uidmap.c
index 6b1b853..9cab7df 100644
--- a/xlators/features/uidmap/src/uidmap.c
+++ b/xlators/features/uidmap/src/uidmap.c
@@ -90,9 +90,11 @@ init (xlator_t *this)
return -1;
}
priv->db_path = gf_strdup(db);
+ priv->tenant = tenant;
this->private = priv;
- gf_log ("uidmap", GF_LOG_INFO, "uidmap xlator loaded");
+ gf_log ("uidmap", GF_LOG_INFO, "uidmap xlator loaded with db=%s t=%u",
+ priv->db_path, priv->tenant);
return 0;
}
the features/oplock translator
by Edward Shishkin
Hello everyone.
I find the translator named in the subject suspicious: it looks like
oplock_writev can lose concurrent updates.
See the comments in the function below.
Where am I wrong?
Any ideas?
Thanks,
Edward.
int32_t
oplock_writev (call_frame_t *frame, xlator_t *this, fd_t *fd, struct
iovec *vector,
int32_t count, off_t offset, struct iobref *iobref)
{
server_state_t *state = CALL_STATE(frame);
inode_t *inode = fd->inode;
struct oplock_entry *entry = NULL;
oplock_private_t *priv = this->private;
int op_errno = 0;
entry = fetch_op_lock(&priv->locks,inode,state->conn);
if (entry) {
LIST_REMOVE(entry,links);
if (entry->value != inode->gen) {
gf_log(this->name,GF_LOG_DEBUG,
"would reject write for %d from %p",
inode->ino, state->conn);
op_errno = EBUSY;
goto err;
}
FREE(entry);
}
++(inode->gen);
/* Suppose thread A overwrites a byte at offset 0,
* and thread B overwrites a byte in the same file
* at offset 15 (within the same block).
*
* Suppose A sets the counter above (inode->gen) to N.
* At this point the file's data has not yet been modified.
* Thread B issues a lock with the assigned value N, then
* successfully reads the old data from disk.
*/
STACK_WIND (frame, oplock_writev_cbk,
FIRST_CHILD(this), FIRST_CHILD(this)->fops->writev,
fd, vector, count, offset, iobref);
/* At this point the file's data has been modified by A.
* Next, B increments the counter to N+1 and also modifies
* the data. The changes performed by A are lost.
*/
return 0;
err:
STACK_UNWIND_STRICT(writev,frame,-1,op_errno,NULL,NULL);
return 0;
}
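The race described in the comments does not depend on the translator's
plumbing; here is a minimal Python sketch of the same lost-update window
(check a generation counter, bump it, then do the read-modify-write with no
lock held across the whole sequence). All names are illustrative.

# Two writers both pass the generation check, then one's read-modify-write
# of the shared block silently overwrites the other's bytes.
import threading

gen = 0
data = [0] * 16  # one "block" of the file

def writev(offset, value):
    global gen
    my_gen = gen        # the oplock check: remember the generation
    gen = my_gen + 1    # ++(inode->gen)
    block = list(data)  # read the old block (B may still see A's old data)
    block[offset] = value
    data[:] = block     # write it back; a concurrent writer's change is lost

a = threading.Thread(target=writev, args=(0, 1))
b = threading.Thread(target=writev, args=(15, 2))
a.start(); b.start(); a.join(); b.join()
print(data[0], data[15])  # with unlucky timing, one of the two writes is gone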
Branch 'cloudfsd' - scripts/cloudfsd.py scripts/wwwcfgmain.py scripts/wwwcfgroot.py scripts/wwwconfirmprovision.py scripts/wwwcss.py scripts/wwwdoinitcluster.py scripts/wwwdoprovision.py scripts/wwwinitcluster.py scripts/wwwprovision.py scripts/wwwroot.py
by Kaleb KEITHLEY
scripts/cloudfsd.py | 14 ++---
scripts/wwwcfgmain.py | 108 +++++------------------------------------
scripts/wwwcfgroot.py | 104 +++++----------------------------------
scripts/wwwconfirmprovision.py | 32 ++++++------
scripts/wwwcss.py | 79 +++++++++++++++++++++++++++++
scripts/wwwdoinitcluster.py | 16 +++---
scripts/wwwdoprovision.py | 46 ++++++++---------
scripts/wwwinitcluster.py | 39 ++++++--------
scripts/wwwprovision.py | 93 ++++++++++++++++-------------------
scripts/wwwroot.py | 21 +++----
10 files changed, 230 insertions(+), 322 deletions(-)
New commits:
commit 0d1a9f4cc5a7aa2074cb34efcdea8bf0c1517f09
Author: Kaleb S. KEITHLEY <kkeithle(a)cloudfs-node01.kkeithle.redhat.com>
Date: Tue May 3 12:51:46 2011 -0400
checkpoint wip
flow from page to page in www gui works
things remaining:
+ use curl loopback to start volumes, i.e. including creating the
cloudified config files (versus using gluster)
+ switch os.popen and os.system to use subprocess.Popen
but that's enough for today — I need to work on other things.
diff --git a/scripts/cloudfsd.py b/scripts/cloudfsd.py
index 82324db..f68d76e 100755
--- a/scripts/cloudfsd.py
+++ b/scripts/cloudfsd.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
-from bottle import route, post, run
+from bottle import route, post, run, request
import paths
import volstart
@@ -60,18 +60,16 @@ def www_cfgmain():
@route("/wwwprovision")
def www_provision():
- print "www provision"
return wwwprovision.www_provision()
+@post("/wwwconfirmprovision")
+def www_confirmprovision():
+ return wwwconfirmprovision.www_confirmprovision(request.body)
+
@post("/wwwdoprovision")
def www_doprovision():
print "www doprovision"
- return wwwdoprovision.www_doprovision()
-
-@post("/wwwconfirmprovision")
-def www_confirmprovision():
- print "www confirmprovision"
- return wwwconfirmprovision.www_confirmprovision()
+ return wwwdoprovision.www_doprovision(request.body)
@route("/wwwinitcluster")
def www_initcluster():
diff --git a/scripts/wwwcfgmain.py b/scripts/wwwcfgmain.py
index 23f019e..b577a4a 100644
--- a/scripts/wwwcfgmain.py
+++ b/scripts/wwwcfgmain.py
@@ -1,97 +1,19 @@
-import fileinput
+import wwwcss
-def www_cfgroot() :
- fileinput.close()
+def www_cfgmain() :
- print "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" \"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">"
- print "<html><head>"
- print "<title>_Red Hat CloudFS Configuration_</title>"
- print "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />"
- print "<meta http-equiv=\"Pragma\" content=\"no-cache\" />"
- print "<style type=\"text/css\">"
- print "/*<![CDATA[*/"
- print "body {"
- print "background-color: #fff;"
- print "color: #000;"
- print "font-size: 0.9em;"
- print "font-family: sans-serif,helvetica;"
- print "margin: 0;"
- print "padding: 0;"
- print "}"
- print ":link {"
- print "color: #c00;"
- print "}"
- print ":visited {"
- print "color: #c00;"
- print "}"
- print "a:hover {"
- print "color: #f50;"
- print "}"
- print "h1 {"
- print "text-align: center;"
- print "margin: 0;"
- print "padding: 0.6em 2em 0.4em;"
- print "background-color: #900;"
- print "color: #fff;"
- print "font-weight: normal;"
- print "font-size: 1.75em;"
- print "border-bottom: 2px solid #000;"
- print "}"
- print "h1 strong {"
- print "font-weight: bold;"
- print "}"
- print "h2 {"
- print "font-size: 1.1em;"
- print "font-weight: bold;"
- print "}"
- print "hr {"
- print "display: none;"
- print "}"
- print ".content {"
- print "padding: 1em 5em;"
- print "}"
- print ".content-columns {"
- print "/* Setting relative positioning allows for "
- print "absolute positioning for sub-classes */"
- print "position: relative;"
- print "padding-top: 1em;"
- print "}"
- print ".content-column-left {"
- print "/* Value for IE/Win; will be overwritten for other browsers */"
- print "width: 47%;"
- print "padding-right: 3%;"
- print "float: left;"
- print "padding-bottom: 2em;"
- print "}"
- print ".content-column-left hr {"
- print "display: none;"
- print "}"
- print ".content-column-right {"
- print "/* Values for IE/Win; will be overwritten for other browsers */"
- print "width: 47%;"
- print "padding-left: 3%;"
- print "float: left;"
- print "padding-bottom: 2em;"
- print "}"
- print ".content-columns>.content-column-left, .content-columns>.content-column-right {"
- print "/* Non-IE/Win */"
- print "}"
- print "img {"
- print "border: 2px solid #fff;"
- print "padding: 2px;"
- print "margin: 2px;"
- print "}"
- print "a:hover img {"
- print "border: 2px solid #f50;"
- print "}"
- print "/*]]>*/"
- print "</style>"
- print "<meta http-equiv=\"Refresh\" content=\"0; url=https:/cfg/main\" />"
- print "</head><body>"
- print "<h1>Red Hat CloudFS Configuration Main<h1/>"
- print "<p><a href=\"http:8080/wwwinitcluster\">Initialize Cluster</a></p>"
- print "<p><a href=\"http:8080/wwwlisttenants\">Tenant Management</a></p>"
- print "<p><a href=\"http:8080/wwwprovision\">Provision Storage</a></p>"
- print "</body></html>"
+ ret = "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" \"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">"
+ ret = ret + "<html><head>"
+ ret = ret + "<title>_Red Hat CloudFS Configuration_</title>"
+ ret = ret + "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />"
+ ret = ret + "<meta http-equiv=\"Pragma\" content=\"no-cache\" />"
+ ret = ret + wwwcss.css
+ ret = ret + "</head><body>"
+ ret = ret + "<h1>Red Hat CloudFS Configuration Main<h1/>"
+ ret = ret + "<p><a href=\"wwwinitcluster\">Initialize Cluster</a></p>"
+ ret = ret + "<p><a href=\"wwwlisttenants\">Tenant Management</a></p>"
+ ret = ret + "<p><a href=\"wwwprovision\">Provision Storage</a></p>"
+ ret = ret + "</body></html>"
+ return ret
diff --git a/scripts/wwwcfgroot.py b/scripts/wwwcfgroot.py
index 2f130cc..b9faede 100644
--- a/scripts/wwwcfgroot.py
+++ b/scripts/wwwcfgroot.py
@@ -1,95 +1,17 @@
-import fileinput
+import wwwcss
def www_cfgroot() :
- fileinput.close()
- print "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" \"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">"
- print "<html><head>"
- print "<title>_Red Hat CloudFS Configuration_</title>"
- print "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />"
- print "<meta http-equiv=\"Pragma\" content=\"no-cache\" />"
- print "<style type=\"text/css\">"
- print "/*<![CDATA[*/"
- print "body {"
- print "background-color: #fff;"
- print "color: #000;"
- print "font-size: 0.9em;"
- print "font-family: sans-serif,helvetica;"
- print "margin: 0;"
- print "padding: 0;"
- print "}"
- print ":link {"
- print "color: #c00;"
- print "}"
- print ":visited {"
- print "color: #c00;"
- print "}"
- print "a:hover {"
- print "color: #f50;"
- print "}"
- print "h1 {"
- print "text-align: center;"
- print "margin: 0;"
- print "padding: 0.6em 2em 0.4em;"
- print "background-color: #900;"
- print "color: #fff;"
- print "font-weight: normal;"
- print "font-size: 1.75em;"
- print "border-bottom: 2px solid #000;"
- print "}"
- print "h1 strong {"
- print "font-weight: bold;"
- print "}"
- print "h2 {"
- print "font-size: 1.1em;"
- print "font-weight: bold;"
- print "}"
- print "hr {"
- print "display: none;"
- print "}"
- print ".content {"
- print "padding: 1em 5em;"
- print "}"
- print ".content-columns {"
- print "/* Setting relative positioning allows for "
- print "absolute positioning for sub-classes */"
- print "position: relative;"
- print "padding-top: 1em;"
- print "}"
- print ".content-column-left {"
- print "/* Value for IE/Win; will be overwritten for other browsers */"
- print "width: 47%;"
- print "padding-right: 3%;"
- print "float: left;"
- print "padding-bottom: 2em;"
- print "}"
- print ".content-column-left hr {"
- print "display: none;"
- print "}"
- print ".content-column-right {"
- print "/* Values for IE/Win; will be overwritten for other browsers */"
- print "width: 47%;"
- print "padding-left: 3%;"
- print "float: left;"
- print "padding-bottom: 2em;"
- print "}"
- print ".content-columns>.content-column-left, .content-columns>.content-column-right {"
- print "/* Non-IE/Win */"
- print "}"
- print "img {"
- print "border: 2px solid #fff;"
- print "padding: 2px;"
- print "margin: 2px;"
- print "}"
- print "a:hover img {"
- print "border: 2px solid #f50;"
- print "}"
- print "/*]]>*/"
- print "</style>"
- print "<meta http-equiv=\"Refresh\" content=\"0; url=http:8080/cfgmain\" />"
- print "</head><body>"
- print "<h1>Red Hat CloudFS Configuration<h1/>"
+ ret = "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" \"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">"
+ ret = ret + "<html><head>"
+ ret = ret + "<title>_Red Hat CloudFS Configuration_</title>"
+ ret = ret + "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />"
+ ret = ret + "<meta http-equiv=\"Pragma\" content=\"no-cache\" />"
+ ret = ret + wwwcss.css
+ ret = ret + "<meta http-equiv=\"Refresh\" content=\"0; url=/cfgmain\" />"
+ ret = ret + "</head><body>"
+ ret = ret + "<h1>Red Hat CloudFS Configuration<h1/>"
# authentication disabled for now
# <h2>Sign On<h2/>
# <form method="post" action="/cgi-bin/authenticate">
@@ -105,6 +27,8 @@ def www_cfgroot() :
# <input type="submit" name="signon" value="Sign On">
# </p>
# </form>
- print "<p>Please follow <a href=\"http:8080/cfgmain\">link</a>!</p>"
- print "</body></html>"
+ ret = ret + "<p>Please follow <a href=\"/cfgmain\">link</a>!</p>"
+ ret = ret + "</body></html>"
+
+ return ret
diff --git a/scripts/wwwconfirmprovision.py b/scripts/wwwconfirmprovision.py
index 801a5c3..8b3ccba 100644
--- a/scripts/wwwconfirmprovision.py
+++ b/scripts/wwwconfirmprovision.py
@@ -3,13 +3,11 @@ import fileinput
import re
import os
-def www_confirmprovision():
- print "Content-type: text/html"
- print
+def www_confirmprovision(body):
- f = fileinput.input()
- line = f.readline()
- fileinput.close()
+ # f = fileinput.input()
+ line = body.readline()
+ # fileinput.close()
tokens = re.split('&', line)
@@ -39,11 +37,12 @@ def www_confirmprovision():
volumes.append(scratch[1])
continue
- print "<html><head>"
- print "<meta http-equiv=\"pragma\" content=\"no-cache\">"
- print "<title>Confirm Provision Volume</title>"
- print "</head><body>"
- print "<h2>Confirm Provision Volume</h2>"
+ ret = "Content-type: text/html\n"
+ ret = ret + "<html><head>"
+ ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\">"
+ ret = ret + "<title>Confirm Provision Volume</title>"
+ ret = ret + "</head><body>"
+ ret = ret + "<h2>Confirm Provision Volume</h2>"
cmd = "/usr/sbin/gluster volume create " + volume_id
if volume_type != "plain" :
cmd = cmd + " " + volume_type + " " + replica_or_stripe_count
@@ -51,9 +50,10 @@ def www_confirmprovision():
for volume in volumes :
cmd = cmd + " " + volume.replace("%3A", ":").replace("%2F", "/")
- print "<form method=\"post\" name=\"provision\" action=\"wwwdoprovision\">"
- print ("<input type=\"hidden\" name=\"COMMAND\" value=\"%s\" />") % (cmd)
- print ("%s <br><input type=\"submit\" name=\"PROVISION\" value=\"Confirm\" />") % (cmd)
- print "</form>"
- print "</body></html>"
+ ret = ret + "<form method=\"post\" name=\"provision\" action=\"wwwdoprovision\">"
+ ret = ret + "<input type=\"hidden\" name=\"COMMAND\" value=\"" + cmd + "\" />"
+ ret = ret + cmd + " <br><input type=\"submit\" name=\"PROVISION\" value=\"Confirm\" />"
+ ret = ret + "</form>"
+ ret = ret + "</body></html>"
+ return ret
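The body parsing above splits on '&' by hand and undoes the %3A/%2F encoding
with string replaces. A sketch of the same parsing via the standard library
(Python 2, matching the scripts' shebang); the helper name is an assumption:

# parse_qs handles '&' splitting, %XX decoding, '+'-as-space, and repeated
# fields (the VOLUME checkboxes) in one call.
import urlparse

def parse_form(body):
    fields = urlparse.parse_qs(body.read())
    volume_id = fields.get("VOLUMEID", [""])[0]
    volume_type = fields.get("TYPE", ["plain"])[0]
    volumes = fields.get("VOLUME", [])  # one entry per checked brick
    return volume_id, volume_type, volumes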
diff --git a/scripts/wwwcss.py b/scripts/wwwcss.py
new file mode 100644
index 0000000..af378a8
--- /dev/null
+++ b/scripts/wwwcss.py
@@ -0,0 +1,79 @@
+
+css = """<style type=\"text/css\">
+/*<![CDATA[*/
+body {
+background-color: #fff;
+color: #000;
+font-size: 0.9em;
+font-family: sans-serif,helvetica;
+margin: 0;
+padding: 0;
+}
+:link {
+color: #fff;
+}
+:visited {
+color: #fff;
+}
+a:hover {
+color: #f50;
+}
+h1 {
+text-align: center;
+margin: 0;
+padding: 0.6em 2em 0.4em;
+background-color: #900;
+color: #fff;
+font-weight: normal;
+font-size: 1.75em;
+border-bottom: 2px solid #000;
+}
+h1 strong {
+font-weight: bold;
+}
+h2 {
+font-size: 1.1em;
+font-weight: bold;
+}
+hr {
+display: none;
+}
+.content {
+padding: 1em 5em;
+}
+.content-columns {
+/* Setting relative positioning allows for
+absolute positioning for sub-classes */
+position: relative;
+padding-top: 1em;
+}
+.content-column-left {
+/* Value for IE/Win; will be overwritten for other browsers */
+width: 47%;
+padding-right: 3%;
+float: left;
+padding-bottom: 2em;
+}
+.content-column-left hr {
+display: none;
+}
+.content-column-right {
+/* Values for IE/Win; will be overwritten for other browsers */
+width: 47%;
+padding-left: 3%;
+float: left;
+padding-bottom: 2em;
+}
+.content-columns>.content-column-left, .content-columns>.content-column-right {
+/* Non-IE/Win */
+}
+img {
+border: 2px solid #fff;
+padding: 2px;
+margin: 2px;
+}
+a:hover img {
+border: 2px solid #f50;
+}
+/*]]>*/
+</style>"""
diff --git a/scripts/wwwdoinitcluster.py b/scripts/wwwdoinitcluster.py
index 615911b..29526a5 100644
--- a/scripts/wwwdoinitcluster.py
+++ b/scripts/wwwdoinitcluster.py
@@ -5,8 +5,6 @@ import os
import socket
def www_doinitcluster():
- print "Content-type: text/html"
- print
f = fileinput.input()
line = f.readline()
@@ -51,9 +49,11 @@ def www_doinitcluster():
clean_cmd = cmd.replace("|", "").replace("&", "").replace(">", "")
sts = os.system(clean_cmd + " > /dev/null 2>&1");
- print "<html><head>"
- print "<meta http-equiv=\"pragma\" content=\"no-cache\">"
- print "<meta http-equiv=\"Refresh\" content=\"0; url=http:8080/wwwinitcluster\" />"
- print "</head><body>"
- print "<p>Please follow <a href=\"http:8080/wwwinitcluster\">link</a>!</p>"
- print "</body></html>"
+ ret = "Content-type: text/html\n"
+ ret = ret + "<html><head>"
+ ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\">"
+ ret = ret + "<meta http-equiv=\"Refresh\" content=\"0; url=wwwinitcluster\" />"
+ ret = ret + "</head><body>"
+ ret = ret + "<p>Please follow <a href=\"wwwinitcluster\">link</a>!</p>"
+ ret = ret + "</body></html>"
+ return ret
diff --git a/scripts/wwwdoprovision.py b/scripts/wwwdoprovision.py
index d76e451..6535c50 100644
--- a/scripts/wwwdoprovision.py
+++ b/scripts/wwwdoprovision.py
@@ -3,13 +3,11 @@ import fileinput
import re
import os
-def www_doprovision():
- print "Content-type: text/html"
- print
+def www_doprovision(body):
- f = fileinput.input()
- line = f.readline()
- fileinput.close()
+ # f = fileinput.input()
+ line = body.readline()
+ # fileinput.close()
tokens = re.split('&', line)
@@ -27,11 +25,12 @@ def www_doprovision():
command = scratch[1]
continue
- print "<html><head>"
- print "<meta http-equiv=\"pragma\" content=\"no-cache\">"
- print "<title>Provisioned Volume</title>"
- print "</head><body>"
- print "<h2>Provisioned Volume</h2>"
+ ret = "Content-type: text/html\n"
+ ret = ret + "<html><head>"
+ ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\">"
+ ret = ret + "<title>Provisioned Volume</title>"
+ ret = ret + "</head><body>"
+ ret = ret + "<h2>Provisioned Volume</h2>"
## make the underlying volume with gluster
decode_cmd = command.replace("%3A", ":").replace("%2F", "/").replace("+", " ")
clean_cmd = decode_cmd.replace("|", "").replace("&", "").replace(">", "")
@@ -65,7 +64,7 @@ def www_doprovision():
clean_cmd = mkdir_cmd.replace("|", "").replace("&", "").replace(">", "")
mkdir_sts = os.system(clean_cmd + " > /dev/null 2>&1")
if mkdir_sts != 0 :
- print "<br> fail: " + clean_cmd
+ ret = ret + "<br> fail: " + clean_cmd
# copy the modified vol files to the peers
unique_nodes = set(nodes)
this_ip = ""
@@ -89,26 +88,27 @@ def www_doprovision():
scp_cmd = "/usr/bin/sudo /usr/bin/ssh " + node + " 'cd /var/lib/glusterd/vols/" + vol_name + " && /usr/bin/scp -q -r " + this_ip + ":/var/lib/glusterd/vols/" + vol_name + "/* .'"
scp_sts = os.system(scp_cmd)
if scp_sts != 0 :
- print "<br>fail: " + scp_cmd
+ ret = ret + "<br>fail: " + scp_cmd
# now start the volume
start_cmd = "/usr/bin/sudo /usr/sbin/gluster volume start " + vol_name
start_sts = os.system(start_cmd + " > /dev/null 2>&1")
if start_sts != 0 :
- print "<br> fail: " + start_cmd
+ ret = ret + "<br> fail: " + start_cmd
else :
- print("<br> %s started") % (vol_name)
+ ret = ret + "<br> started " + vol_name
# list FUSE volume specs for download
- mkdir_sts = os.system("/usr/bin/sudo /bin/mkdir -p /var/www/html/cfg/scratch")
- cp_sts = os.system("/usr/bin/sudo /bin/cp /var/lib/glusterd/vols/" + vol_name + "/" + vol_name + "-fuse.vol.* /var/www/html/cfg/scratch/")
- print "<hr><br> client/tenant volume files (right-click to save-as):"
+ mkdir_sts = os.system("/usr/bin/sudo /bin/mkdir -p scratch")
+ cp_sts = os.system("/usr/bin/sudo /bin/cp /var/lib/cloudfs/vols/" + vol_name + "/" + vol_name + "-fuse.vol.* /var/www/html/cfg/scratch/")
+ ret = ret + "<hr><br> client/tenant volume files (right-click to save-as):"
for tenant in tenants :
if tenant != "junk" :
- print("<br><a href=\"/cfg/scratch/%s-fuse.vol.%s\">%s</a>") % (vol_name, tenant, tenant)
- print "<hr>"
- print "<form method=\"post\" action=\"cfgmain\">"
- print "<input type=\"submit\" name=\"DONE\" value=\"Done\"></form>"
+ ret = ret + "<br><a href=\"/cfg/scratch/%s-fuse.vol.%s\">%s</a>" % (vol_name, tenant, tenant)
+ ret = ret + "<hr>"
+ ret = ret + "<form method=\"post\" action=\"cfgmain\">"
+ ret = ret + "<input type=\"submit\" name=\"DONE\" value=\"Done\"></form>"
- print "</body></html>"
+ ret = ret + "</body></html>"
+ return ret
diff --git a/scripts/wwwinitcluster.py b/scripts/wwwinitcluster.py
index 44f9f5f..2858d6a 100644
--- a/scripts/wwwinitcluster.py
+++ b/scripts/wwwinitcluster.py
@@ -1,6 +1,5 @@
import datetime
-import fileinput
import os
import re
import string
@@ -8,7 +7,6 @@ import socket
import sys
def www_initcluster():
- fileinput.close()
hostname = socket.gethostname()
@@ -21,23 +19,22 @@ def www_initcluster():
node_ips.append(socket.gethostbyname(string.strip(tokens[1])))
peer_ips.close()
- print "Content-type: text/html"
- print
- print "<html><head>"
- print "<meta http-equiv=\"pragma\" content=\"no-cache\"><title>Initialize CloudFS Cluster</title>"
- print "</head><body>"
- print "<h2>Initialize CloudFS Cluster</h2>"
- print "<hr>"
- print "<h2>Cluster Nodes</h2>"
+ ret = "Content-type: text/html\n"
+ ret = ret + "<html><head>"
+ ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\"><title>Initialize CloudFS Cluster</title>"
+ ret = ret + "</head><body>"
+ ret = ret + "<h2>Initialize CloudFS Cluster</h2>"
+ ret = ret + "<hr>"
+ ret = ret + "<h2>Cluster Nodes</h2>"
for node_ip in node_ips :
- print node_ip + "<br>"
- print "<hr><br>"
- print "Enter the hostname of a node to add to the cluster"
- print "<form method=\"post\" action=\"wwwdoinitcluster\">"
- print "Node Name: <input type=\"text\" name=\"NODENAME\">"
- print "<input type=\"submit\" name=\"ADDNODE\" value=\"Add Node\"></form>"
- print "<hr>"
- print "<form method=\"post\" action=\"/cfgmain.\">"
- print "<input type=\"submit\" name=\"DONE\" value=\"Done\"></form>"
-
- print "</body></html>"
+ ret = ret + node_ip + "<br>"
+ ret = ret + "<hr><br>"
+ ret = ret + "Enter the hostname of a node to add to the cluster"
+ ret = ret + "<form method=\"post\" action=\"wwwdoinitcluster\">"
+ ret = ret + "Node Name: <input type=\"text\" name=\"NODENAME\">"
+ ret = ret + "<input type=\"submit\" name=\"ADDNODE\" value=\"Add Node\"></form>"
+ ret = ret + "<hr>"
+ ret = ret + "<form method=\"post\" action=\"/cfgmain\">"
+ ret = ret + "<input type=\"submit\" name=\"DONE\" value=\"Done\"></form>"
+ ret = ret + "</body></html>"
+ return ret
diff --git a/scripts/wwwprovision.py b/scripts/wwwprovision.py
index c4e2910..72dada3 100644
--- a/scripts/wwwprovision.py
+++ b/scripts/wwwprovision.py
@@ -1,14 +1,13 @@
import datetime
-import fileinput
import os
import re
import string
import socket
import sys
-def brick_used(needle) :
- for vol in bricks_by_vol :
+def brick_used(needle, haystack) :
+ for vol in haystack :
for brick in vol :
if brick == needle :
return True
@@ -16,14 +15,8 @@ def brick_used(needle) :
def www_provision() :
- fileinput.close()
-
hostname = socket.gethostname()
- print "Content-type: text/html"
- print
- print "<html><head><meta http-equiv=\"pragma\" content=\"no-cache\"><title>Provision CloudFS Volume</title>"
-
existing_vols = []
bricks_by_vol = []
bbv_index = -1
@@ -63,7 +56,7 @@ def www_provision() :
volumes_on_nodes = []
for node_ip in node_ips :
- # print("<p>%s</p>") % (node_ip)
+ # ret = ret +("<p>%s</p>") % (node_ip)
cmd = "/usr/bin/sudo /usr/bin/ssh " + node_ip + " df -H"
volumes_on_node = os.popen(cmd)
scratch = []
@@ -74,53 +67,53 @@ def www_provision() :
volumes_on_node.close()
volumes_on_nodes.append(scratch)
- print "<style type=\"text/css\">"
- print ".colmask{ position: relative; overflow: hidden; margin: 0px auto; width: 100%; background-color: #b4d2f7 }"
- print ".header{ float: left; width: 100%; background-color: #f4f4f4}"
- print ".wrapper{ position: relative; float: left; left: 0.00%; width: 100.00%; background-color: #cccccc}"
- #for node_ip in node_ips :
- # print(".col%s{ position: relative; float: left; width: %d%%; left: 1%%; background-color: #b4d2f7}") % (node_ip.replace(".", "_"), 100/len(node_ips)-1)
- print "tr.d0 td { background-color: #CC9999; color: black; }"
- print "tr.d1 td { background-color: #9999CC; color: black; }"
- print ".footer{ float: left; width: 100%; background-color: #f4f4f4}"
- print "body { border-width: 0px; padding: 0px; margin: 0px; font-size: 90%; background-color: #e7e7de}"
- print "</style></head><body>"
- print "<h1>Provision CloudFS Volume</h1>"
- print "<h2>Existing Volumes:</h2>"
+ ret = "Content-type: text/html\n"
+ ret = ret + "<html><head><meta http-equiv=\"pragma\" content=\"no-cache\"><title>Provision CloudFS Volume</title>"
+ ret = ret + "<style type=\"text/css\">"
+ ret = ret + ".colmask{ position: relative; overflow: hidden; margin: 0px auto; width: 100%; background-color: #b4d2f7 }"
+ ret = ret + ".header{ float: left; width: 100%; background-color: #f4f4f4}"
+ ret = ret + ".wrapper{ position: relative; float: left; left: 0.00%; width: 100.00%; background-color: #cccccc}"
+ ret = ret + "tr.d0 td { background-color: #CC9999; color: black; }"
+ ret = ret + "tr.d1 td { background-color: #9999CC; color: black; }"
+ ret = ret + ".footer{ float: left; width: 100%; background-color: #f4f4f4}"
+ ret = ret + "body { border-width: 0px; padding: 0px; margin: 0px; font-size: 90%; background-color: #e7e7de}"
+ ret = ret + "</style></head><body>"
+ ret = ret + "<h1>Provision CloudFS Volume</h1>"
+ ret = ret + "<h2>Existing Volumes:</h2>"
bbv_index = 0
for existing_vol in existing_vols :
- print "<p>" + existing_vol + ":"
+ ret = ret + "<p>" + existing_vol + ":"
for brick in bricks_by_vol[bbv_index] :
- print "<br> " + brick
+ ret = ret + "<br> " + brick
bbv_index = bbv_index + 1
- print "</p>"
- print "<hr>"
- print "<h2>Provision a Volume From Available Bricks:</h2>"
- print "<form method=\"post\" name=\"provision\" action=\"wwwconfirmprovision\">"
- print "<div class=\"header\"><hr></div>"
- print "<div class=\"wrapper\">"
- print "<table>"
+ ret = ret + "</p>"
+ ret = ret + "<hr>"
+ ret = ret + "<h2>Provision a Volume From Available Bricks:</h2>"
+ ret = ret + "<form method=\"post\" name=\"provision\" action=\"wwwconfirmprovision\">"
+ ret = ret + "<div class=\"header\"><hr></div>"
+ ret = ret + "<div class=\"wrapper\">"
+ ret = ret + "<table>"
node_index = 0
for node_ip in node_ips :
- print("<tr class=\"d%d\">") % (node_index % 2)
- print("<td>%s</td>") % (node_ip)
+ ret = ret + "<tr class=\"d%d\">" % (node_index % 2)
+ ret = ret + "<td>%s</td>" % (node_ip)
for volumes_on_node in volumes_on_nodes[node_index] :
tokens = volumes_on_node.rpartition(" ")
- if brick_used(node_ip + ":" + tokens[2]) == True :
- print("<td><input type=\"checkbox\" name=\"VOLUME\" value=\"%s:%s\" disabled />%s</td>") % (node_ip, tokens[2], tokens[2])
+ if brick_used(node_ip + ":" + tokens[2], bricks_by_vol) == True :
+ ret = ret + "<td><input type=\"checkbox\" name=\"VOLUME\" value=\"%s:%s\" disabled />%s</td>" % (node_ip, tokens[2], tokens[2])
else :
- print("<td><input type=\"checkbox\" name=\"VOLUME\" value=\"%s:%s\" />%s</td>") % (node_ip, tokens[2], tokens[2])
- print "</tr>"
+ ret = ret + "<td><input type=\"checkbox\" name=\"VOLUME\" value=\"%s:%s\" />%s</td>" % (node_ip, tokens[2], tokens[2])
+ ret = ret + "</tr>"
node_index = node_index + 1
- print "</table>"
- print "</div>"
- print "<div class=\"footer\"><hr></div>"
- print "Volume Type: <input type=\"radio\" name=\"TYPE\" value=\"plain\" checked />Plain"
- print "<input type=\"radio\" name=\"TYPE\" value=\"replica\" />Replicated"
- print "<input type=\"radio\" name=\"TYPE\" value=\"stripe\" />Striped"
- print "<br>Replica or Stripe count: <input type=\"text\" name=\"COUNT\" size=\"2\" />"
- print "<br>Volume ID: <input type=\"text\" name=\"VOLUMEID\" />"
- print "<input type=\"submit\" name=\"PROVISION\" value=\"Provision\" />"
- print "</form>"
-
- print "</body></html>"
+ ret = ret + "</table>"
+ ret = ret + "</div>"
+ ret = ret + "<div class=\"footer\"><hr></div>"
+ ret = ret + "Volume Type: <input type=\"radio\" name=\"TYPE\" value=\"plain\" checked />Plain"
+ ret = ret + "<input type=\"radio\" name=\"TYPE\" value=\"replica\" />Replicated"
+ ret = ret + "<input type=\"radio\" name=\"TYPE\" value=\"stripe\" />Striped"
+ ret = ret + "<br>Replica or Stripe count: <input type=\"text\" name=\"COUNT\" size=\"2\" />"
+ ret = ret + "<br>Volume ID: <input type=\"text\" name=\"VOLUMEID\" />"
+ ret = ret + "<input type=\"submit\" name=\"PROVISION\" value=\"Provision\" />"
+ ret = ret + "</form>"
+ ret = ret + "</body></html>"
+ return ret
diff --git a/scripts/wwwroot.py b/scripts/wwwroot.py
index 5990022..6e9a910 100644
--- a/scripts/wwwroot.py
+++ b/scripts/wwwroot.py
@@ -1,15 +1,10 @@
-import fileinput
-
def www_root() :
- fileinput.close()
-
- print "Content-type: text/html"
- print
- print "<html><head>"
- print "<meta http-equiv=\"pragma\" content=\"no-cache\">"
-# print "<meta http-equiv=\"Refresh\" content=\"0; url=https:/cfg/\" />"
- print "<meta http-equiv=\"Refresh\" content=\"0; url=http:8080/cfg/\" />"
- print "</head><body>"
- print "<p>Please follow <a href=\"https:/cfg/\">link</a>!</p>"
- print "</body></html>"
+ ret = "Content-type: text/html\n"
+ ret = ret + "<html><head>"
+ ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\">"
+ ret = ret + "<meta http-equiv=\"Refresh\" content=\"0; url=/cfg\" />"
+ ret = ret + "</head><body>"
+ ret = ret + "<p>Please follow <a href=\"/cfg\">link</a>!</p>"
+ ret = ret + "</body></html>"
+ return ret
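One of the commit message's remaining items is switching os.popen and
os.system over to subprocess. A sketch of what that swap might look like;
the command shown is illustrative. Passing an argument list never invokes a
shell, so the replace("|", "").replace("&", "").replace(">", "") scrubbing
used above becomes unnecessary.

# Run a command with output discarded and return its exit status.
import os
import subprocess

def run_quiet(argv):
    with open(os.devnull, "w") as null:
        return subprocess.call(argv, stdout=null, stderr=null)

# e.g. (volume name illustrative):
sts = run_quiet(["/usr/bin/sudo", "/usr/sbin/gluster", "volume", "start", "myvol"])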