ToDo | 21 ++++
scripts/cfs_mount.py | 6 -
scripts/cfs_paths.py | 13 ++
scripts/cfs_start_volume.py | 184 ++++++++++++++++++++++++++++++++++++++++++
scripts/cfs_stop_volume.py | 62 ++++++++++++++
scripts/cfs_utils.py | 13 ++
scripts/cloudfsd.py | 39 +++++---
scripts/paths.py | 10 --
scripts/views/start_done.html | 15 +++
scripts/views/stop_done.html | 15 +++
scripts/views/volumes.html | 9 +-
scripts/volmap.py | 6 -
scripts/volstart.py | 173 ---------------------------------------
scripts/volstop.py | 45 ----------
14 files changed, 357 insertions(+), 254 deletions(-)
New commits:
commit 491da1780e466cfd60c3df0c0b063094f39bd104
Author: Jeff Darcy <jdarcy@redhat.com>
Date: Tue May 10 13:57:17 2011 -0400
Got volume start/stop interfaces to work.
diff --git a/ToDo b/ToDo
new file mode 100644
index 0000000..c769c94
--- /dev/null
+++ b/ToDo
@@ -0,0 +1,21 @@
+= High Priority =
+Interface to start/stop volumes (includes volfile generation and mkdir)
+Get mount.cloudfs to work
+Interface to add/remove tenants
+Interface to map between tenants and volumes
+Use arbitrary directories, not just mountpoints
+Interface to remove volumes
+Handle IPv6, multi-homed hosts, localhost in scan_gfs_volfiles
+SSL
+
+= Medium Priority =
+Interface to remove servers
+Deal with glusterd startup consistently
+Sanity checking for volume configs
+Handle zero-length tenant list without alice/bob filler
+Handle multiple bricks per server when generating volfiles
+
+= Low Priority =
+Import GlusterFS volume to CloudFS
+Eliminate clean_and_run in favor of cfs_utils.run_cmd
+
diff --git a/scripts/cfs_mount.py b/scripts/cfs_mount.py
index af0715d..b6d64d1 100755
--- a/scripts/cfs_mount.py
+++ b/scripts/cfs_mount.py
@@ -4,8 +4,6 @@ import urllib2
import volfilter
-CLOUDFSD_PORT = 8080
-
# Simple cache of brick-to-port mappings, so we don't have to keep re-fetching
# the maps from the same host if it has multiple bricks.
class mapper:
@@ -16,7 +14,7 @@ class mapper:
mydict = self.cache[host]
else:
url = "http://%s:%d/%s/map" % \
- (host, CLOUDFSD_PORT, volume)
+ (host, cfs_paths.CLOUDFSD_PORT, volume)
mydict = json.load(urllib2.urlopen(url))
self.cache[host] = mydict
if mydict.has_key(subv):
@@ -34,7 +32,7 @@ if __name__ == "__main__":
(host, volume, username, password, mount) = sys.argv[1:6]
# Fetch the GlusterFS client-side volfile.
- url = "http://%s:%d/%s/fetch" % (host, CLOUDFSD_PORT, volume)
+ url = "http://%s:%d/%s/fetch" % (host, cfs_paths.CLOUDFSD_PORT, volume)
vol_file = urllib2.urlopen(url)
# Load the volfile and clean out some of the crud.
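Note: with CLOUDFSD_PORT moved into cfs_paths, the brick-to-port lookup that cfs_mount.py performs is one HTTP GET per host, cached so multiple bricks on the same host don't re-fetch the map. A minimal standalone sketch of that lookup; the host, volume, and subvolume names below are illustrative only:

    import json
    import urllib2

    import cfs_paths

    port_cache = {}          # host -> {subvolume: port}

    def lookup_port (host, volume, subv):
        # Fetch the map served by cloudfsd on that host, caching per host.
        if host not in port_cache:
            url = "http://%s:%d/%s/map" % (host, cfs_paths.CLOUDFSD_PORT, volume)
            port_cache[host] = json.load(urllib2.urlopen(url))
        return port_cache[host].get(subv)

    print lookup_port("node1", "testvol", "testvol-client-0")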
diff --git a/scripts/cfs_paths.py b/scripts/cfs_paths.py
new file mode 100644
index 0000000..27e11b0
--- /dev/null
+++ b/scripts/cfs_paths.py
@@ -0,0 +1,13 @@
+
+import re
+import os
+
+gfs_dir = "/var/lib/glusterd"
+info_dir = "/var/lib/cloudfs"
+idle_subdir = os.path.join(info_dir,".idle_ports")
+used_subdir = os.path.join(info_dir,".used_ports")
+log_dir = "/var/log/cloudfs"
+pid_dir = "/var/run/cloudfs"
+volfile_re = re.compile("[^.]+\.(.*)\.bricks-")
+CLOUDFSD_PORT = 8080
+
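Note: volfile_re encodes the naming convention glusterd uses for per-brick server volfiles, <volume>.<host>.<brick-path>.vol, and captures the host part. A quick check of what it extracts; the volfile name below is made up for illustration (brick directory /bricks/data on host node1.example.com):

    import cfs_paths

    m = cfs_paths.volfile_re.match("testvol.node1.example.com.bricks-data.vol")
    if m:
        print m.group(1)        # prints: node1.example.com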
diff --git a/scripts/cfs_start_volume.py b/scripts/cfs_start_volume.py
new file mode 100644
index 0000000..989bff9
--- /dev/null
+++ b/scripts/cfs_start_volume.py
@@ -0,0 +1,184 @@
+
+import glob
+import os
+import re
+import socket
+import string
+import subprocess
+import sys
+import urllib2
+
+from bottle import template
+
+import volfilter
+import cfs_paths
+import cfs_utils
+
+# Make sure the volume directory exists and has the right stuff in it.
+def check_volume_directory(vol_name):
+ if not os.path.exists(cfs_paths.info_dir):
+ os.mkdir(cfs_paths.info_dir)
+ user_file = open("%s/%s" % (cfs_paths.info_dir, "default_users"), "w")
+ # TBD: big gaping security hole until other code can deal
+ # with having zero users defined.
+ user_file.write("alice password1\nbob password2\n")
+ user_file.flush()
+ user_file.close()
+ os.mkdir(cfs_paths.idle_subdir)
+ for i in range(24010, 24030):
+ fp = open("%s/%d" % (cfs_paths.idle_subdir, i), "w")
+ fp.close()
+ os.mkdir(cfs_paths.used_subdir)
+
+ vol_dir = "%s/%s" % (cfs_paths.info_dir, vol_name)
+ if not os.path.exists(vol_dir):
+ os.mkdir(vol_dir)
+ return vol_dir
+
+# Figure out which of the GlusterFS volfiles belong to us. Volfiles might have
+# names based on partial host names, fully qualified names, or addresses, or
+# even a mix thanks to "gluster peer probe" silliness. To deal with all of
+# these possibilities, we resolve everything to addresses and compare those.
+# ### bear in mind that depending on how a machine is set up, the IP addrs
+# ### for a node might include 127.0.0.1 and ::1 first
+def scan_gfs_volfiles(vol_name):
+ ret = ""
+ my_name = os.uname()[1]
+ # Getaddrinfo returns a list of tuples, each:
+ # family, socktype, proto, canonname, sockaddr
+ # We extract the sockaddr of the first item, and the IP addr from that
+ # TBD: handle IPv6, multi-homed hosts, etc.
+ # TBD: skip loopback addresses based on note above
+ my_addrs = socket.getaddrinfo(my_name, 0)
+ my_glob = "%s/vols/%s/%s.*.vol" % (cfs_paths.gfs_dir, vol_name, vol_name)
+ for vf in glob.iglob(my_glob):
+ m = cfs_paths.volfile_re.match(os.path.basename(vf))
+ if m:
+ this_host = m.groups(1)[0]
+ this_addr = socket.getaddrinfo(this_host, 0)[0][4][0]
+ for addr in my_addrs:
+ if this_addr == addr[4][0]:
+ ret = vf
+ break
+ return ret
+
+# Allocate a port for a server to run on. Right now we do this in a very
+# "clever" way, by creating files to match ports and then grabbing a file here.
+# When we have a real volume database such games will be unnecessary.
+def allocate_port(vol_file):
+ for pf in glob.iglob("%s/*" % cfs_paths.idle_subdir):
+ base = os.path.basename(pf)
+ new_name = "%s/%s" % (cfs_paths.used_subdir, base)
+ os.symlink(vol_file, new_name)
+ os.remove(pf)
+ return base
+ else:
+ raise RuntimeError, "no ports available"
+
+# Parse the user file into a list of [name,password] sub-lists. Since
+# everything that uses this is in Python we could just make it a pickle/shelf
+# or whatever, but it would all go away with a real volume database so it's not
+# worth the trouble to re-do it now.
+def parse_user_file(vol_name):
+ try:
+ user_file = open("%s/%s/users" % (cfs_paths.info_dir, vol_name), "r")
+ except IOError:
+ user_file = open("%s/default_users" % cfs_paths.info_dir, "r")
+
+ users = []
+ for line in user_file.readlines():
+ space = line.find(" ")
+ if space == -1:
+ print >> sys.stderr, "Bad line in userfile: %s" % line
+ users.append([line[:space],line[space+1:-1]])
+ return users
+
+def cloudify_server (input, output, users, port):
+ print "# Cloudifying server %s" % input
+ graph, last = volfilter.load(input)
+ last = volfilter.cleanup(last,graph)
+
+ if last.type != "protocol/server":
+ print >> sys.stderr, "Top translator must be protocol/server"
+ sys.exit(1)
+ old_stack = last.subvols[0]
+
+ bad_opts = []
+ for opt in last.opts.iterkeys():
+ if opt[:9] == "auth.addr":
+ bad_opts.append(opt)
+ elif opt[:10] == "auth.login":
+ bad_opts.append(opt)
+ for opt in bad_opts:
+ print "# stripping auth option %s = %s" % (opt, last.opts[opt])
+ del last.opts[opt]
+
+ last.subvols = []
+ for user, pw in users:
+ new_stack = volfilter.copy_stack(old_stack,user)
+ last.subvols.append(new_stack)
+ last.opts["auth.login.%s.allow"%new_stack.name] = user
+ last.opts["auth.login.%s.password"%new_stack.name] = pw
+
+ last.opts["transport.socket.listen-port"] = port
+ volfilter.generate(graph,last,output)
+
+def create_tenant_dirs(vol_file):
+ cmd = "/bin/grep \"option directory\" %s" % vol_file
+ path = ""
+ opt_dir_lines = os.popen(cmd)
+ for opt_dir_line in opt_dir_lines:
+ tokens = re.split(' ', string.lstrip(opt_dir_line))
+ path = string.rstrip(tokens[2])
+ if not os.path.exists(path):
+ os.mkdir(path)
+ opt_dir_lines.close()
+
+def start_local (vol_name):
+ vol_base = check_volume_directory(vol_name)
+ users = parse_user_file(vol_name)
+ # TBD: deal with more than one brick on the same server
+ vf = scan_gfs_volfiles(vol_name)
+ new_vf = "%s/%s" % (vol_base, os.path.basename(vf))
+ outfile = open(new_vf, "w")
+ port = allocate_port(new_vf)
+ cloudify_server(vf, outfile, users, port)
+ outfile.flush()
+ outfile.close()
+ v_key = string.replace(os.path.basename(new_vf), ".vol", "")
+ # print "v_key: %s" % v_key
+ # make dirs for each of the users
+ create_tenant_dirs(new_vf)
+ # actually start the server
+ # note: pid file in /var/lib/glusterd/vols/<vol_name>/... so that
+ # gluster can find it
+ logfile = os.path.join(cfs_paths.log_dir,"%s.log"%v_key)
+ pidfile = os.path.join(cfs_paths.pid_dir,"%s.pid"%v_key)
+ cmd = "--volfile %s" % new_vf
+ cmd += (" --log-file %s" % logfile)
+ cmd += (" --pid-file %s" % pidfile)
+ cmd += (" --xlator-option %s-server.transport.socket.listen-port=%s" % (
+ vol_name, port))
+ print "whole command = glusterfsd %s" % cmd
+ retcode = cfs_utils.run_cmd("glusterfsd",cmd).wait()
+ return "start_local(%s) returned %d on %s\n" % (
+ vol_name, retcode, socket.gethostname())
+
+def run_www (vol_name):
+ node_list = cfs_utils.get_nodes_for_vol(vol_name)
+ blob = []
+ for node in node_list:
+ url = "http://%s:%d/volumes/%s/start_local" % (
+ node, cfs_paths.CLOUDFSD_PORT, vol_name)
+ scratch = [node, []]
+ print "opening %s" % url
+ url_obj = urllib2.urlopen(url)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ print "done with %s" % url
+ print blob
+ return template("start_done.html",name=vol_name,blob=blob)
+
+if __name__ == "__main__":
+ run_www(sys.argv[1])
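Note: the port "database" that allocate_port() draws from is just two directories: an idle port is an empty file named after the port number under .idle_ports, and once allocated it becomes a symlink under .used_ports pointing at the volfile it serves. A small sketch of listing the current assignments (essentially the same walk volmap.py does):

    import glob
    import os

    import cfs_paths

    # Each symlink under .used_ports is named after the port and points at the
    # volfile that port was allocated to.
    for link in glob.iglob("%s/*" % cfs_paths.used_subdir):
        print "port %s -> %s" % (os.path.basename(link), os.readlink(link))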
diff --git a/scripts/cfs_stop_volume.py b/scripts/cfs_stop_volume.py
new file mode 100644
index 0000000..ebfa47a
--- /dev/null
+++ b/scripts/cfs_stop_volume.py
@@ -0,0 +1,62 @@
+
+import fileinput
+import glob
+import os
+import socket
+import subprocess
+import sys
+import urllib2
+
+from bottle import template
+
+import cfs_paths
+import cfs_utils
+
+def kill_daemon (vol_name):
+ myglob = os.path.join(cfs_paths.pid_dir,"%s.*"%vol_name)
+ for f in glob.iglob(myglob):
+ fp = open(f,"r")
+ pid = fp.read()[:-1]
+ print "killing %s" % pid
+ cfs_utils.run_cmd("kill",pid).wait()
+ break
+ return 0
+
+def recycle_port (path):
+ print "recycling %s" % path
+ port_num = os.path.basename(path)
+ os.unlink(path)
+ fp = open("%s/%s" % (cfs_paths.idle_subdir, port_num), "w")
+ fp.close()
+
+def stop_local (vol_name):
+ retcode = kill_daemon(vol_name)
+ for symlink in glob.glob(cfs_paths.used_subdir + "/*"):
+ vol_link = os.readlink(symlink)
+ vol = os.path.basename(vol_link)
+ tokens = vol.split(".")
+ if vol_name == tokens[0]:
+ recycle_port(symlink)
+ break
+ return "stop_local(%s) returned %d on %s\n" % (
+ vol_name, retcode, socket.gethostname())
+
+
+def run_www (vol_name):
+ node_list = cfs_utils.get_nodes_for_vol(vol_name)
+ blob = []
+ for node in node_list:
+ url = "http://%s:%d/volumes/%s/stop_local" % (
+ node, cfs_paths.CLOUDFSD_PORT, vol_name)
+ scratch = [node, []]
+ print "opening %s" % url
+ url_obj = urllib2.urlopen(url)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ print "done with %s" % url
+ print blob
+ return template("stop_done.html",name=vol_name,blob=blob)
+
+if __name__ == "__main__":
+ run_www(sys.argv[1])
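Note: stop_local() finds the daemon through the pid files start_local() drops in /var/run/cloudfs, named after the volfile key. A dry-run sketch of that lookup, with an illustrative volume name:

    import glob
    import os

    import cfs_paths

    vol_name = "testvol"    # illustrative
    for pf in glob.iglob(os.path.join(cfs_paths.pid_dir, "%s.*" % vol_name)):
        pid = open(pf).read().strip()
        print "would kill pid %s recorded in %s" % (pid, pf)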
diff --git a/scripts/cfs_utils.py b/scripts/cfs_utils.py
index 09e5ace..fd981cd 100644
--- a/scripts/cfs_utils.py
+++ b/scripts/cfs_utils.py
@@ -1,7 +1,10 @@
+import glob
import os
import socket
import subprocess
+import cfs_paths
+
# The list of filesystems that work is much shorter than the list of
# filesystems that don't support xattrs, already-remote filesystems (e.g. NFS),
# pseudo-filesystems (e.g. devfs) or other things that won't work for one
@@ -116,3 +119,13 @@ def get_mounts (brick_list):
volumes_on_nodes[node_ip] = scratch
return volumes_on_nodes
+# Get the list of nodes that are serving (any part of) a specific volume.
+def get_nodes_for_vol (vol_name):
+ node_list = set()
+ my_glob = "%s/vols/%s/%s.*.vol" % (cfs_paths.gfs_dir,
+ vol_name, vol_name)
+ for vf in glob.iglob(my_glob):
+ m = cfs_paths.volfile_re.match(os.path.basename(vf))
+ if m:
+ node_list.add(m.groups(1)[0])
+ return node_list
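Note: get_nodes_for_vol() derives the node list purely from the volfile names glusterd already wrote, using the same volfile_re capture as the start/stop scripts. Assuming a volume with bricks on two hosts (names illustrative), it behaves roughly like:

    import cfs_utils

    # For a volume "testvol" with bricks on node1 and node2 this would print
    # something like set(['node1', 'node2']).
    print cfs_utils.get_nodes_for_vol("testvol")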
diff --git a/scripts/cloudfsd.py b/scripts/cloudfsd.py
index e510823..15d0a5c 100755
--- a/scripts/cloudfsd.py
+++ b/scripts/cloudfsd.py
@@ -1,32 +1,22 @@
#!/usr/bin/python
-from bottle import route, post, run, view, debug
+from bottle import route, post, run, view, debug, CherryPyServer
import os
import socket
import string
-import paths
-import volstart
-import volstop
+import cfs_paths
import volmap
import cfs_utils
import cfs_add_node
import cfs_add_volume
-
-CLOUDFSD_PORT = 8080
-
-@route("/:vol_name/start")
-def start_server(vol_name):
- volstart.vol_start(vol_name)
-
-@route("/:vol_name/stop")
-def stop_server(vol_name):
- volstop.vol_stop(vol_name)
+import cfs_start_volume
+import cfs_stop_volume
@route("/:vol_name/fetch")
def fetch_client_vf(vol_name):
- vf_path = "%s/vols/%s/%s-fuse.vol" % (paths.gfs_dir, vol_name, vol_name)
+ vf_path = "%s/vols/%s/%s-fuse.vol" % (cfs_paths.gfs_dir, vol_name, vol_name)
return open(vf_path,"r")
@route("/:vol_name/map")
@@ -80,6 +70,22 @@ def show_volumes():
def add_volume():
return cfs_add_volume.run_www()
+@route("/volumes/:vol_name/start")
+def start_volume(vol_name):
+ return cfs_start_volume.run_www(vol_name)
+
+@route("/volumes/:vol_name/start_local")
+def start_local (vol_name):
+ return cfs_start_volume.start_local(vol_name)
+
+@route("/volumes/:vol_name/stop")
+def stop_volume(vol_name):
+ return cfs_stop_volume.run_www(vol_name)
+
+@route("/volumes/:vol_name/stop_local")
+def stop_local (vol_name):
+ return cfs_stop_volume.stop_local(vol_name)
+
@route("/wwwaddtenant")
def www_addtenant():
print "www addtenant"
@@ -98,4 +104,5 @@ def get_style (sheet):
if __name__ == "__main__":
debug(True)
- run(host='',port=CLOUDFSD_PORT)
+ run(host='0.0.0.0',port=cfs_paths.CLOUDFSD_PORT,server=CherryPyServer)
+
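Note: with these routes in place, starting or stopping a volume cluster-wide is a single HTTP request to any node running cloudfsd; the handler fans the *_local calls out to the other servers itself. A minimal client-side sketch, with an illustrative volume name:

    import urllib2

    import cfs_paths

    vol = "testvol"     # illustrative
    url = "http://localhost:%d/volumes/%s/start" % (cfs_paths.CLOUDFSD_PORT, vol)
    print urllib2.urlopen(url).read()
    # Stopping is symmetric: /volumes/<vol_name>/stop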
diff --git a/scripts/paths.py b/scripts/paths.py
deleted file mode 100644
index 51bdafd..0000000
--- a/scripts/paths.py
+++ /dev/null
@@ -1,10 +0,0 @@
-
-import re
-import os
-
-gfs_dir = "/var/lib/glusterd"
-info_dir = "/var/lib/cloudfs"
-idle_subdir = "/var/lib/cloudfs/.idle_ports"
-used_subdir = "/var/lib/cloudfs/.used_ports"
-volfile_re = re.compile("[^.]+\.(.*)\.bricks-")
-
diff --git a/scripts/views/start_done.html b/scripts/views/start_done.html
new file mode 100644
index 0000000..5e35b6c
--- /dev/null
+++ b/scripts/views/start_done.html
@@ -0,0 +1,15 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+<p>Volume {{name}} started.</p>
+%for node, output in blob:
+ <p><b>{{node}}</b></p>
+ <pre>
+ %for line in output:
+ {{line}}
+ %end
+ </pre>
+%end
+<p><a href="/volumes">Back to volume configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
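Note: the blob this template iterates over is a list of [node, output-lines] pairs built by run_www(). Rendering it outside the web app looks roughly like this, assuming bottle can find the views/ directory and using made-up output data:

    from bottle import template

    # Illustrative blob: one [node, output-lines] pair per server.
    blob = [["node1", ["start_local(testvol) returned 0 on node1\n"]],
            ["node2", ["start_local(testvol) returned 0 on node2\n"]]]
    print template("start_done.html", name="testvol", blob=blob)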
diff --git a/scripts/views/stop_done.html b/scripts/views/stop_done.html
new file mode 100644
index 0000000..69616c0
--- /dev/null
+++ b/scripts/views/stop_done.html
@@ -0,0 +1,15 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+<p>Volume {{name}} stopped.</p>
+%for node, output in blob:
+ <p><b>{{node}}</b></p>
+ <pre>
+ %for line in output:
+ {{line}}
+ %end
+ </pre>
+%end
+<p><a href="/volumes">Back to volume configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
diff --git a/scripts/views/volumes.html b/scripts/views/volumes.html
index 11c964c..f4d7436 100644
--- a/scripts/views/volumes.html
+++ b/scripts/views/volumes.html
@@ -11,11 +11,14 @@
<div class="content">
<h2>Existing Volumes:</h2>
%for vol_name, brick_list in bricks.iteritems():
- <p>{{vol_name}}
+ <p><b>{{vol_name}}</b>
+ <a href="/volumes/{{vol_name}}/start">start</a>
+ <a href="/volumes/{{vol_name}}/stop">stop</a></p>
+ <ul>
%for brick in brick_list:
- <br /> {{brick}}
+ <li>{{brick}}</li>
%end
- </p>
+ </ul>
%end
<hr>
<h2>Provision a Volume From Available Bricks:</h2>
diff --git a/scripts/volmap.py b/scripts/volmap.py
index 92b77e9..f5a81b2 100644
--- a/scripts/volmap.py
+++ b/scripts/volmap.py
@@ -4,13 +4,13 @@ import os
import re
import volfilter
-import paths
+import cfs_paths
def vol_map (vol_name):
- vol_dir = "%s/%s" % (paths.info_dir, vol_name)
+ vol_dir = "%s/%s" % (cfs_paths.info_dir, vol_name)
vol_re = re.compile(vol_dir+"/")
map = {}
- for link in glob.iglob("%s/*" % paths.used_subdir):
+ for link in glob.iglob("%s/*" % cfs_paths.used_subdir):
real_file = os.readlink(link)
m = vol_re.match(real_file)
if not m:
diff --git a/scripts/volstart.py b/scripts/volstart.py
deleted file mode 100644
index 3c699f7..0000000
--- a/scripts/volstart.py
+++ /dev/null
@@ -1,173 +0,0 @@
-
-import glob
-import os
-import re
-import socket
-import string
-import subprocess
-import sys
-
-import volfilter
-import paths
-
-# Make sure the volume directory exists and has the right stuff in it.
-def check_volume_directory(vol_name):
- if not os.path.exists(paths.info_dir):
- os.mkdir(paths.info_dir)
- user_file = open("%s/%s" % (paths.info_dir, "default_users"), "w")
- # TBD: big gaping security hole until other code can deal
- # with having zero users defined.
- user_file.write("alice password1\nbob password2\n")
- user_file.flush()
- user_file.close()
- os.mkdir(paths.idle_subdir)
- for i in range(24010, 24030):
- fp = open("%s/%d" % (paths.idle_subdir, i), "w")
- fp.close()
- os.mkdir(paths.used_subdir)
-
- vol_dir = "%s/%s" % (paths.info_dir, vol_name)
- if not os.path.exists(vol_dir):
- os.mkdir(vol_dir)
- return vol_dir
-
-# Figure out which of the GlusterFS volfiles belong to us. Volfiles might have
-# names based on partial host names, fully qualified names, or addresses, or
-# even a mix thanks to "gluster peer probe" silliness. To deal with all of
-# these possibilities, we resolve everything to addresses and compare those.
-# ### bear in mind that depending on how a machine is set up, the IP addrs
-# ### for a node might include 127.0.0.1 and ::1 first
-def scan_gfs_volfiles(vol_name):
- ret = ""
- my_name = os.uname()[1]
- # Getaddrinfo returns a list of tuples, each:
- # family, socktype, proto, canonname, sockaddr
- # We extract the sockaddr of the first item, and the IP addr from that
- # TBD: handle IPv6, multi-homed hosts, etc.
- # TBD: skip loopback addresses based on note above
- my_addrs = socket.getaddrinfo(my_name, 0)
- my_glob = "%s/vols/%s/%s.*.vol" % (paths.gfs_dir, vol_name, vol_name)
- for vf in glob.iglob(my_glob):
- m = paths.volfile_re.match(os.path.basename(vf))
- if m:
- this_host = m.groups(1)[0]
- this_addr = socket.getaddrinfo(this_host, 0)[0][4][0]
- for addr in my_addrs:
- if this_addr == addr[4][0]:
- ret = vf
- break
- return ret
-
-# Allocate a port for a server to run on. Right now we do this in a very
-# "clever" way, by creating files to match ports and then grabbing a file here.
-# When we have a real volume database such games will be unnecessary.
-def allocate_port(vol_file):
- for pf in glob.iglob("%s/*" % paths.idle_subdir):
- base = os.path.basename(pf)
- new_name = "%s/%s" % (paths.used_subdir, base)
- os.symlink(vol_file, new_name)
- os.remove(pf)
- return base
- else:
- raise RuntimeError, "no ports available"
-
-# Parse the user file into a list of [name,password] sub-lists. Since
-# everything that uses this is in Python we could just make it a pickle/shelf
-# or whatever, but it would all go away with a real volume database so it's not
-# worth the trouble to re-do it now.
-def parse_user_file(vol_name):
- try:
- user_file = open("%s/%s/users" % (paths.info_dir, vol_name), "r")
- except IOError:
- user_file = open("%s/default_users" % paths.info_dir, "r")
-
- users = []
- for line in user_file.readlines():
- space = line.find(" ")
- if space == -1:
- print >> sys.stderr, "Bad line in userfile: %s" % line
- users.append([line[:space],line[space+1:-1]])
- return users
-
-# Convert a single GlusterFS server volfile to its CloudFS form, with one
-# translator stack per tenant and "evil" translators stripped out. Some day
-# this will also involve adding translators (e.g. UID mapping) at the top of
-# each stack.
-def cloudify_volfile(input, output, users, port):
- graph, last = volfilter.load(input)
- last = volfilter.cleanup(last, graph)
-
- if last.type != "protocol/server":
- print >> sys.stderr, "Top translator must be protocol/server"
- sys.exit(1)
- old_stack = last.subvols[0]
-
- bad_opts = []
- for opt in last.opts.iterkeys():
- if opt[:9] == "auth.addr":
- bad_opts.append(opt)
- elif opt[:10] == "auth.login":
- bad_opts.append(opt)
- for opt in bad_opts:
- print "# stripping auth option %s = %s" % (opt, last.opts[opt])
- del last.opts[opt]
-
- last.subvols = []
- for user, pw in users:
- new_stack = volfilter.copy_stack(old_stack, user)
- last.subvols.append(new_stack)
- last.opts["auth.login.%s.allow" % new_stack.name] = user
- last.opts["auth.login.%s.password" % new_stack.name] = pw
-
- last.opts["transport.socket.listen-port"] = port
- volfilter.generate(graph, last, output)
-
-def create_tenant_dirs(vol_file):
- cmd = "/bin/grep \"option directory\" %s" % vol_file
- path = ""
- opt_dir_lines = os.popen(cmd)
- for opt_dir_line in opt_dir_lines:
- tokens = re.split(' ', string.lstrip(opt_dir_line))
- path = string.rstrip(tokens[2])
- if not os.path.exists(path):
- os.mkdir(path)
- opt_dir_lines.close()
- junkdir = os.path.dirname(path) + "/junk"
- if not os.path.exists(junkdir):
- os.mkdir(junkdir)
-
-def vol_start(vol_name):
- vol_base = check_volume_directory(vol_name)
- users = parse_user_file(vol_name)
- # TBD: deal with more than one brick on the same server
- vf = scan_gfs_volfiles(vol_name)
- new_vf = "%s/%s" % (vol_base, os.path.basename(vf))
- outfile = open(new_vf, "w")
- port = allocate_port(new_vf)
- cloudify_volfile(vf, outfile, users, port)
- outfile.flush()
- outfile.close()
- v_key = string.replace(os.path.basename(new_vf), ".vol", "")
- # print "v_key: %s" % v_key
- # make dirs for each of the users
- create_tenant_dirs(new_vf)
- # actually start the server
- # note: pid file in /var/lib/glusterd/vols/<vol_name>/... so that
- # gluster can find it
- cmd = "/usr/sbin/glusterfsd --volfile=%s --xlator-option %s-server.listen-port=%s --pid-file=/var/lib/glusterd/vols/%s/run/%s.pid --socket-file=/tmp/%s.socket --log-file=/var/log/glusterfs/bricks/%s.log" % (new_vf, vol_name, port, vol_name, v_key, v_key, vol_name)
- # before 3.1.4 there were --brick-name and --brick-port for use by
- # the gluster port mapper. These were secret/hidden cmdline options.
- try:
- p = subprocess.Popen(cmd, close_fds=True, shell=True)
- retcode = os.waitpid(p.pid, 0)[1]
- if retcode < 0:
- print >>sys.stderr, "killed ", -retcode
- except ValueError, v:
- print >>sys.stderr, "value error: ", v
- except OSError, o:
- print >>sys.stderr, "os error: ", o
- except NameError, n:
- print >>sys.stderr, "name error: ", n
- except:
- print >>sys.stderr, "unknown error: ", sys.exc_info()[0]
-
diff --git a/scripts/volstop.py b/scripts/volstop.py
deleted file mode 100644
index 068baeb..0000000
--- a/scripts/volstop.py
+++ /dev/null
@@ -1,45 +0,0 @@
-
-import fileinput
-import glob
-import os
-import subprocess
-import sys
-
-import paths
-
-def kill_daemon(vol_name, vol_id):
- cooked = vol_id.rsplit(".", 1)
- pid_file_name = paths.gfs_dir + "/vols/" + vol_name + "/run/" + cooked[0] + ".pid"
- for pid in fileinput.input(pid_file_name):
- cmd = "/bin/kill " + pid
- try:
- p = subprocess.Popen(cmd, close_fds=True, shell=True);
- retcode = os.waitpid(p.pid, 0)[1]
- if retcode < 0:
- print >>sys.stderr, "killed ", -retcode
- except ValueError, v:
- print >>sys.stderr, "value error: ", v
- except OSError, o:
- print >>sys.stderr, "os error: ", o
- except NameError, n:
- print >>sys.stderr, "name error: ", n
- except:
- print >>sys.stderr, "unknown error: ", sys.exc_info()[0]
- fileinput.close()
-
-def recycle_port(path):
- port_num = os.path.basename(path)
- os.unlink(path)
- fp = open("%s/%s" % (paths.idle_subdir, port_num), "w")
- fp.close()
-
-def vol_stop(vol_name):
- for symlink in glob.glob(paths.used_subdir + "/*"):
- vol_link = os.readlink(symlink)
- vol = os.path.basename(vol_link)
- tokens = vol.split(".")
- if vol_name == tokens[0]:
- kill_daemon(vol_name, vol)
- recycle_port(symlink)
- break
-