[RFC] oplock with a guarantee of progress (description of the primitive) - take 2
by Edward Shishkin
Fixed bad tabs/whitespace.
/*
* Oplock translator:
*
* Serialize rmw-request submission with a guarantee
* of progress.
* Maintain per-inode queues of submission requests.
*
*
* Glossary:
*
* Read-modify-write request (rmw-request)
*
* is a write request which contains extra data
* to write. Such requests are specific to atomic
* cipher modes.
* Submitting an rmw-request can make other pending
* rmw-requests out-of-date, which would lead to lost
* operations. In order to prevent this we serialize
* rmw-request submission and make sure that every
* request which is going to be submitted contains
* up-to-date information.
*
* Request for submission (sub-request)
*
* An element of the inode's rmw_queue. A sub-request is
* issued for every new rmw-request in the case when
* the file is "busy" (i.e. the rmw_queue is not empty).
* In this case a sub-request with a unique serial number
* is issued and put on the rmw_queue. With this number
* the process will update its data and try to re-submit
* the rmw-request later.
*
*
*
* struct _inode {
* ...
*
* gf_lock_t rmwq_lock; /* protects the rmw-queue */
* rmw_queue_head_t head; /* queue of submission requests */
* uint64_t generation; /* last allocated serial number */
* }
*
* lock_queue(inode) is LOCK(&inode->rmwq_lock)
* unlock_queue(inode) is UNLOCK(&inode->rmwq_lock)
*/
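For illustration only, here is a minimal, self-contained C sketch of the
per-inode queue described above. pthread_mutex_t stands in for gf_lock_t,
the list is hand-rolled rather than GlusterFS's list heads, and all names
(oplock_subreq_t, oplock_inode_ctx_t, oplock_subreq_issue) are made up,
not the actual CloudFS code.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

/* one pending request-for-submission (sub-request) */
typedef struct oplock_subreq {
        uint64_t              serial;   /* serial number of the rmw-request */
        time_t                issued;   /* for the "owner has died" timeout */
        struct oplock_subreq *next;
} oplock_subreq_t;

/* the per-inode part described in struct _inode above */
typedef struct oplock_inode_ctx {
        pthread_mutex_t  rmwq_lock;   /* protects the rmw-queue */
        oplock_subreq_t *head;        /* queue of submission requests */
        uint64_t         generation;  /* last allocated serial number */
} oplock_inode_ctx_t;

/* allocate a serial number and append a sub-request; queue must be locked */
static oplock_subreq_t *
oplock_subreq_issue(oplock_inode_ctx_t *ctx)
{
        oplock_subreq_t *sub = calloc(1, sizeof(*sub));
        oplock_subreq_t **p = &ctx->head;

        if (!sub)
                return NULL;
        sub->serial = ++ctx->generation;
        sub->issued = time(NULL);
        while (*p)
                p = &(*p)->next;        /* append at the tail */
        *p = sub;
        return sub;
}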
/*
* oplock_writev():
*
* Pre-condition: a read-modify-write request rmw_req
* has been received.
*
* Check if we can proceed with this.
*
* If we cannot, and the rmw-request is new (i.e. it
* doesn't have a serial number and we haven't tried to
* submit it before), then allocate a serial number and
* issue a sub-request. Then return an error so that the
* request is re-submitted later.
*/
oplock_writev()
{
        lock_queue(inode);
        if (the inode's rmw_queue is empty) {
empty:
                /*
                 * submit this rmw_req, but first
                 * we need to "occupy" the queue
                 */
                allocate a serial number;
                create a sub-request and insert it into the queue;
                goto submit;
        }
        else {
                if (is_new_request(rmw_req)) {
                        /*
                         * queue is not empty, so we are not
                         * allowed to proceed (there are
                         * requests with higher priorities)
                         */
                        allocate a serial number;
                        create a sub-request and insert it into the queue;
                        /*
                         * refuse to proceed, will be
                         * restarted with this serial
                         * number later
                         */
                        goto refuse;
                }
                /*
                 * queue is not empty and rmw_req is
                 * not new (it has a serial number)
                 */
next:
                find the next sub_req in the queue;
                if (sub_req is too old, timeout is over) {
                        /* the owner has died */
                        remove sub_req;
                        if (rmw_queue is empty)
                                goto empty;
                        else
                                goto next;
                }
                if (serial_number_of(rmw_req) != serial_number_of(sub_req)) {
                        /*
                         * this is not our turn to submit
                         */
refuse:
                        unlock_queue(inode);
                        /*
                         * refuse to proceed,
                         * will be re-submitted
                         */
                        op_errno = EBUSY;
                        STACK_UNWIND();
                        return;
                }
submit:
                /*
                 * our turn to submit,
                 * allow to proceed
                 */
                unlock_queue(inode);
                STACK_WIND();
                return;
        }
}
/*
* this is called only if we
* were allowed to submit.
*/
oplock_writev_cbk()
{
        if (op_ret >= 0) {
                /*
                 * it was our turn to submit, and our
                 * rmw-request has been successfully
                 * submitted
                 */
                lock_queue(inode);
                remove the sub-request from the queue;
                unlock_queue(inode);
        }
        else {
                /*
                 * it was our turn to submit, but an error
                 * happened in other layers, so don't remove
                 * the sub-request; we will retry.
                 */
        }
        STACK_UNWIND();
}
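Continuing the illustrative sketch from above, here is how the decision made
in oplock_writev() and the cleanup done in oplock_writev_cbk() might look,
assuming *serial == 0 marks a request that has never been queued and a
negative-errno return convention; timeout value and names are assumptions,
not the actual implementation.

#include <errno.h>

#define SUBREQ_TIMEOUT 30   /* seconds; the value is an assumption */

/* 0: wind the write downstream; -EBUSY: retry later with *serial */
static int
oplock_may_submit(oplock_inode_ctx_t *ctx, uint64_t *serial)
{
        oplock_subreq_t *sub;
        int ret = -EBUSY;

        pthread_mutex_lock(&ctx->rmwq_lock);

        /* drop sub-requests whose owner has apparently died */
        while (ctx->head &&
               time(NULL) - ctx->head->issued > SUBREQ_TIMEOUT) {
                sub = ctx->head;
                ctx->head = sub->next;
                free(sub);
        }

        if (ctx->head == NULL) {
                /* queue is empty: occupy it and proceed */
                sub = oplock_subreq_issue(ctx);
                if (sub) {
                        *serial = sub->serial;
                        ret = 0;
                } else
                        ret = -ENOMEM;
        } else if (*serial == 0) {
                /* brand new request: park a sub-request, refuse for now */
                sub = oplock_subreq_issue(ctx);
                if (sub)
                        *serial = sub->serial;
                else
                        ret = -ENOMEM;
        } else if (*serial == ctx->head->serial) {
                /* the oldest sub-request is ours: our turn to submit */
                ret = 0;
        }

        pthread_mutex_unlock(&ctx->rmwq_lock);
        return ret;
}

/* mirror of oplock_writev_cbk(): on success, release the queue head
 * (ours, since we only submit when our sub-request is at the head) */
static void
oplock_submitted_ok(oplock_inode_ctx_t *ctx)
{
        pthread_mutex_lock(&ctx->rmwq_lock);
        if (ctx->head) {
                oplock_subreq_t *sub = ctx->head;
                ctx->head = sub->next;
                free(sub);
        }
        pthread_mutex_unlock(&ctx->rmwq_lock);
}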
12 years, 3 months
[RFC] oplock with a guarantee of progress (description of the primitive)
by Edward Shishkin
/*
* Oplock translator:
*
* Serialize rmw-request submission with a guarantee
* of progress.
* Maintain per-inode queues of submission requests.
*
*
* Glossary:
*
* Read-modify-write request (rmw-request)
*
* is a write request which contains extra data
* to write. Such requests are specific to atomic
* cipher modes.
* Submitting an rmw-request can make other pending
* rmw-requests out-of-date, which would lead to lost
* operations. In order to prevent this we serialize
* rmw-request submission and make sure that every
* request which is going to be submitted contains
* up-to-date information.
*
* Request for submission (sub-request)
*
* An element of the inode's rmw_queue. A sub-request is
* issued for every new rmw-request in the case when
* the file is "busy" (i.e. the rmw_queue is not empty).
* In this case a sub-request with a unique serial number
* is issued and put on the rmw_queue. With this number
* the process will update its data and try to re-submit
* the rmw-request later.
*
*
*
* struct _inode {
* ...
*
* gf_lock_t rmwq_lock; /* protects the rmw-queue */
* rmw_queue_head_t head; /* queue of submission requests */
* uint64_t generation; /* last allocated serial number */
* }
*
* lock_queue(inode) is LOCK(&inode->rmwq_lock)
* unlock_queue(inode) is UNLOCK(&inode->rmwq_lock)
*/
/*
* oplock_writev():
*
* Pre-condition: a read-modify-write request rmw_req
* has been received.
*
* Check if we can proceed with this.
*
* If we cannot, and the rmw-request is new (i.e. it
* doesn't have a serial number and we haven't tried to
* submit it before), then allocate a serial number and
* issue a sub-request. Then return an error so that the
* request is re-submitted later.
*/
oplock_writev()
{
        lock_queue(inode);
        if (the inode's rmw_queue is empty) {
empty:
                /*
                 * submit this rmw_req, but first
                 * we need to "occupy" the queue
                 */
                allocate a serial number;
                create a sub-request and insert it into the queue;
                goto submit;
        }
        else {
                if (is_new_request(rmw_req)) {
                        /*
                         * queue is not empty, so we are not
                         * allowed to proceed (there are
                         * requests with higher priorities)
                         */
                        allocate a serial number;
                        create a sub-request and insert it into the queue;
                        /*
                         * refuse to proceed, will be
                         * restarted with this serial
                         * number later
                         */
                        goto refuse;
                }
                /*
                 * queue is not empty and rmw_req is
                 * not new (it has a serial number)
                 */
next:
                find the next sub_req in the queue;
                if (sub_req is too old, timeout is over) {
                        /* the owner has died */
                        remove sub_req;
                        if (rmw_queue is empty)
                                goto empty;
                        else
                                goto next;
                }
                if (serial_number_of(rmw_req) != serial_number_of(sub_req)) {
                        /*
                         * this is not our turn to submit
                         */
refuse:
                        unlock_queue(inode);
                        /*
                         * refuse to proceed,
                         * will be re-submitted
                         */
                        op_errno = EBUSY;
                        STACK_UNWIND();
                        return;
                }
submit:
                /*
                 * our turn to submit,
                 * allow to proceed
                 */
                unlock_queue(inode);
                STACK_WIND();
                return;
        }
}
/*
* this is called only if we
* were allowed to submit.
*/
oplock_writev_cbk()
{
        if (op_ret >= 0) {
                /*
                 * it was our turn to submit, and our
                 * rmw-request has been successfully
                 * submitted
                 */
                lock_queue(inode);
                remove the sub-request from the queue;
                unlock_queue(inode);
        }
        else {
                /*
                 * it was our turn to submit, but an error
                 * happened in other layers, so don't remove
                 * the sub-request; we will retry.
                 */
        }
        STACK_UNWIND();
}
12 years, 3 months
Transparent encryption in Cloudfs (crypt.c, crypt.h)
by Edward Shishkin
Hello everyone.
This is only for review/comments.
Common comments:
Format of counters:
. high 64 bits are the minor object id (the gfid
transformed by a 64-bit hash);
. low 64 bits are the offset in the file.
Format of initial vectors:
for OFB mode the IV is the counter;
for CFB mode the IV is the counter, encrypted with
the respective cipher key.
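To make the layout concrete, here is a small self-contained C sketch of
assembling such a counter. The struct name is made up and hash64() is a
stand-in (FNV-1a here), not necessarily what crypt.c actually uses.

#include <stdint.h>

/* 128-bit counter: high 64 bits = minor object id (64-bit hash of the
 * gfid), low 64 bits = offset in the file, as described above. */
typedef struct cfs_counter {
        uint64_t oid;    /* hash64(gfid) */
        uint64_t off;    /* offset in the file */
} cfs_counter_t;

/* hypothetical 64-bit hash of the 16-byte gfid (FNV-1a as a stand-in) */
static uint64_t
hash64(const unsigned char gfid[16])
{
        uint64_t h = 0xcbf29ce484222325ULL;
        int i;

        for (i = 0; i < 16; i++) {
                h ^= gfid[i];
                h *= 0x100000001b3ULL;
        }
        return h;
}

static void
make_counter(cfs_counter_t *ctr, const unsigned char gfid[16], uint64_t off)
{
        ctr->oid = hash64(gfid);
        ctr->off = off;
        /*
         * For OFB the IV is this counter as-is; for CFB the counter is
         * additionally encrypted with the respective cipher key before
         * being used as the IV.
         */
}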
------------------------------------------------------------
For non-atomic cipher modes there is no problem of local data
obsolescence. For such modes the crypt translator is supposed
to work on the client side.
For atomic modes the crypt translator is supposed to work on
the server side instead of the oplock translator. The problem of
local data obsolescence is resolved by serialization via a special
mutex in struct _inode that should be introduced instead of the
generation counter.
Thanks,
Edward.
12 years, 4 months
More thoughts about Dynamo
by Jeff Darcy
This is mostly a re-statement of things I've said before, but as these
ideas continue to evolve it's worth capturing "snapshots" periodically.
I'll keep doing it until I get some feedback.
Problem #1: the GlusterFS replication ("afr") translator sucks. In
normal operation, it brackets each write with lock and setxattr calls,
resulting in five or more round trips. The effect on performance has
been clearly visible in every test I've run involving synchronous and/or
small-block I/O, to the point where it's simply not acceptable for many
important workloads (e.g. virtual-machine-image storage for RHEV). The
locking is also problematic from a scalability and fault-recovery
standpoint. With regard to fault recovery, it's also a problem that
ensuring full recovery requires a full scan of the entire filesystem,
which takes time proportional to the number of files.
Problem #2: the GlusterFS distribution ("dht") isn't so great either.
It fundamentally can't scale that well because it depends on directories
existing on every node. It doesn't handle adding and removing nodes
very gracefully because of the way "layouts" are stored on every
directory when it's created; new nodes won't even be used for *new*
files without an explicit and expensive "rebalance" operation to
regenerate the layouts. The mapping between server vs. global inode
numbers (using the current server count as part of the calculation) can
lead to all sorts of consistency and aliasing problems. The whole
system of lookups (including inefficient broadcast lookups) and
linkfiles and gfids and so on is enormously complicated and has lately
proven impossible to maintain.
Problem #3: the *relationship between* afr and dht precludes all sorts
of interesting and valuable features. For example, allowing different
files or groups of files to use different replica counts, or placing
replicas based on geographic concerns (e.g. different racks), fails
because the configurations are too static and the code that decides
placement (dht) is practically oblivious to the code that handles
multiple replicas once those decisions have been made (afr). Even
handling servers with non-uniform capacities or performance
characteristics is way more painful than it needs to be.
As a result of all this, I think we need a fundamentally different
distribution/replication setup for CloudFS. Here is a rough list of
requirements:
* Better replication performance by doing writes with only one or at
most two round trips in the normal (no-failure) case.
* Faster replica recovery, proportional to I/O rate instead of data volume.
* No requirement that directories exist on every server.
* Handle adding and removing nodes more gracefully, with new servers
automatically used for new files as soon as they join the server pool.
* Stable mapping between server-specific and global inode numbers,
regardless of subsequent server-membership changes.
* Support dynamic per-file decisions about number and placement of
replicas, including placement on heterogeneous servers.
The basic structure that I've proposed to do all of this still has
separate distribution and replication translators, but with the
distribution translator more fully "in charge" and creating the
replication translators that it needs instead of having them statically
configured. So, for example, the distribution translator might decide
that a file X should be replicated with one copy on server A and one on
server B. It would therefore create a new replication translator across
A and B, and use that to handle requests for X. A moment later it might
decide that file Y should be replicated onto servers A/G/P, and
dynamically create another replication translator representing that
overlapping set. The subsystem to manage replica-set translators would
be a key component of the distribution translator, not the core
GlusterFS infrastructure. More details about each of these translators
follow.
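As one entirely illustrative reading of that, the distribution translator
would keep something like a cache of replica-set translators keyed by the
exact server set; the types and the build callback below are hypothetical,
not existing code.

#include <string.h>

#define MAX_SETS   64
#define MAX_COPIES 4

typedef struct replica_set {
        int   servers[MAX_COPIES];   /* e.g. {A, B} or {A, G, P} */
        int   count;
        void *xl;                    /* dynamically created replication xlator */
} replica_set_t;

static replica_set_t sets[MAX_SETS];
static int nsets;

/* return the translator for this exact server set, building it on demand */
static void *
replica_set_get(const int *servers, int count,
                void *(*build)(const int *, int))
{
        int i;

        for (i = 0; i < nsets; i++)
                if (sets[i].count == count &&
                    memcmp(sets[i].servers, servers,
                           count * sizeof(int)) == 0)
                        return sets[i].xl;

        if (nsets == MAX_SETS || count > MAX_COPIES)
                return NULL;            /* cache full; real code would evict */

        sets[nsets].count = count;
        memcpy(sets[nsets].servers, servers, count * sizeof(int));
        sets[nsets].xl = build(servers, count);
        return sets[nsets++].xl;
}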
== Distribution Translator ("dynamo"). This could be based more closely
on Amazon's Dynamo key/value store - hence the name. Servers are each
assigned one or more virtual node IDs, which are used to determine the
ranges in a consistent-hashing ring for which each is responsible.
More capable nodes (higher capacity or bandwidth) might have more
virtual node IDs, or node IDs covering larger sections of the ring.
Files are looked up by hashing to points on the consistent-hashing ring,
and by searching "around the ring", probing servers at their virtual node
IDs until one returns an answer - no broadcast, no static "layouts"
stored on directories. For replicated files, the next replica will be
found as a "natural" consequence of searching around the ring. New
servers will also "naturally" be found as new files probe them first.
Rebalancing can be done efficiently by assigning new or different
virtual node IDs, and servers "pushing away" files hashing to ranges for
which they're no longer responsible. Linkfiles will still be used, but
strictly as performance-enhancing hints. Inode numbers are permanently
assigned to files when they're created, in a way guaranteed to ensure
their continuing uniqueness. Linkfiles will always carry the version of
the object they point to, and stale linkfiles will always be removed as
soon as they're encountered; neither the presence nor absence of a
linkfile (with staleness being identical to absence) should affect the
final answer of where a file is currently located. There's a lot more,
some of it straight out of the Dynamo papers and some of it in previous
designs or code. For anyone who might want to think or claim I'm spending
too much time at academic conferences and not enough time looking at
code, I actually implemented 90% of this well over a year ago and tested
it enough to determine that it actually supports real workloads such as
building and running standard benchmarks. Go look in
~jdarcy/glusterfs/xlators/cluster/swipe on
kibblesnbits.boston.devel.redhat.com if you don't believe me, and let's
not hear any more of that nonsense.
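To make the lookup path concrete, here is a small self-contained C sketch of
consistent-hash placement with virtual node IDs; the hash function, types and
names are placeholders, not the actual swipe/dynamo code.

#include <stdint.h>

typedef struct vnode {
        uint32_t point;   /* position on the ring */
        int      server;  /* which server owns this virtual node ID */
} vnode_t;

/* toy 32-bit hash (FNV-1a); a stand-in for whatever dynamo would use */
static uint32_t
hash32(const char *s)
{
        uint32_t h = 2166136261u;

        while (*s) {
                h ^= (unsigned char)*s++;
                h *= 16777619u;
        }
        return h;
}

/*
 * Walk "around the ring" from the file's hash point, returning servers in
 * the order they would be probed; the first nreplicas distinct servers
 * found are the natural replica set.
 */
static int
ring_lookup(const vnode_t *ring, int nvnodes, const char *name,
            int *out, int nreplicas)
{
        uint32_t h = hash32(name);
        int found = 0, i, j, start = 0;

        /* ring[] is assumed sorted by point; find the first point >= h,
         * wrapping to ring[0] if h lies beyond the last point */
        for (i = 0; i < nvnodes; i++)
                if (ring[i].point >= h) {
                        start = i;
                        break;
                }

        for (i = 0; i < nvnodes && found < nreplicas; i++) {
                int srv = ring[(start + i) % nvnodes].server;

                for (j = 0; j < found; j++)
                        if (out[j] == srv)
                                break;
                if (j == found)           /* first time we see this server */
                        out[found++] = srv;
        }
        return found;
}

In this picture a new server starts receiving new files as soon as its
virtual node IDs are inserted into the (sorted) ring, which is the
"naturally found" behaviour described above.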
== Replication Translator ("sfr" for Simple File Replication). In
contrast to the current "afr" translator, this could be based on an
operations log or journal. In normal operation, data is simply written
to all N replicas with no additional overhead. The act of writing marks
the affected data region as "dirty" in a log on each server. After the
writes are done, the result is returned to the user and calls are made
*asynchronously* to each server to clear the dirty markers. In case of a
failure, a separate call is made *to each surviving replica* to make the
dirty markers persistent along with time/version information to support
future recovery. Before the failed server can come all the way back up
(i.e. start serving requests) it must contact its peers as a special
client, to retrieve failed-operation information and bring itself up to
date. Yes, there are all sorts of split-brain conditions to worry
about, and edge conditions involving files which have been
removed/renamed/recreated since they were modified, but for the most
part standard replica-repair mechanisms from Dynamo or Coda or in other
relevant systems can be applied.
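As a rough sketch of that write path (every helper below is a hypothetical
stand-in for the RPCs sfr would issue to its N replicas; none of this code
exists yet):

/* hypothetical per-replica operations */
typedef struct region { long long off; long long len; } region_t;

int  write_to_replica(int replica, region_t r, const void *data); /* marks r dirty */
void clear_dirty_async(int replica, region_t r);
void persist_dirty(int replica, region_t r, long long version);

#define SFR_MAX_REPLICAS 8

static int
sfr_write(int nreplicas, region_t r, const void *data, long long version)
{
        int ok[SFR_MAX_REPLICAS] = {0};
        int i, failures = 0;

        if (nreplicas > SFR_MAX_REPLICAS)
                return -1;

        /* one round trip in the normal case: the write itself marks the
         * affected region dirty in each replica's log */
        for (i = 0; i < nreplicas; i++) {
                ok[i] = (write_to_replica(i, r, data) == 0);
                if (!ok[i])
                        failures++;
        }

        if (failures == 0) {
                /* answer the user now; clear the dirty markers lazily */
                for (i = 0; i < nreplicas; i++)
                        clear_dirty_async(i, r);
                return 0;
        }

        /* on failure, each *surviving* replica keeps a persistent,
         * versioned dirty marker so the failed one can catch up before
         * it serves requests again */
        for (i = 0; i < nreplicas; i++)
                if (ok[i])
                        persist_dirty(i, r, version);

        /* whether this still counts as success to the user (quorum etc.)
         * is a policy question the design above leaves open */
        return (failures == nreplicas) ? -1 : 0;
}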
Once we have a full design that addresses these needs, we can start
thinking about the asynchronous multi-site replication that will form
the basis for the third CloudFS release. Even though that's different
in a lot of ways than sfr's synchronous/local replication, some of the
replica-repair code in both might be usable as common code so it's worth
thinking about that kind of future re-use. Even further out beyond that
are things like Reed-Solomon/erasure codes, automatic tiering between
nodes equipped with SSDs and those equipped with Plain Old Disks,
striping huge directories across servers, etc. There are plenty of
directions to go with this, but for now we still need to focus on 1.0
(multi-tenancy, encryption, management) and 2.0 (improved distribution,
replication, and scale). Hopefully this will serve as a reference point
for how we're likely to reach those goals.
12 years, 4 months
Branch 'uidmap' - pkg/cloudfs.spec pkg/configure.ac scripts/cloudfs scripts/volfilter.py xlators/features
by Kaleb KEITHLEY
pkg/cloudfs.spec | 4
pkg/configure.ac | 2
scripts/cloudfs | 1
scripts/volfilter.py | 1
xlators/features/uidmap/src/Makefile.am | 1
xlators/features/uidmap/src/uidmap.c | 2570 +++++++++++++++++++++++++++++++-
xlators/features/uidmap/src/uidmap.h | 17
7 files changed, 2507 insertions(+), 89 deletions(-)
New commits:
commit f3416443abb324d7aef5b45fc28b47c9954ddb64
Author: Kaleb S. KEITHLEY <kkeithle(a)cloudfs-node01.kkeithle.redhat.com>
Date: Thu May 19 15:01:17 2011 -0400
xlator to map tenant uids and gids from the client to unique uids and
gids on the server.
No attempt is made to avoid collisions between server-side uids and
gids among multiple tenants. To avoid collisions, the per-tenant xlator
configuration may specify ranges that can not collide. (In fact that
seems like a good enhancement for the cloudfs python script that creates
the gluster volume config file.)
uids and gids are mapped for every fop, on the premise that we do not
(and probably cannot) know what down-stream xlators might do. Thus they
are mapped early, so that any effects on the fop resulting from, e.g.
SELinux, will be performed with the nominally correct uid and gid.
The uidmap xlator has a simplistic map built in as the default,
implemented with a linear array that is malloced/realloced when new
mappings are created. This default mapper can be over-ridden by configuring
a smarter/faster mapper. And in fact one of the next tasks will be to
provide a tree-based mapper. Watch this space.
diff --git a/pkg/cloudfs.spec b/pkg/cloudfs.spec
index 6e5d336..503e99d 100644
--- a/pkg/cloudfs.spec
+++ b/pkg/cloudfs.spec
@@ -17,10 +17,10 @@ URL: http://cloudfs.org
Source0: http://cloudfs.org/dist/0.5/cloudfs-0.5.tgz
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
-Requires: glusterfs = 3.1.4
+Requires: glusterfs = 3.1.3git
Requires: openssl
Requires: python
-BuildRequires: glusterfs-devel = 3.1.4
+BuildRequires: glusterfs-devel = 3.1.3git
BuildRequires: bison flex
BuildRequires: gcc make
BuildRequires: openssl-devel
diff --git a/pkg/configure.ac b/pkg/configure.ac
index f4d940d..c185be5 100644
--- a/pkg/configure.ac
+++ b/pkg/configure.ac
@@ -65,7 +65,7 @@ if test "x${have_spinlock}" = "xyes"; then
AC_DEFINE(HAVE_SPINLOCK, 1, [define if found spinlock])
fi
-GLUSTER_VERSION=3.1.4git
+GLUSTER_VERSION=3.1.3git
GF_HOST_OS=""
GF_LDFLAGS="-rdynamic"
GF_HOST_OS="GF_LINUX_HOST_OS"
diff --git a/scripts/cloudfs b/scripts/cloudfs
index ed521f1..7add059 100755
--- a/scripts/cloudfs
+++ b/scripts/cloudfs
@@ -100,6 +100,7 @@ def cloudify_server (volfile, users):
for user, pw in users:
new_stack = copy_stack(last.subvols[0],user)
volfilter.push_filter(graph,new_stack,"features/oplock")
+ volfilter.push_filter(graph,new_stack,"features/uidmap")
new_stack.name = user
subvols.append(new_stack)
diff --git a/scripts/volfilter.py b/scripts/volfilter.py
index 1b61595..b68611f 100755
--- a/scripts/volfilter.py
+++ b/scripts/volfilter.py
@@ -32,6 +32,7 @@ good_xlators = [
"features/access-control",
"features/locks",
"features/marker",
+ "features/uidmap",
"performance/io-threads",
"protocol/client",
"protocol/server",
diff --git a/xlators/features/uidmap/src/Makefile.am b/xlators/features/uidmap/src/Makefile.am
index daba512..f603b77 100644
--- a/xlators/features/uidmap/src/Makefile.am
+++ b/xlators/features/uidmap/src/Makefile.am
@@ -1,3 +1,4 @@
+
xlator_LTLIBRARIES = uidmap.la
xlatordir = $(libdir)/glusterfs/$(GLUSTER_VERSION)/xlator/features
diff --git a/xlators/features/uidmap/src/uidmap.c b/xlators/features/uidmap/src/uidmap.c
index 9cab7df..3cd86c7 100644
--- a/xlators/features/uidmap/src/uidmap.c
+++ b/xlators/features/uidmap/src/uidmap.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011 Red Hat, Inc.
+ * Copyright © 2011 Red Hat, Inc.
*
* This file is part of CloudFS.
*
@@ -15,112 +15,2530 @@
*
* You should have received a copy of the GNU Affero General Public License *
* along with CloudFS. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <ctype.h>
-#include <sys/uio.h>
+*/
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif
+/**
+ * xlators/cluster/uidmap :
+ */
+
+#include <time.h>
+#include <dlfcn.h>
+#include <sys/file.h>
#include "glusterfs.h"
+#include "stack.h"
#include "xlator.h"
-#include "logging.h"
-#include "server/server.h"
-#include "server/server-helpers.h"
-
+#include "common-utils.h"
#include "uidmap.h"
-int32_t
-uidmap_open_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, fd_t *fd)
+static uid_t uidmap_low_uid = 10000;
+static uid_t uidmap_hi_uid = 20000;
+static gid_t uidmap_low_gid = 10000;
+static gid_t uidmap_hi_gid = 20000;
+static short uidmap_root_squash = 1;
+static char *uidmap_tenant_override = NULL;
+
+#define CLOUDFS_NOBODY 65534
+
+typedef struct uid_map_entry {
+ uid_t me_client_uid;
+ uid_t me_server_uid;
+} uid_map_entry_t;
+
+typedef struct gid_map_entry {
+ uid_t me_client_gid;
+ uid_t me_server_gid;
+} gid_map_entry_t;
+
+typedef struct id_mapping {
+ uid_map_entry_t *im_uid_map;
+ unsigned short im_uid_map_len;
+ uid_t im_uid_low;
+ uid_t im_uid_high;
+ uid_t im_uid_next;
+ gid_map_entry_t *im_gid_map;
+ unsigned short im_gid_map_len;
+ gid_t im_gid_low;
+ gid_t im_gid_high;
+ gid_t im_gid_next;
+
+ pthread_mutex_t im_mtx;
+} id_mapping_t;
+
+static id_mapping_t uidmap_mapping = {
+ .im_uid_map = NULL,
+ .im_uid_map_len = 0,
+ .im_uid_low = 10000,
+ .im_uid_high = 20000,
+ .im_uid_next = 10000,
+ .im_gid_map = NULL,
+ .im_gid_map_len = 0,
+ .im_gid_low = 10000,
+ .im_gid_high = 20000,
+ .im_gid_next = 10000,
+ .im_mtx = PTHREAD_MUTEX_INITIALIZER
+};
+
+#define CFS_SIGNATURE "cloudfs map: default\n"
+#define CFS_UID_MAP_ENTRY "uid_map_entry:"
+#define CFS_UID_MAP_ENTRY_LEN 13
+#define CFS_UID_LOW "uid_low:"
+#define CFS_UID_LOW_LEN 8
+#define CFS_UID_HIGH "uid_high:"
+#define CFS_UID_HIGH_LEN 9
+#define CFS_UID_NEXT "uid_next:"
+#define CFS_UID_NEXT_LEN 9
+#define CFS_GID_MAP_ENTRY "gid_map_entry:"
+#define CFS_GID_MAP_ENTRY_LEN 13
+#define CFS_GID_LOW "gid_low:"
+#define CFS_GID_LOW_LEN 8
+#define CFS_GID_HIGH "gid_high:"
+#define CFS_GID_HIGH_LEN 9
+#define CFS_GID_NEXT "gid_next:"
+#define CFS_GID_NEXT_LEN 9
+
+
+static void
+uidmap_serialize_default(char *name)
+{
+ char fname[128], lkname[128];
+ (void) snprintf(lkname, sizeof lkname, "/var/lib/cloudfs/map_%s.lck", name);
+ (void) snprintf(fname, sizeof fname, "/var/lib/cloudfs/map_%s", name);
+
+ int lockfd = open(lkname, O_CREAT|O_WRONLY, 0644);
+ if (lockfd != -1) {
+ int status = flock(lockfd, LOCK_EX);
+ if (status != -1) {
+ unsigned short i;
+ FILE *file = fopen(fname, "w");
+ if (file != NULL) {
+ (void) fprintf(file, "cloudfs map: default\n");
+ for (i = 0; i < uidmap_mapping.im_uid_map_len; i++)
+ (void) fprintf(file, "%s: %u %u\n",
+ CFS_UID_MAP_ENTRY,
+ uidmap_mapping.im_uid_map[i].me_client_uid,
+ uidmap_mapping.im_uid_map[i].me_server_uid);
+ (void) fprintf(file, "%s: %u\n", CFS_UID_LOW, uidmap_mapping.im_uid_low);
+ (void) fprintf(file, "%s: %u\n", CFS_UID_HIGH, uidmap_mapping.im_uid_high);
+ (void) fprintf(file, "%s: %u\n", CFS_UID_NEXT, uidmap_mapping.im_uid_next);
+ for (i = 0; i < uidmap_mapping.im_gid_map_len; i++)
+ (void) fprintf(file, "%s: %u %u\n",
+ CFS_GID_MAP_ENTRY,
+ uidmap_mapping.im_gid_map[i].me_client_gid,
+ uidmap_mapping.im_gid_map[i].me_server_gid);
+ (void) fprintf(file, "%s: %u\n", CFS_GID_LOW, uidmap_mapping.im_gid_low);
+ (void) fprintf(file, "%s: %u\n", CFS_GID_HIGH, uidmap_mapping.im_gid_high);
+ (void) fprintf(file, "%s: %u\n", CFS_GID_NEXT, uidmap_mapping.im_gid_next);
+ (void) fclose(file);
+ }
+ (void) flock(lockfd, LOCK_UN);
+ }
+ (void) close(lockfd);
+ }
+}
+
+static int
+uidmap_deserialize_default(char *name)
{
- STACK_UNWIND_STRICT(open,frame,op_ret,op_errno,fd);
+ char fname[128], lkname[128];
+ (void) snprintf(lkname, sizeof lkname, "/var/lib/cloudfs/map_%s.lck", name);
+ (void) snprintf(fname, sizeof fname, "/var/lib/cloudfs/map_%s", name);
+
+ int lockfd = open(lkname, O_CREAT|O_RDONLY, 0644);
+ if (lockfd != -1) {
+ int status = flock(lockfd, LOCK_EX);
+ if (status != -1) {
+ char scratch[128];
+ FILE *file = fopen(fname, "r");
+ if (file != NULL) {
+ char* sts;
+ /* read the first line (signature) of the file) */
+ if ((sts = fgets(scratch, sizeof scratch, file)) != (char *) EOF && sts != NULL) {
+ if (strcmp("cloudfs map: default\n", scratch) == 0) {
+ while ((sts = fgets(scratch, sizeof scratch, file)) != (char *) EOF && sts != NULL) {
+ if (strncmp(scratch, CFS_UID_MAP_ENTRY, CFS_UID_MAP_ENTRY_LEN) == 0) {
+ uid_t server_uid;
+ uid_t client_uid;
+ int num = sscanf(&scratch[CFS_UID_MAP_ENTRY_LEN+1], " %u %u", &client_uid, &server_uid);
+ if (num == 2) {
+ uid_map_entry_t *uid_map_entry = NULL;
+ uidmap_mapping.im_uid_map = REALLOC(uidmap_mapping.im_uid_map, sizeof(uid_map_entry_t) * (uidmap_mapping.im_uid_map_len + 1));
+ if (uidmap_mapping.im_uid_map == NULL)
+ return -1;
+ uid_map_entry = &uidmap_mapping.im_uid_map[uidmap_mapping.im_uid_map_len++];
+ uid_map_entry->me_client_uid = client_uid;
+ uid_map_entry->me_server_uid = server_uid;
+ }
+ } else if (strncmp(scratch, CFS_UID_LOW, CFS_UID_LOW_LEN) == 0) {
+ uidmap_mapping.im_uid_low = (uid_t) strtoul(&scratch[CFS_UID_LOW_LEN+1], NULL, 10);
+ } else if (strncmp(scratch, CFS_UID_HIGH, CFS_UID_HIGH_LEN) == 0) {
+ uidmap_mapping.im_uid_high = (uid_t) strtoul(&scratch[CFS_UID_HIGH_LEN+1], NULL, 10);
+ } else if (strncmp(scratch, CFS_UID_NEXT, CFS_UID_NEXT_LEN) == 0) {
+ uidmap_mapping.im_uid_next = (uid_t) strtoul(&scratch[CFS_UID_NEXT_LEN+1], NULL, 10);
+ } else if (strncmp(scratch, CFS_GID_MAP_ENTRY, CFS_GID_MAP_ENTRY_LEN) == 0) {
+ gid_t server_gid;
+ gid_t client_gid;
+ int num = sscanf(&scratch[CFS_GID_MAP_ENTRY_LEN+1], " %u %u", &client_gid, &server_gid);
+ if (num == 2) {
+ gid_map_entry_t *gid_map_entry = NULL;
+ uidmap_mapping.im_gid_map = REALLOC(uidmap_mapping.im_gid_map, sizeof(gid_map_entry_t) * (uidmap_mapping.im_gid_map_len + 1));
+ if (uidmap_mapping.im_gid_map == NULL)
+ return -1;
+ gid_map_entry = &uidmap_mapping.im_gid_map[uidmap_mapping.im_gid_map_len++];
+ gid_map_entry->me_client_gid = client_gid;
+ gid_map_entry->me_server_gid = server_gid;
+ }
+ } else if (strncmp(scratch, CFS_GID_LOW, CFS_GID_LOW_LEN) == 0) {
+ uidmap_mapping.im_gid_low = (uid_t) strtoul(&scratch[CFS_GID_LOW_LEN+1], NULL, 10);
+ } else if (strncmp(scratch, CFS_GID_HIGH, CFS_GID_HIGH_LEN) == 0) {
+ uidmap_mapping.im_gid_high = (uid_t) strtoul(&scratch[CFS_GID_HIGH_LEN+1], NULL, 10);
+ } else if (strncmp(scratch, CFS_GID_NEXT, CFS_GID_NEXT_LEN) == 0) {
+ uidmap_mapping.im_gid_next = (uid_t) strtoul(&scratch[CFS_GID_NEXT_LEN+1], NULL, 10);
+ }
+ }
+ }
+ }
+ (void) fclose(file);
+ }
+ (void) flock(lockfd, LOCK_UN);
+ }
+ (void) close(lockfd);
+ }
return 0;
}
-int32_t
-uidmap_open (call_frame_t *frame, xlator_t *this,
- loc_t *loc, int32_t flags, fd_t *fd, int32_t wbflags)
+
+static int
+uidmap_map_default(struct _call_stack_t *stack, struct _xlator *xlator)
{
- uidmap_private_t *priv = this->private;
- xlator_t *child = FIRST_CHILD(this);
+ int need_serialize = 0;
- gf_log(this->name,GF_LOG_DEBUG,"mapping tenant %u, uid %u\n",
- priv->tenant, frame->root->uid);
+ if (stack->uid == 0 && uidmap_root_squash) {
+ stack->uid = stack->gid = CLOUDFS_NOBODY;
+ return 0;
+ }
- STACK_WIND(frame, uidmap_open_cbk, child, child->fops->open,
- loc,flags,fd,wbflags);
- return 0;
+ pthread_mutex_lock(&uidmap_mapping.im_mtx);
+ do {
+ /* look for an existing match */
+ char* name = uidmap_tenant_override ? uidmap_tenant_override : xlator->name;
+ uid_map_entry_t *uid_map_entry = NULL;
+ gid_map_entry_t *gid_map_entry = NULL;
+ unsigned short index = 0;
+ for (; index < uidmap_mapping.im_uid_map_len; index++) {
+ if (uidmap_mapping.im_uid_map[index].me_client_uid == stack->uid) {
+
+ uid_map_entry = &uidmap_mapping.im_uid_map[index];
+ break;
+ }
+ }
+
+ /* couldn't find one, make a new one */
+ if (uid_map_entry == NULL) {
+ uid_map_entry_t *tmpmap = REALLOC(uidmap_mapping.im_uid_map, sizeof(uid_map_entry_t) * (uidmap_mapping.im_uid_map_len + 1));
+
+ if (tmpmap == NULL) {
+ pthread_mutex_unlock(&uidmap_mapping.im_mtx);
+ return -1;
+ }
+
+ need_serialize = 1;
+
+ uidmap_mapping.im_uid_map = tmpmap;
+ uid_map_entry = &uidmap_mapping.im_uid_map[uidmap_mapping.im_uid_map_len++];
+ uid_map_entry->me_client_uid = stack->uid;
+ uid_map_entry->me_server_uid = uidmap_mapping.im_uid_next++;
+ gf_log("map uid", GF_LOG_NORMAL,
+ "added new uid mapping for %s %u -> %u",
+ name, stack->uid, uid_map_entry->me_server_uid);
+ }
+
+ stack->uid = uid_map_entry->me_server_uid;
+
+ for (index = 0; index < uidmap_mapping.im_gid_map_len; index++) {
+ if (uidmap_mapping.im_gid_map[index].me_client_gid == stack->gid) {
+
+ gid_map_entry = &uidmap_mapping.im_gid_map[index];
+ break;
+ }
+ }
+
+ /* couldn't find one, make a new one */
+ if (gid_map_entry == NULL) {
+ gid_map_entry_t *tmpmap = REALLOC(uidmap_mapping.im_gid_map, sizeof(gid_map_entry_t) * (uidmap_mapping.im_gid_map_len + 1));
+
+ if (tmpmap == NULL) {
+ pthread_mutex_unlock(&uidmap_mapping.im_mtx);
+ return -1;
+ }
+
+ need_serialize = 1;
+
+ uidmap_mapping.im_gid_map = tmpmap;
+ gid_map_entry = &uidmap_mapping.im_gid_map[uidmap_mapping.im_gid_map_len++];
+ gid_map_entry->me_client_gid = stack->gid;
+ gid_map_entry->me_server_gid = uidmap_mapping.im_gid_next++;
+ gf_log("map gid", GF_LOG_NORMAL,
+ "added new gid mapping for %s %u -> %u",
+ name, stack->gid, gid_map_entry->me_server_gid);
+ }
+
+ stack->gid = gid_map_entry->me_server_gid;
+ } while (0);
+
+ if (need_serialize > 0)
+ uidmap_serialize_default(xlator->name);
+
+ pthread_mutex_unlock(&uidmap_mapping.im_mtx);
+
+ return 0;
}
-int32_t
-init (xlator_t *this)
-{
- uidmap_private_t *priv = NULL;
- char *db = NULL;
- uid_t tenant = 0;
-
- if (!this->children || this->children->next) {
- gf_log ("uidmap", GF_LOG_ERROR,
- "FATAL: uidmap should have exactly one child");
- return -1;
- }
-
- if (!this->parents) {
- gf_log (this->name, GF_LOG_WARNING,
- "dangling volume. check volfile ");
- }
-
- if (dict_get_str(this->options,"database",&db) != 0) {
- gf_log(this->name,GF_LOG_ERROR,"missing option: database");
- return -1;
- }
-
- if (dict_get_uint32(this->options,"tenant",&tenant) != 0) {
- gf_log(this->name,GF_LOG_ERROR,"missing option: tenant");
- return -1;
- }
-
- priv = CALLOC (1, sizeof(uidmap_private_t));
- if (!priv) {
- return -1;
- }
- priv->db_path = gf_strdup(db);
- priv->tenant = tenant;
- this->private = priv;
-
- gf_log ("uidmap", GF_LOG_INFO, "uidmap xlator loaded with db=%s t=%u",
- priv->db_path, priv->tenant);
- return 0;
+
+static void
+uidmap_revmap_default(struct _call_stack_t *stack, uid_t *uid, gid_t *gid)
+{
+ unsigned int index = 0;
+ gf_log("revmap enter", GF_LOG_NORMAL, "revmap %u:%u", *uid, *gid);
+ if (*uid != 0 && *uid != CLOUDFS_NOBODY) {
+ uid_map_entry_t *uid_map_entry = uidmap_mapping.im_uid_map;
+ for (; index < uidmap_mapping.im_uid_map_len; index++, uid_map_entry++) {
+ if (*uid == uid_map_entry->me_server_uid)
+ *uid = uid_map_entry->me_client_uid;
+ }
+ }
+
+ if (*gid != 0 && *gid != CLOUDFS_NOBODY) {
+ gid_map_entry_t *gid_map_entry = uidmap_mapping.im_gid_map;
+ for (index = 0; index < uidmap_mapping.im_gid_map_len; index++, gid_map_entry++) {
+ if (*gid == gid_map_entry->me_server_gid)
+ *gid = gid_map_entry->me_client_gid;
+ }
+ }
+ gf_log("revmap leave", GF_LOG_NORMAL, "revmap returning %u:%u", *uid, *gid);
}
-void
-fini (xlator_t *this)
+static map_fn uidmap_map = NULL;
+static revmap_fn uidmap_revmap = NULL;
+static fini_fn uidmap_plugin_fini = NULL;
+
+
+static char *
+uidmap_stat_to_str(struct iatt *stbuf)
+{
+ char *statstr = NULL;
+ char atime_buf[256] = {0,};
+ char mtime_buf[256] = {0,};
+ char ctime_buf[256] = {0,};
+ int asprint_ret_value = 0;
+ uint64_t ia_time = 0;
+
+ ia_time = stbuf->ia_atime;
+ strftime(atime_buf, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = stbuf->ia_mtime;
+ strftime(mtime_buf, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = stbuf->ia_ctime;
+ strftime(ctime_buf, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ asprint_ret_value = gf_asprintf(&statstr,
+ "ia_ino=%"PRIu64
+ ", st_mode=%o, ia_nlink=%"GF_PRI_NLINK", "
+ "ia_uid=%d, ia_gid=%d, ia_size=%"PRId64", ia_blocks=%"PRId64
+ ", ia_atime=%s, ia_mtime=%s, ia_ctime=%s",
+ stbuf->ia_ino,
+ st_mode_from_ia(stbuf->ia_prot, stbuf->ia_type),
+ stbuf->ia_nlink, stbuf->ia_uid,
+ stbuf->ia_gid, stbuf->ia_size,
+ stbuf->ia_blocks, atime_buf,
+ mtime_buf, ctime_buf);
+
+ if (asprint_ret_value < 0)
+ statstr = NULL;
+
+ return statstr;
+}
+
+
+int
+uidmap_create_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd,
+ inode_t *inode, struct iatt *buf,
+ struct iatt *preparent, struct iatt *postparent)
+{
+ char *statstr = NULL;
+ char *preparentstr = NULL;
+ char *postparentstr = NULL;
+
+ if (op_ret >= 0) {
+ statstr = uidmap_stat_to_str(buf);
+ preparentstr = uidmap_stat_to_str(preparent);
+ postparentstr = uidmap_stat_to_str(postparent);
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, fd=%p, ino=%"PRIu64" "
+ "*stbuf {%s}, *preparent {%s}, *postparent = "
+ "{%s})",
+ frame->root->unique, op_ret, fd, inode->ino,
+ statstr, preparentstr, postparentstr);
+
+ if (statstr)
+ GF_FREE(statstr);
+ if (preparentstr)
+ GF_FREE(preparentstr);
+ if (postparentstr)
+ GF_FREE(postparentstr);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ (*uidmap_revmap)(frame->root, &buf->ia_uid, &buf->ia_gid);
+
+ STACK_UNWIND_STRICT(create, frame, op_ret, op_errno, fd, inode, buf,
+ preparent, postparent);
+ return 0;
+}
+
+
+int
+uidmap_open_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd)
+{
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d, *fd=%p)",
+ frame->root->unique, op_ret, op_errno, fd);
+
+ STACK_UNWIND_STRICT(open, frame, op_ret, op_errno, fd);
+ return 0;
+}
+
+
+int
+uidmap_stat_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *buf)
+{
+ uint64_t ia_time = 0;
+ char atime_buf[256];
+ char mtime_buf[256];
+ char ctime_buf[256];
+
+ if (op_ret >= 0) {
+ ia_time = buf->ia_atime;
+ strftime(atime_buf, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = buf->ia_mtime;
+ strftime(mtime_buf, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = buf->ia_ctime;
+ strftime(ctime_buf, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, buf {"
+ "ia_ino=%"PRIu64", st_mode=%o, ia_nlink=%"GF_PRI_NLINK", "
+ "ia_uid=%d, ia_gid=%d, ia_rdev=%"PRIu64", ia_size=%"PRId64
+ ", ia_blksize=%"GF_PRI_BLKSIZE", ia_blocks=%"PRId64", "
+ "ia_atime=%s, ia_mtime=%s, ia_ctime=%s})",
+ frame->root->unique, op_ret, buf->ia_ino,
+ st_mode_from_ia(buf->ia_prot, buf->ia_type),
+ buf->ia_nlink, buf->ia_uid, buf->ia_gid,
+ buf->ia_rdev, buf->ia_size, buf->ia_blksize,
+ buf->ia_blocks, atime_buf, mtime_buf, ctime_buf);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ (*uidmap_revmap)(frame->root, &buf->ia_uid, &buf->ia_gid);
+
+ STACK_UNWIND_STRICT(stat, frame, op_ret, op_errno, buf);
+ return 0;
+}
+
+
+int
+uidmap_readv_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iovec *vector,
+ int32_t count, struct iatt *buf, struct iobref *iobref)
+{
+ uint64_t ia_time = 0;
+ char atime_buf[256];
+ char mtime_buf[256];
+ char ctime_buf[256];
+
+ if (op_ret >= 0) {
+ ia_time = buf->ia_atime;
+ strftime(atime_buf, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = buf->ia_mtime;
+ strftime(mtime_buf, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = buf->ia_ctime;
+ strftime(ctime_buf, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d, *buf {"
+ "ia_ino=%"PRIu64", st_mode=%o, ia_nlink=%"GF_PRI_NLINK", "
+ "ia_uid=%d, ia_gid=%d, ia_rdev=%"PRIu64", "
+ "ia_size=%"PRId64", ia_blksize=%"GF_PRI_BLKSIZE", "
+ "ia_blocks=%"PRId64", ia_atime=%s, ia_mtime=%s, ia_ctime=%s})",
+ frame->root->unique, op_ret, op_errno, buf->ia_ino,
+ st_mode_from_ia(buf->ia_prot, buf->ia_type),
+ buf->ia_nlink, buf->ia_uid, buf->ia_gid,
+ buf->ia_rdev, buf->ia_size, buf->ia_blksize, buf->ia_blocks,
+ atime_buf, mtime_buf, ctime_buf);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ (*uidmap_revmap)(frame->root, &buf->ia_uid, &buf->ia_gid);
+
+ STACK_UNWIND_STRICT(readv, frame, op_ret, op_errno, vector, count,
+ buf, iobref);
+ return 0;
+}
+
+
+int
+uidmap_writev_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *prebuf, struct iatt *postbuf)
+{
+ char *preopstr = NULL;
+ char *postopstr = NULL;
+
+ if (op_ret >= 0) {
+ preopstr = uidmap_stat_to_str(prebuf);
+ postopstr = uidmap_stat_to_str(postbuf);
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, ino = %"PRIu64
+ ", *prebuf = {%s}, *postbuf = {%s})",
+ frame->root->unique, op_ret, postbuf->ia_ino,
+ preopstr, postopstr);
+
+ if (preopstr)
+ GF_FREE(preopstr);
+
+ if (postopstr)
+ GF_FREE(postopstr);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ /* (*uidmap_revmap)(frame->root, &buf->ia_uid, &buf->ia_gid); */
+
+ STACK_UNWIND_STRICT(writev, frame, op_ret, op_errno, prebuf, postbuf);
+ return 0;
+}
+
+
+
+int
+uidmap_readdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, gf_dirent_t *buf)
+{
+ struct _gf_dirent_t *entry;
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64" :(op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+
+ list_for_each_entry(entry, &buf->list, list) {
+ (*uidmap_revmap)(frame->root, &entry->d_stat.ia_uid, &entry->d_stat.ia_gid);
+ }
+
+ STACK_UNWIND_STRICT(readdir, frame, op_ret, op_errno, buf);
+
+ return 0;
+}
+
+
+int
+uidmap_readdirp_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, gf_dirent_t *buf)
+{
+ struct _gf_dirent_t *entry = NULL;
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64" :(op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+
+ list_for_each_entry(entry, &buf->list, list) {
+ (*uidmap_revmap)(frame->root, &entry->d_stat.ia_uid, &entry->d_stat.ia_gid);
+ }
+
+ STACK_UNWIND_STRICT(readdirp, frame, op_ret, op_errno, buf);
+
+ return 0;
+}
+
+
+int
+uidmap_fsync_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *prebuf, struct iatt *postbuf)
+{
+ char *preopstr = NULL;
+ char *postopstr = NULL;
+
+ if (op_ret >= 0) {
+ preopstr = uidmap_stat_to_str(prebuf);
+ postopstr = uidmap_stat_to_str(postbuf);
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, ino = %"PRIu64
+ ", *prebuf = {%s}, *postbuf = {%s}",
+ frame->root->unique, op_ret, postbuf->ia_ino,
+ preopstr, postopstr);
+
+ if (preopstr)
+ GF_FREE(preopstr);
+
+ if (postopstr)
+ GF_FREE(postopstr);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ (*uidmap_revmap)(frame->root, &prebuf->ia_uid, &prebuf->ia_gid);
+ (*uidmap_revmap)(frame->root, &postbuf->ia_uid, &postbuf->ia_gid);
+
+ STACK_UNWIND_STRICT(fsync, frame, op_ret, op_errno, prebuf, postbuf);
+
+ return 0;
+}
+
+
+int
+uidmap_setattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *statpre, struct iatt *statpost)
+{
+ uint64_t ia_time = 0;
+ char atime_pre[256] = {0,};
+ char mtime_pre[256] = {0,};
+ char ctime_pre[256] = {0,};
+ char atime_post[256] = {0,};
+ char mtime_post[256] = {0,};
+ char ctime_post[256] = {0,};
+
+ if (op_ret >= 0) {
+ ia_time = statpre->ia_atime;
+ strftime(atime_pre, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = statpre->ia_mtime;
+ strftime(mtime_pre, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = statpre->ia_ctime;
+ strftime(ctime_pre, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = statpost->ia_atime;
+ strftime(atime_post, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = statpost->ia_mtime;
+ strftime(mtime_post, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = statpost->ia_ctime;
+ strftime(ctime_post, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, *statpre "
+ "{ia_ino=%"PRIu64", st_mode=%o, ia_uid=%d, "
+ "ia_gid=%d, ia_atime=%s, ia_mtime=%s, "
+ "ia_ctime=%s}, *statpost {ia_ino=%"PRIu64", "
+ "st_mode=%o, ia_uid=%d, ia_gid=%d, ia_atime=%s,"
+ " ia_mtime=%s, ia_ctime=%s})",
+ frame->root->unique, op_ret, statpre->ia_ino,
+ st_mode_from_ia(statpre->ia_prot, statpre->ia_type),
+ statpre->ia_uid,
+ statpre->ia_gid, atime_pre, mtime_pre,
+ ctime_pre, statpost->ia_ino,
+ st_mode_from_ia(statpost->ia_prot, statpost->ia_type),
+ statpost->ia_uid, statpost->ia_gid, atime_post,
+ mtime_post, ctime_post);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ (*uidmap_revmap)(frame->root, &statpre->ia_uid, &statpre->ia_gid);
+ (*uidmap_revmap)(frame->root, &statpost->ia_uid, &statpost->ia_gid);
+
+ STACK_UNWIND_STRICT(setattr, frame, op_ret, op_errno, statpre, statpost);
+ return 0;
+}
+
+
+int
+uidmap_fsetattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *statpre, struct iatt *statpost)
+{
+ uint64_t ia_time = 0;
+ char atime_pre[256] = {0,};
+ char mtime_pre[256] = {0,};
+ char ctime_pre[256] = {0,};
+ char atime_post[256] = {0,};
+ char mtime_post[256] = {0,};
+ char ctime_post[256] = {0,};
+
+ if (op_ret >= 0) {
+ ia_time = statpre->ia_atime;
+ strftime(atime_pre, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = statpre->ia_mtime;
+ strftime(mtime_pre, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = statpre->ia_ctime;
+ strftime(ctime_pre, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = statpost->ia_atime;
+ strftime(atime_post, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = statpost->ia_mtime;
+ strftime(mtime_post, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = statpost->ia_ctime;
+ strftime(ctime_post, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, *statpre "
+ "{ia_ino=%"PRIu64", st_mode=%o, ia_uid=%d, "
+ "ia_gid=%d, ia_atime=%s, ia_mtime=%s, "
+ "ia_ctime=%s}, *statpost {ia_ino=%"PRIu64", "
+ "st_mode=%o, ia_uid=%d, ia_gid=%d, ia_atime=%s,"
+ " ia_mtime=%s, ia_ctime=%s})",
+ frame->root->unique, op_ret, statpre->ia_ino,
+ st_mode_from_ia(statpre->ia_prot, statpre->ia_type),
+ statpre->ia_uid,
+ statpre->ia_gid, atime_pre, mtime_pre,
+ ctime_pre, statpost->ia_ino,
+ st_mode_from_ia(statpost->ia_prot, statpost->ia_type),
+ statpost->ia_uid, statpost->ia_gid, atime_post,
+ mtime_post, ctime_post);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ (*uidmap_revmap)(frame->root, &statpre->ia_uid, &statpre->ia_gid);
+ (*uidmap_revmap)(frame->root, &statpost->ia_uid, &statpost->ia_gid);
+
+ STACK_UNWIND_STRICT(fsetattr, frame, op_ret, op_errno,
+ statpre, statpost);
+ return 0;
+}
+
+
+int
+uidmap_unlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *preparent, struct iatt *postparent)
+{
+ char *preparentstr = NULL;
+ char *postparentstr = NULL;
+
+ if (op_ret >= 0) {
+ preparentstr = uidmap_stat_to_str(preparent);
+ postparentstr = uidmap_stat_to_str(postparent);
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, *preparent = {%s}, "
+ "*postparent = {%s})",
+ frame->root->unique, op_ret, preparentstr,
+ postparentstr);
+
+ if (preparentstr)
+ GF_FREE(preparentstr);
+
+ if (postparentstr)
+ GF_FREE(postparentstr);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ (*uidmap_revmap)(frame->root, &preparent->ia_uid, &preparent->ia_gid);
+ (*uidmap_revmap)(frame->root, &postparent->ia_uid, &postparent->ia_gid);
+
+ STACK_UNWIND_STRICT(unlink, frame, op_ret, op_errno,
+ preparent, postparent);
+ return 0;
+}
+
+
+int
+uidmap_rename_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *buf,
+ struct iatt *preoldparent, struct iatt *postoldparent,
+ struct iatt *prenewparent, struct iatt *postnewparent)
+{
+ char *statstr = NULL;
+ char *preoldparentstr = NULL;
+ char *postoldparentstr = NULL;
+ char *prenewparentstr = NULL;
+ char *postnewparentstr = NULL;
+
+ if (op_ret >= 0) {
+ statstr = uidmap_stat_to_str(buf);
+ preoldparentstr = uidmap_stat_to_str(preoldparent);
+ postoldparentstr = uidmap_stat_to_str(postoldparent);
+
+ prenewparentstr = uidmap_stat_to_str(prenewparent);
+ postnewparentstr = uidmap_stat_to_str(postnewparent);
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, *stbuf = {%s}, "
+ "*preoldparent = {%s}, *postoldparent = {%s}"
+ " *prenewparent = {%s}, *postnewparent = {%s})",
+ frame->root->unique, op_ret, statstr,
+ preoldparentstr, postoldparentstr,
+ prenewparentstr, postnewparentstr);
+
+ if (preoldparentstr)
+ GF_FREE(preoldparentstr);
+
+ if (postoldparentstr)
+ GF_FREE(postoldparentstr);
+
+ if (prenewparentstr)
+ GF_FREE(prenewparentstr);
+
+ if (postnewparentstr)
+ GF_FREE(postnewparentstr);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d, buf {ia_ino=%"PRIu64"})",
+ frame->root->unique, op_ret, op_errno,
+ (buf? buf->ia_ino : 0));
+
+ (*uidmap_revmap)(frame->root, &buf->ia_uid, &buf->ia_gid);
+
+ STACK_UNWIND_STRICT(rename, frame, op_ret, op_errno, buf,
+ preoldparent, postoldparent,
+ prenewparent, postnewparent);
+ return 0;
+}
+
+
+int
+uidmap_readlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ const char *buf, struct iatt *stbuf)
+{
+ char *statstr = NULL;
+
+ if (op_ret == 0) {
+ statstr = uidmap_stat_to_str(stbuf);
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d, buf=%s, "
+ "stbuf = { %s })",
+ frame->root->unique, op_ret, op_errno, buf,
+ statstr);
+ if (statstr)
+ GF_FREE(statstr);
+ } else
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d",
+ frame->root->unique, op_ret, op_errno);
+
+ (*uidmap_revmap)(frame->root, &stbuf->ia_uid, &stbuf->ia_gid);
+
+ STACK_UNWIND_STRICT(readlink, frame, op_ret, op_errno, buf, stbuf);
+ return 0;
+}
+
+
+int
+uidmap_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ inode_t *inode, struct iatt *buf,
+ dict_t *xattr, struct iatt *postparent)
+{
+ char *statstr = NULL;
+ char *postparentstr = NULL;
+
+ if (op_ret >= 0) {
+ statstr = uidmap_stat_to_str(buf);
+ postparentstr = uidmap_stat_to_str(postparent);
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, ino=%"PRIu64", "
+ "*buf {%s}, *postparent {%s}",
+ frame->root->unique, op_ret, inode->ino,
+ statstr, postparentstr);
+
+ if (statstr)
+ GF_FREE(statstr);
+ if (postparentstr)
+ GF_FREE(postparentstr);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ (*uidmap_revmap)(frame->root, &buf->ia_uid, &buf->ia_gid);
+ (*uidmap_revmap)(frame->root, &postparent->ia_uid, &postparent->ia_gid);
+
+ STACK_UNWIND_STRICT(lookup, frame, op_ret, op_errno, inode, buf,
+ xattr, postparent);
+ return 0;
+}
+
+
+int
+uidmap_symlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ inode_t *inode, struct iatt *buf,
+ struct iatt *preparent, struct iatt *postparent)
+{
+ char *statstr = NULL;
+ char *preparentstr = NULL;
+ char *postparentstr = NULL;
+
+ if (op_ret >= 0) {
+ statstr = uidmap_stat_to_str(buf);
+ preparentstr = uidmap_stat_to_str(preparent);
+ postparentstr = uidmap_stat_to_str(postparent);
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, ino=%"PRIu64", "
+ "*stbuf = {%s}, *preparent = {%s}, "
+ "*postparent = {%s})",
+ frame->root->unique, op_ret, inode->ino,
+ statstr, preparentstr, postparentstr);
+
+ if (statstr)
+ GF_FREE(statstr);
+
+ if (preparentstr)
+ GF_FREE(preparentstr);
+
+ if (postparentstr)
+ GF_FREE(postparentstr);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ (*uidmap_revmap)(frame->root, &buf->ia_uid, &buf->ia_gid);
+
+ STACK_UNWIND_STRICT(symlink, frame, op_ret, op_errno, inode, buf,
+ preparent, postparent);
+ return 0;
+}
+
+
+int
+uidmap_mknod_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ inode_t *inode, struct iatt *buf,
+ struct iatt *preparent, struct iatt *postparent)
+{
+ char *statstr = NULL;
+ char *preparentstr = NULL;
+ char *postparentstr = NULL;
+
+ if (op_ret >= 0) {
+ statstr = uidmap_stat_to_str(buf);
+ preparentstr = uidmap_stat_to_str(preparent);
+ postparentstr = uidmap_stat_to_str(postparent);
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, ino=%"PRIu64", "
+ "*stbuf = {%s}, *preparent = {%s}, "
+ "*postparent = {%s})",
+ frame->root->unique, op_ret, inode->ino,
+ statstr, preparentstr, postparentstr);
+
+ if (statstr)
+ GF_FREE(statstr);
+
+ if (preparentstr)
+ GF_FREE(preparentstr);
+
+ if (postparentstr)
+ GF_FREE(postparentstr);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ (*uidmap_revmap)(frame->root, &buf->ia_uid, &buf->ia_gid);
+
+ STACK_UNWIND_STRICT(mknod, frame, op_ret, op_errno, inode, buf,
+ preparent, postparent);
+ return 0;
+}
+
+
+int
+uidmap_mkdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ inode_t *inode, struct iatt *buf,
+ struct iatt *preparent, struct iatt *postparent)
+{
+ char *statstr = NULL;
+ char *preparentstr = NULL;
+ char *postparentstr = NULL;
+
+ if (op_ret >= 0) {
+ statstr = uidmap_stat_to_str(buf);
+ preparentstr = uidmap_stat_to_str(preparent);
+ postparentstr = uidmap_stat_to_str(postparent);
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, ino = %"PRIu64
+ ", *stbuf = {%s}, *prebuf = {%s}, "
+ "*postbuf = {%s} )",
+ frame->root->unique, op_ret, buf->ia_ino,
+ statstr, preparentstr, postparentstr);
+
+ if (statstr)
+ GF_FREE(statstr);
+
+ if (preparentstr)
+ GF_FREE(preparentstr);
+
+ if (postparentstr)
+ GF_FREE(postparentstr);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ (*uidmap_revmap)(frame->root, &buf->ia_uid, &buf->ia_gid);
+
+ STACK_UNWIND_STRICT(mkdir, frame, op_ret, op_errno, inode, buf,
+ preparent, postparent);
+ return 0;
+}
+
+
+int
+uidmap_link_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ inode_t *inode, struct iatt *buf,
+ struct iatt *preparent, struct iatt *postparent)
+{
+ char *statstr = NULL;
+ char *preparentstr = NULL;
+ char *postparentstr = NULL;
+
+ if (op_ret >= 0) {
+ statstr = uidmap_stat_to_str(buf);
+ preparentstr = uidmap_stat_to_str(preparent);
+ postparentstr = uidmap_stat_to_str(postparent);
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, ino = %"PRIu64
+ ", *stbuf = {%s}, *prebuf = {%s}, "
+ "*postbuf = {%s})",
+ frame->root->unique, op_ret, buf->ia_ino,
+ statstr, preparentstr, postparentstr);
+
+ if (statstr)
+ GF_FREE(statstr);
+
+ if (preparentstr)
+ GF_FREE(preparentstr);
+
+ if (postparentstr)
+ GF_FREE(postparentstr);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ (*uidmap_revmap)(frame->root, &buf->ia_uid, &buf->ia_gid);
+
+ STACK_UNWIND_STRICT(link, frame, op_ret, op_errno, inode, buf,
+ preparent, postparent);
+ return 0;
+}
+
+
+int
+uidmap_flush_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
{
- uidmap_private_t *priv = this->private;
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+
+ STACK_UNWIND_STRICT(flush, frame, op_ret, op_errno);
+ return 0;
+}
+
+
+int
+uidmap_opendir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d, fd=%p)",
+ frame->root->unique, op_ret, op_errno, fd);
+
+ STACK_UNWIND_STRICT(opendir, frame, op_ret, op_errno, fd);
+ return 0;
+}
+
+
+int
+uidmap_rmdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *preparent, struct iatt *postparent)
+{
+ char *preparentstr = NULL;
+ char *postparentstr = NULL;
+
+ if (op_ret >= 0) {
+ preparentstr = uidmap_stat_to_str(preparent);
+ postparentstr = uidmap_stat_to_str(postparent);
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, *prebuf = {%s}, "
+ "*postbuf = {%s}",
+ frame->root->unique, op_ret, preparentstr,
+ postparentstr);
+
+ if (preparentstr)
+ GF_FREE(preparentstr);
+
+ if (postparentstr)
+ GF_FREE(postparentstr);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ (*uidmap_revmap)(frame->root, &preparent->ia_uid, &preparent->ia_gid);
+ (*uidmap_revmap)(frame->root, &postparent->ia_uid, &postparent->ia_gid);
+
+ STACK_UNWIND_STRICT(rmdir, frame, op_ret, op_errno,
+ preparent, postparent);
+ return 0;
+}
+
+
+int
+uidmap_truncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *prebuf, struct iatt *postbuf)
+{
+ char *preopstr = NULL;
+ char *postopstr = NULL;
+
+ if (op_ret >= 0) {
+ preopstr = uidmap_stat_to_str(prebuf);
+ postopstr = uidmap_stat_to_str(postbuf);
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, *prebuf = {%s}, "
+ "*postbuf = {%s} )",
+ frame->root->unique, op_ret, preopstr,
+ postopstr);
+
+ if (preopstr)
+ GF_FREE(preopstr);
+
+ if (postopstr)
+ GF_FREE(postopstr);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ (*uidmap_revmap)(frame->root, &prebuf->ia_uid, &prebuf->ia_gid);
+ (*uidmap_revmap)(frame->root, &postbuf->ia_uid, &postbuf->ia_gid);
+
+ STACK_UNWIND_STRICT(truncate, frame, op_ret, op_errno, prebuf, postbuf);
+ return 0;
+}
+
+
+int
+uidmap_statfs_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct statvfs *buf)
+{
+ if (op_ret >= 0) {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": ({f_bsize=%lu, f_frsize=%lu, f_blocks=%"GF_PRI_FSBLK
+ ", f_bfree=%"GF_PRI_FSBLK", f_bavail=%"GF_PRI_FSBLK", "
+ "f_files=%"GF_PRI_FSBLK", f_ffree=%"GF_PRI_FSBLK", f_favail=%"
+ GF_PRI_FSBLK", f_fsid=%lu, f_flag=%lu, f_namemax=%lu}) => ret=%d",
+ frame->root->unique, buf->f_bsize, buf->f_frsize, buf->f_blocks,
+ buf->f_bfree, buf->f_bavail, buf->f_files, buf->f_ffree,
+ buf->f_favail, buf->f_fsid, buf->f_flag, buf->f_namemax, op_ret);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ STACK_UNWIND_STRICT(statfs, frame, op_ret, op_errno, buf);
+ return 0;
+}
+
+
+int
+uidmap_setxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+
+ STACK_UNWIND_STRICT(setxattr, frame, op_ret, op_errno);
+ return 0;
+}
+
+
+int
+uidmap_getxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d, dict=%p)",
+ frame->root->unique, op_ret, op_errno, dict);
+
+ STACK_UNWIND_STRICT(getxattr, frame, op_ret, op_errno, dict);
+
+ return 0;
+}
+
+
+int
+uidmap_removexattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+
+ STACK_UNWIND_STRICT(removexattr, frame, op_ret, op_errno);
+
+ return 0;
+}
+
+
+int
+uidmap_fsyncdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+
+ STACK_UNWIND_STRICT(fsyncdir, frame, op_ret, op_errno);
+ return 0;
+}
+
+
+int
+uidmap_access_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+
+ STACK_UNWIND_STRICT(access, frame, op_ret, op_errno);
+ return 0;
+}
+
+
+int
+uidmap_ftruncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *prebuf, struct iatt *postbuf)
+{
+ char *prebufstr = NULL;
+ char *postbufstr = NULL;
+
+ if (op_ret >= 0) {
+ prebufstr = uidmap_stat_to_str(prebuf);
+ postbufstr = uidmap_stat_to_str(postbuf);
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, *prebuf = {%s}, "
+ "*postbuf = {%s} )",
+ frame->root->unique, op_ret,
+ prebufstr, postbufstr);
+
+ if (prebufstr)
+ GF_FREE(prebufstr);
+
+ if (postbufstr)
+ GF_FREE(postbufstr);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ /* pre/postbuf are only valid when the ftruncate succeeded */
+ if (op_ret >= 0) {
+ (*uidmap_revmap)(frame->root, &prebuf->ia_uid, &prebuf->ia_gid);
+ (*uidmap_revmap)(frame->root, &postbuf->ia_uid, &postbuf->ia_gid);
+ }
+
+ STACK_UNWIND_STRICT(ftruncate, frame, op_ret, op_errno, prebuf, postbuf);
+ return 0;
+}
+
+
+int
+uidmap_fstat_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *buf)
+{
+ uint64_t ia_time = 0;
+ char atime_buf[256] = {0, };
+ char mtime_buf[256] = {0, };
+ char ctime_buf[256] = {0, };
+
+ if (op_ret >= 0) {
+ ia_time = buf->ia_atime;
+ strftime(atime_buf, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = buf->ia_mtime;
+ strftime(mtime_buf, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = buf->ia_ctime;
+ strftime(ctime_buf, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, *buf {"
+ "ia_ino=%"PRIu64", st_mode=%o, ia_nlink=%"GF_PRI_NLINK", "
+ "ia_uid=%d, ia_gid=%d, ia_rdev=%"PRIu64", ia_size=%"PRId64", "
+ "ia_blksize=%"GF_PRI_BLKSIZE", ia_blocks=%"PRId64", ia_atime=%s, "
+ "ia_mtime=%s, ia_ctime=%s})",
+ frame->root->unique, op_ret, buf->ia_ino,
+ st_mode_from_ia(buf->ia_prot, buf->ia_type),
+ buf->ia_nlink, buf->ia_uid, buf->ia_gid,
+ buf->ia_rdev, buf->ia_size, buf->ia_blksize,
+ buf->ia_blocks, atime_buf, mtime_buf, ctime_buf);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ /* buf is only valid when the fstat succeeded */
+ if (op_ret >= 0)
+ (*uidmap_revmap)(frame->root, &buf->ia_uid, &buf->ia_gid);
+
+ STACK_UNWIND_STRICT(fstat, frame, op_ret, op_errno, buf);
+ return 0;
+}
+
+
+int
+uidmap_lk_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct gf_flock *lock)
+{
+ uid_t uid = 0;
+ gid_t gid = 0;
+ if (op_ret >= 0) {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, {l_type=%d, l_whence=%d, "
+ "l_start=%"PRId64", l_len=%"PRId64", l_pid=%u})",
+ frame->root->unique, op_ret, lock->l_type, lock->l_whence,
+ lock->l_start, lock->l_len, lock->l_pid);
+ } else {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+ }
+
+ /* reverse-map the lock owner's uid; gid is passed only to satisfy
+ * the revmap signature (assumes l_owner carries a mapped uid) */
+ uid = lock->l_owner;
+ (*uidmap_revmap)(frame->root, &uid, &gid);
+ lock->l_owner = uid;
+
+ STACK_UNWIND_STRICT(lk, frame, op_ret, op_errno, lock);
+ return 0;
+}
+
+
+
+int
+uidmap_entrylk_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": op_ret=%d, op_errno=%d",
+ frame->root->unique, op_ret, op_errno);
+
+ STACK_UNWIND_STRICT(entrylk, frame, op_ret, op_errno);
+ return 0;
+}
+
+
+int
+uidmap_xattrop_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+
+ STACK_UNWIND_STRICT(xattrop, frame, op_ret, op_errno, dict);
+ return 0;
+}
+
+
+int
+uidmap_fxattrop_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (op_ret=%d, op_errno=%d)",
+ frame->root->unique, op_ret, op_errno);
+
+ STACK_UNWIND_STRICT(fxattrop, frame, op_ret, op_errno, dict);
+ return 0;
+}
+
+
+int
+uidmap_inodelk_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": op_ret=%d, op_errno=%d",
+ frame->root->unique, op_ret, op_errno);
+
+ STACK_UNWIND_STRICT(inodelk, frame, op_ret, op_errno);
+ return 0;
+}
+
+
+int
+uidmap_finodelk_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": op_ret=%d, op_errno=%d",
+ frame->root->unique, op_ret, op_errno);
+
+ STACK_UNWIND_STRICT(finodelk, frame, op_ret, op_errno);
+ return 0;
+}
+
+
+int
+uidmap_entrylk(call_frame_t *frame, xlator_t *this,
+ const char *volume, loc_t *loc, const char *basename,
+ entrylk_cmd cmd, entrylk_type type)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": volume=%s, (loc= {path=%s, ino=%"PRIu64"} basename=%s, cmd=%s, type=%s)",
+ frame->root->unique, volume, loc->path, loc->inode->ino, basename,
+ ((cmd == ENTRYLK_LOCK) ? "ENTRYLK_LOCK" : "ENTRYLK_UNLOCK"),
+ ((type == ENTRYLK_RDLCK) ? "ENTRYLK_RDLCK" : "ENTRYLK_WRLCK"));
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_entrylk_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->entrylk,
+ volume, loc, basename, cmd, type);
+ return 0;
+}
+
+
+int
+uidmap_inodelk(call_frame_t *frame, xlator_t *this, const char *volume,
+ loc_t *loc, int32_t cmd, struct gf_flock *flock)
+{
+ char *cmd_str = NULL;
+ char *type_str = NULL;
+
+ switch (cmd) {
+#if F_GETLK != F_GETLK64
+ case F_GETLK64:
+#endif
+ case F_GETLK:
+ cmd_str = "GETLK";
+ break;
+
+#if F_SETLK != F_SETLK64
+ case F_SETLK64:
+#endif
+ case F_SETLK:
+ cmd_str = "SETLK";
+ break;
+
+#if F_SETLKW != F_SETLKW64
+ case F_SETLKW64:
+#endif
+ case F_SETLKW:
+ cmd_str = "SETLKW";
+ break;
+
+ default:
+ cmd_str = "UNKNOWN";
+ break;
+ }
+
+ switch (flock->l_type) {
+ case F_RDLCK:
+ type_str = "READ";
+ break;
+ case F_WRLCK:
+ type_str = "WRITE";
+ break;
+ case F_UNLCK:
+ type_str = "UNLOCK";
+ break;
+ default:
+ type_str = "UNKNOWN";
+ break;
+ }
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": volume=%s, (loc {path=%s, ino=%"PRIu64"}, "
+ "cmd=%s, type=%s, start=%llu, len=%llu, pid=%llu)",
+ frame->root->unique, volume, loc->path, loc->inode->ino,
+ cmd_str, type_str, (unsigned long long) flock->l_start,
+ (unsigned long long) flock->l_len,
+ (unsigned long long) flock->l_pid);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_inodelk_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->inodelk,
+ volume, loc, cmd, flock);
+ return 0;
+}
+
+
+int
+uidmap_finodelk(call_frame_t *frame, xlator_t *this, const char *volume,
+ fd_t *fd, int32_t cmd, struct gf_flock *flock)
+{
+ char *cmd_str = NULL, *type_str = NULL;
+
+ switch (cmd) {
+#if F_GETLK != F_GETLK64
+ case F_GETLK64:
+#endif
+ case F_GETLK:
+ cmd_str = "GETLK";
+ break;
+
+#if F_SETLK != F_SETLK64
+ case F_SETLK64:
+#endif
+ case F_SETLK:
+ cmd_str = "SETLK";
+ break;
+
+#if F_SETLKW != F_SETLKW64
+ case F_SETLKW64:
+#endif
+ case F_SETLKW:
+ cmd_str = "SETLKW";
+ break;
+
+ default:
+ cmd_str = "UNKNOWN";
+ break;
+ }
+
+ switch (flock->l_type) {
+ case F_RDLCK:
+ type_str = "READ";
+ break;
+ case F_WRLCK:
+ type_str = "WRITE";
+ break;
+ case F_UNLCK:
+ type_str = "UNLOCK";
+ break;
+ default:
+ type_str = "UNKNOWN";
+ break;
+ }
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": volume=%s, (fd =%p, ino=%"PRIu64"}, "
+ "cmd=%s, type=%s, start=%llu, len=%llu, pid=%llu)",
+ frame->root->unique, volume, fd, fd->inode->ino,
+ cmd_str, type_str, (unsigned long long) flock->l_start,
+ (unsigned long long) flock->l_len,
+ (unsigned long long) flock->l_pid);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_finodelk_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->finodelk,
+ volume, fd, cmd, flock);
+ return 0;
+}
+
+
+int
+uidmap_xattrop(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ gf_xattrop_flags_t flags, dict_t *dict)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (path=%s, ino=%"PRIu64" flags=%d)",
+ frame->root->unique, loc->path, loc->inode->ino, flags);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_xattrop_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->xattrop,
+ loc, flags, dict);
+
+ return 0;
+}
+
+
+int
+uidmap_fxattrop(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ gf_xattrop_flags_t flags, dict_t *dict)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (fd=%p, flags=%d)",
+ frame->root->unique, fd, flags);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_fxattrop_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fxattrop,
+ fd, flags, dict);
+
+ return 0;
+}
+
+
+int
+uidmap_lookup(call_frame_t *frame, xlator_t *this,
+ loc_t *loc, dict_t *xattr_req)
+{
+ /* TODO: print all the keys mentioned in xattr_req */
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (loc {path=%s, ino=%"PRIu64"})",
+ frame->root->unique, loc->path,
+ loc->inode->ino);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_lookup_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lookup,
+ loc, xattr_req);
+
+ return 0;
+}
+
+
+int
+uidmap_stat(call_frame_t *frame, xlator_t *this, loc_t *loc)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (loc {path=%s, ino=%"PRIu64"})",
+ frame->root->unique, loc->path, loc->inode->ino);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_stat_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->stat,
+ loc);
+
+ return 0;
+}
+
+
+int
+uidmap_readlink(call_frame_t *frame, xlator_t *this, loc_t *loc, size_t size)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (loc {path=%s, ino=%"PRIu64"}, size=%"GF_PRI_SIZET")",
+ frame->root->unique, loc->path, loc->inode->ino, size);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_readlink_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readlink,
+ loc, size);
+
+ return 0;
+}
+
+
+int
+uidmap_mknod(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ mode_t mode, dev_t dev, dict_t *params)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (loc {path=%s, ino=%"PRIu64"}, mode=%d, dev=%"GF_PRI_DEV")",
+ frame->root->unique, loc->path, loc->inode->ino, mode, dev);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_mknod_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mknod,
+ loc, mode, dev, params);
+
+ return 0;
+}
+
+
+int
+uidmap_mkdir(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ dict_t *params)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (path=%s, ino=%"PRIu64", mode=%d)",
+ frame->root->unique, loc->path,
+ ((loc->inode)? loc->inode->ino : 0), mode);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ gf_log(this->name, GF_LOG_NORMAL, "mkdir uid: %u, gid: %u",
+ frame->root->uid, frame->root->gid);
+
+ STACK_WIND(frame, uidmap_mkdir_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mkdir,
+ loc, mode, params);
+ return 0;
+}
+
+
+int
+uidmap_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (loc {path=%s, ino=%"PRIu64"})",
+ frame->root->unique, loc->path, loc->inode->ino);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_unlink_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->unlink,
+ loc);
+ return 0;
+}
+
+
+int
+uidmap_rmdir(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (loc {path=%s, ino=%"PRIu64"}, flags=%d)",
+ frame->root->unique, loc->path, loc->inode->ino, flags);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_rmdir_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rmdir,
+ loc, flags);
+
+ return 0;
+}
+
+
+int
+uidmap_symlink(call_frame_t *frame, xlator_t *this, const char *linkpath,
+ loc_t *loc, dict_t *params)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (linkpath=%s, loc {path=%s, ino=%"PRIu64"})",
+ frame->root->unique, linkpath, loc->path,
+ ((loc->inode)? loc->inode->ino : 0));
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_symlink_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->symlink,
+ linkpath, loc, params);
+
+ return 0;
+}
+
+
+int
+uidmap_rename(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (oldloc {path=%s, ino=%"PRIu64"}, "
+ "newloc{path=%s, ino=%"PRIu64"})",
+ frame->root->unique, oldloc->path, oldloc->ino,
+ newloc->path, newloc->ino);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_rename_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rename,
+ oldloc, newloc);
+
+ return 0;
+}
+
+
+int
+uidmap_link(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (oldloc {path=%s, ino=%"PRIu64"}, "
+ "newloc {path=%s, ino=%"PRIu64"})",
+ frame->root->unique, oldloc->path, oldloc->inode->ino,
+ newloc->path, newloc->inode->ino);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_link_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->link,
+ oldloc, newloc);
+ return 0;
+}
+
+
+int
+uidmap_setattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ struct iatt *stbuf, int32_t valid)
+{
+ uint64_t ia_time = 0;
+ char actime_str[256] = {0,};
+ char modtime_str[256] = {0,};
+
+ if (valid & GF_SET_ATTR_MODE) {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (loc {path=%s, ino=%"PRIu64"},"
+ " mode=%o)", frame->root->unique, loc->path,
+ loc->inode->ino,
+ st_mode_from_ia(stbuf->ia_prot, stbuf->ia_type));
+ }
+
+ if (valid & (GF_SET_ATTR_UID | GF_SET_ATTR_GID)) {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (loc {path=%s, ino=%"PRIu64"},"
+ " uid=%o, gid=%o)",
+ frame->root->unique, loc->path, loc->inode->ino,
+ stbuf->ia_uid, stbuf->ia_gid);
+ }
+
+ if (valid & (GF_SET_ATTR_ATIME | GF_SET_ATTR_MTIME)) {
+ ia_time = stbuf->ia_atime;
+ strftime(actime_str, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = stbuf->ia_mtime;
+ strftime(modtime_str, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (loc {path=%s, ino=%"PRIu64"}, "
+ "*stbuf=%p {ia_atime=%s, ia_mtime=%s})",
+ frame->root->unique, loc->path, loc->inode->ino,
+ stbuf, actime_str, modtime_str);
+ }
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_setattr_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setattr,
+ loc, stbuf, valid);
+
+ return 0;
+}
+
+
+int
+uidmap_fsetattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ struct iatt *stbuf, int32_t valid)
+{
+ uint64_t ia_time = 0;
+ char actime_str[256] = {0,};
+ char modtime_str[256] = {0,};
+
+ if (valid & GF_SET_ATTR_MODE) {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (*fd=%p, mode=%o)",
+ frame->root->unique, fd,
+ st_mode_from_ia(stbuf->ia_prot, stbuf->ia_type));
+ }
+
+ if (valid & (GF_SET_ATTR_UID | GF_SET_ATTR_GID)) {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (*fd=%p, uid=%o, gid=%o)",
+ frame->root->unique, fd,
+ stbuf->ia_uid, stbuf->ia_gid);
+ }
+
+ if (valid & (GF_SET_ATTR_ATIME | GF_SET_ATTR_MTIME)) {
+ ia_time = stbuf->ia_atime;
+ strftime(actime_str, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ ia_time = stbuf->ia_mtime;
+ strftime(modtime_str, 256, "[%b %d %H:%M:%S]",
+ localtime((time_t *)&ia_time));
+
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (*fd=%p"
+ "*stbuf=%p {ia_atime=%s, ia_mtime=%s})",
+ frame->root->unique, fd, stbuf, actime_str,
+ modtime_str);
+ }
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_fsetattr_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsetattr,
+ fd, stbuf, valid);
+
+ return 0;
+}
+
+
+int
+uidmap_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ off_t offset)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (loc {path=%s, ino=%"PRIu64"}, offset=%"PRId64")",
+ frame->root->unique, loc->path, loc->inode->ino, offset);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_truncate_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->truncate,
+ loc, offset);
+
+ return 0;
+}
+
+
+int
+uidmap_open(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ int32_t flags, fd_t *fd, int32_t wbflags)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (loc {path=%s, ino=%"PRIu64"}, flags=%d, "
+ "fd=%p, wbflags=%d)",
+ frame->root->unique, loc->path, loc->inode->ino, flags,
+ fd, wbflags);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_open_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->open,
+ loc, flags, fd, wbflags);
+ return 0;
+}
+
+
+int
+uidmap_create(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ int32_t flags, mode_t mode, fd_t *fd, dict_t *params)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (loc {path=%s, ino=%"PRIu64"}, flags=0%o mode=0%o)",
+ frame->root->unique, loc->path, loc->inode->ino, flags, mode);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ gf_log(this->name, GF_LOG_NORMAL, "create uid: %u, gid: %u",
+ frame->root->uid, frame->root->gid);
+
+ STACK_WIND(frame, uidmap_create_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->create,
+ loc, flags, mode, fd, params);
+ return 0;
+}
+
+
+int
+uidmap_readv(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ size_t size, off_t offset)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (*fd=%p, size=%"GF_PRI_SIZET", offset=%"PRId64")",
+ frame->root->unique, fd, size, offset);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_readv_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readv,
+ fd, size, offset);
+ return 0;
+}
+
+
+int
+uidmap_writev(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ struct iovec *vector, int32_t count,
+ off_t offset, struct iobref *iobref)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (*fd=%p, *vector=%p, count=%d, offset=%"PRId64")",
+ frame->root->unique, fd, vector, count, offset);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_writev_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->writev,
+ fd, vector, count, offset, iobref);
+ return 0;
+}
+
+
+int
+uidmap_statfs(call_frame_t *frame, xlator_t *this, loc_t *loc)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (loc {path=%s, ino=%"PRIu64"})",
+ frame->root->unique, loc->path,
+ ((loc->inode)? loc->inode->ino : 0));
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_statfs_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->statfs,
+ loc);
+ return 0;
+}
+
+
+int
+uidmap_flush(call_frame_t *frame, xlator_t *this, fd_t *fd)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (*fd=%p)", frame->root->unique, fd);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_flush_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->flush,
+ fd);
+ return 0;
+}
+
+
+int
+uidmap_fsync(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t flags)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (flags=%d, *fd=%p)", frame->root->unique, flags, fd);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_fsync_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsync,
+ fd, flags);
+ return 0;
+}
+
+
+int
+uidmap_setxattr(call_frame_t *frame, xlator_t *this,
+ loc_t *loc, dict_t *dict, int32_t flags)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (loc {path=%s, ino=%"PRIu64"}, dict=%p, flags=%d)",
+ frame->root->unique, loc->path,
+ ((loc->inode)? loc->inode->ino : 0), dict, flags);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_setxattr_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setxattr,
+ loc, dict, flags);
+ return 0;
+}
+
+
+int
+uidmap_getxattr(call_frame_t *frame, xlator_t *this,
+ loc_t *loc, const char *name)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (loc {path=%s, ino=%"PRIu64"}), name=%s",
+ frame->root->unique, loc->path,
+ ((loc->inode)? loc->inode->ino : 0), name);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_getxattr_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->getxattr,
+ loc, name);
+ return 0;
+}
+
+
+int
+uidmap_removexattr(call_frame_t *frame, xlator_t *this,
+ loc_t *loc, const char *name)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (loc {path=%s, ino=%"PRIu64"}, name=%s)",
+ frame->root->unique, loc->path,
+ ((loc->inode)? loc->inode->ino : 0), name);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_removexattr_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->removexattr,
+ loc, name);
+
+ return 0;
+}
+
+
+int
+uidmap_opendir(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64":( loc {path=%s, ino=%"PRIu64"}, fd=%p)",
+ frame->root->unique, loc->path, loc->inode->ino, fd);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_opendir_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->opendir,
+ loc, fd);
+ return 0;
+}
+
+int
+uidmap_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (fd=%p, size=%"GF_PRI_SIZET", offset=%"PRId64")",
+ frame->root->unique, fd, size, offset);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_readdirp_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readdirp,
+ fd, size, offset);
+
+ return 0;
+}
+
+
+int
+uidmap_readdir(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ size_t size, off_t offset)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (fd=%p, size=%"GF_PRI_SIZET", offset=%"PRId64")",
+ frame->root->unique, fd, size, offset);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_readdir_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readdir,
+ fd, size, offset);
+
+ return 0;
+}
+
+
+int
+uidmap_fsyncdir(call_frame_t *frame, xlator_t *this,
+ fd_t *fd, int32_t datasync)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (datasync=%d, *fd=%p)",
+ frame->root->unique, datasync, fd);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_fsyncdir_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsyncdir,
+ fd, datasync);
+ return 0;
+}
+
+
+int
+uidmap_access(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t mask)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (*loc {path=%s, ino=%"PRIu64"}, mask=0%o)",
+ frame->root->unique, loc->path,
+ ((loc->inode)? loc->inode->ino : 0), mask);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_access_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->access,
+ loc, mask);
+ return 0;
+}
+
+
+int
+uidmap_ftruncate(call_frame_t *frame, xlator_t *this,
+ fd_t *fd, off_t offset)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (offset=%"PRId64", *fd=%p)",
+ frame->root->unique, offset, fd);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_ftruncate_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->ftruncate,
+ fd, offset);
+
+ return 0;
+}
+
+
+int
+uidmap_fstat(call_frame_t *frame, xlator_t *this, fd_t *fd)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (*fd=%p)", frame->root->unique, fd);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_fstat_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fstat,
+ fd);
+ return 0;
+}
+
+
+int
+uidmap_lk(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ int32_t cmd, struct gf_flock *lock)
+{
+ gf_log(this->name, GF_LOG_NORMAL,
+ "%"PRId64": (*fd=%p, cmd=%d, lock {l_type=%d, l_whence=%d, "
+ "l_start=%"PRId64", l_len=%"PRId64", l_pid=%u})",
+ frame->root->unique, fd, cmd, lock->l_type, lock->l_whence,
+ lock->l_start, lock->l_len, lock->l_pid);
+
+ if ((*uidmap_map)(frame->root, this) == -1)
+ return -1;
+
+ STACK_WIND(frame, uidmap_lk_cbk,
+ FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lk,
+ fd, cmd, lock);
+ return 0;
+}
+
+
+int32_t
+init(xlator_t *this)
+{
+ char *uid_range = NULL, *gid_range = NULL;
+ char *plugin = NULL;
+ void *handle = NULL;
+ char *root_squash = NULL;
+ char *tenant_override = NULL;
+ init_fn plugin_init = NULL;
+
+ if (!this)
+ return -1;
+
+ gf_log_set_loglevel(GF_LOG_NORMAL);
+
+ if (!this->children || this->children->next) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "uidmap translator requires one subvolume");
+ return -1;
+ }
+ if (!this->parents) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "dangling volume. check volfile ");
+ }
+
+ uid_range = data_to_str(dict_get(this->options, "uid-range"));
+ if (uid_range != NULL && *uid_range != 0) {
+ uid_t low, hi;
+ if (sscanf(uid_range, "%u-%u", &low, &hi) == 2) {
+ uidmap_low_uid = low; uidmap_hi_uid = hi;
+ uidmap_mapping.im_uid_low = uidmap_mapping.im_uid_next = low;
+ uidmap_mapping.im_uid_high = hi;
+ } else {
+ gf_log(this->name, GF_LOG_ERROR,
+ "invalid uid-range in config");
+ }
+ }
+
+ gid_range = data_to_str(dict_get(this->options, "gid-range"));
+ if (gid_range != NULL && *gid_range != 0) {
+ gid_t low, hi;
+ if (sscanf(gid_range, "%u-%u", &low, &hi) == 2) {
+ uidmap_low_gid = low; uidmap_hi_gid = hi;
+ uidmap_mapping.im_gid_low = uidmap_mapping.im_gid_next = low;
+ uidmap_mapping.im_gid_high = hi;
+ } else {
+ gf_log(this->name, GF_LOG_ERROR,
+ "invalid gid-range in config");
+ }
+ }
+
+ root_squash = data_to_str(dict_get(this->options, "root-squash"));
+ if (root_squash != NULL && *root_squash != 0) {
+ char* ptr = root_squash;
+ for (; *ptr != 0; ptr++)
+ *ptr = tolower(*ptr);
+ if (strcmp(root_squash, "no") == 0)
+ uidmap_root_squash = 0;
+ }
+
+ tenant_override = data_to_str(dict_get(this->options, "tenant-override"));
+ if (tenant_override != NULL) {
+ uidmap_tenant_override = strdup(tenant_override);
+ }
+
+ plugin = data_to_str(dict_get(this->options, "uidmap-plugin"));
+
+ if (plugin != NULL && *plugin != 0) {
+ gf_log(this->name, GF_LOG_NORMAL, "loading: %s", plugin);
+ handle = dlopen(plugin, RTLD_NOW|RTLD_NODELETE);
+ if (handle) {
+ do {
+ uidmap_map = (map_fn) dlsym(handle, "map");
+ if (uidmap_map == NULL) {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "plugin missing map: %s", dlerror());
+ break;
+ }
+
+ uidmap_revmap = (revmap_fn) dlsym(handle, "revmap");
+ if (uidmap_revmap == NULL) {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "plugin missing revmap: %s", dlerror());
+ break;
+ }
+
+ uidmap_plugin_fini = (fini_fn) dlsym(handle, "fini");
+ if (uidmap_plugin_fini == NULL) {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "plugin missing fini: %s", dlerror());
+ break;
+ }
+
+ plugin_init = (init_fn) dlsym(handle, "init");
+ if (plugin_init == NULL) {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "plugin missing init: %s", dlerror());
+ } else {
+ if ((*plugin_init)(this, uidmap_tenant_override,
+ uidmap_low_uid, uidmap_hi_uid,
+ uidmap_low_gid, uidmap_hi_gid) != 0) {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "plugin init error");
+ }
+ }
+ } while (0);
+
+ if (dlclose(handle)) {
+ gf_log(this->name, GF_LOG_NORMAL,
+ "dlclose plugin: %s", dlerror());
+ }
+ }
+ }
+ if (uidmap_map == NULL) {
+ if (uidmap_deserialize_default(this->name) == -1)
+ return -1;
+ uidmap_map = uidmap_map_default;
+ uidmap_revmap = uidmap_revmap_default;
+ }
+
+ return 0;
+}
+
+void
+fini(xlator_t *this)
+{
+ if (!this)
+ return;
+
+ if (uidmap_plugin_fini)
+ uidmap_plugin_fini(this);
- GF_FREE(priv->db_path);
- FREE (priv);
+ gf_log(this->name, GF_LOG_NORMAL, "uidmap translator unloaded");
+ return;
}
struct xlator_fops fops = {
- .open = uidmap_open,
+ .stat = uidmap_stat,
+ .readlink = uidmap_readlink,
+ .mknod = uidmap_mknod,
+ .mkdir = uidmap_mkdir,
+ .unlink = uidmap_unlink,
+ .rmdir = uidmap_rmdir,
+ .symlink = uidmap_symlink,
+ .rename = uidmap_rename,
+ .link = uidmap_link,
+ .truncate = uidmap_truncate,
+ .open = uidmap_open,
+ .readv = uidmap_readv,
+ .writev = uidmap_writev,
+ .statfs = uidmap_statfs,
+ .flush = uidmap_flush,
+ .fsync = uidmap_fsync,
+ .setxattr = uidmap_setxattr,
+ .getxattr = uidmap_getxattr,
+ .removexattr = uidmap_removexattr,
+ .opendir = uidmap_opendir,
+ .readdir = uidmap_readdir,
+ .readdirp = uidmap_readdirp,
+ .fsyncdir = uidmap_fsyncdir,
+ .access = uidmap_access,
+ .ftruncate = uidmap_ftruncate,
+ .fstat = uidmap_fstat,
+ .create = uidmap_create,
+ .lk = uidmap_lk,
+ .inodelk = uidmap_inodelk,
+ .finodelk = uidmap_finodelk,
+ .entrylk = uidmap_entrylk,
+ .lookup = uidmap_lookup,
+ .xattrop = uidmap_xattrop,
+ .fxattrop = uidmap_fxattrop,
+ .setattr = uidmap_setattr,
+ .fsetattr = uidmap_fsetattr,
};
struct xlator_cbks cbks = {
};
struct volume_options options[] = {
- { .key = {"database"},
- .type = GF_OPTION_TYPE_PATH,
- .description = { "SQLite3 database" }
- },
- { .key = {"tenant"},
- .type = GF_OPTION_TYPE_INT,
- .description = { "Tenant ID" }
- },
+ { .key = {"uidmap-plugin", "plugin"},
+ .type = GF_OPTION_TYPE_PATH,
+ },
+ { .key = {"root-squash"},
+ .type = GF_OPTION_TYPE_STR,
+ .value = { "yes", "no"}
+ },
+ { .key = {"tenant-override", "override"},
+ .type = GF_OPTION_TYPE_STR
+ },
+ { .key = {"uid-range"},
+ .type = GF_OPTION_TYPE_STR,
+ },
+ { .key = {"gid-range"},
+ .type = GF_OPTION_TYPE_STR,
+ },
+ { .key = {NULL} },
};
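
For anyone wanting to try the translator, a volume-file stanza exercising these options might look roughly like this; the type string, plugin path, range values and subvolume name below are illustrative guesses rather than anything defined by the patch:

volume uidmap
    type features/uidmap
    subvolumes posix-brick
    option uid-range 100000-165535
    option gid-range 100000-165535
    option root-squash yes
    # optional: load an external mapper instead of the built-in default
    option uidmap-plugin /usr/lib64/glusterfs/uidmap/sample_plugin.so
end-volume
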
diff --git a/xlators/features/uidmap/src/uidmap.h b/xlators/features/uidmap/src/uidmap.h
index 861f3b0..864bd5e 100644
--- a/xlators/features/uidmap/src/uidmap.h
+++ b/xlators/features/uidmap/src/uidmap.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011 Red Hat, Inc.
+ * Copyright © 2011 Red Hat, Inc.
*
* This file is part of CloudFS.
*
@@ -17,18 +17,15 @@
* along with CloudFS. If not, see <http://www.gnu.org/licenses/>.
*/
-
#ifndef __UIDMAP_H__
#define __UIDMAP_H__
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
+#include <sys/types.h>
-typedef struct {
- char *db_path;
- uid_t tenant;
-} uidmap_private_t;
+typedef int (* map_fn)(struct _call_stack_t *, struct _xlator *);
+typedef void (* revmap_fn)(struct _call_stack_t *, uid_t *, gid_t *);
+typedef int32_t (* init_fn)(struct _xlator *, char *, uid_t, uid_t, gid_t, gid_t);
+typedef void (* fini_fn)(struct _xlator *);
#endif /* __UIDMAP_H__ */
+
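The four typedefs above are the whole contract that init() resolves with dlsym() ("init", "map", "revmap", "fini"). As a rough illustration of what a range-shifting plugin built against this interface could look like; the offset arithmetic and the GlusterFS headers pulled in are assumptions of mine, only the symbol names and signatures come from the patch:

/* sample_plugin.c: minimal uidmap plugin sketch (illustrative only) */
#include <sys/types.h>
#include "xlator.h"   /* struct _xlator (assumed GlusterFS header)            */
#include "stack.h"    /* struct _call_stack_t with uid/gid fields (assumed)   */
#include "uidmap.h"

static uid_t uid_low, uid_high;
static gid_t gid_low, gid_high;

int32_t
init(struct _xlator *this, char *tenant,
     uid_t ulow, uid_t uhigh, gid_t glow, gid_t ghigh)
{
        /* remember the ranges handed down from the translator's options */
        uid_low = ulow; uid_high = uhigh;
        gid_low = glow; gid_high = ghigh;
        return 0;
}

/* shift the caller's ids into the tenant's range before the fop is
 * wound down to storage; returning -1 refuses the fop */
int
map(struct _call_stack_t *root, struct _xlator *this)
{
        if (uid_low + root->uid > uid_high ||
            gid_low + root->gid > gid_high)
                return -1;
        root->uid += uid_low;
        root->gid += gid_low;
        return 0;
}

/* undo the shift on ids travelling back up in iatt/flock buffers */
void
revmap(struct _call_stack_t *root, uid_t *uid, gid_t *gid)
{
        if (*uid >= uid_low && *uid <= uid_high)
                *uid -= uid_low;
        if (*gid >= gid_low && *gid <= gid_high)
                *gid -= gid_low;
}

void
fini(struct _xlator *this)
{
        /* nothing allocated in this sketch */
}

Built as a shared object and named in the uidmap-plugin option, something along these lines would take the place of the built-in default mapping that init() otherwise falls back to.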
12 years, 4 months
Branch 'cloudfsd' - scripts/cfs_add_directory.py scripts/cfs_add_tenant.py scripts/cfs_add_volume.py scripts/cfs_delete_tenant.py scripts/cfs_enable_tenant.py scripts/cfs_mount.py scripts/cfs_paths.py scripts/cfs_rm_volume.py scripts/cfs_start_volume.py scripts/cfs_stop_volume.py scripts/cfs_utils.py scripts/cloudfsd.py scripts/README.ssl scripts/ssl_examples ToDo
by Jeff Darcy
ToDo | 1
scripts/README.ssl | 84 +++++++++++++++++++++++++++++++
scripts/cfs_add_directory.py | 17 +-----
scripts/cfs_add_tenant.py | 18 +-----
scripts/cfs_add_volume.py | 18 +-----
scripts/cfs_delete_tenant.py | 16 +-----
scripts/cfs_enable_tenant.py | 18 +-----
scripts/cfs_mount.py | 20 ++++++-
scripts/cfs_paths.py | 1
scripts/cfs_rm_volume.py | 16 +-----
scripts/cfs_start_volume.py | 16 +-----
scripts/cfs_stop_volume.py | 16 +-----
scripts/cfs_utils.py | 49 ++++++++++++++++++
scripts/cloudfsd.py | 106 ++++++++++++++++++++++++++++++++++++++--
scripts/ssl_examples/admin.key | 15 +++++
scripts/ssl_examples/admin.pem | 16 ++++++
scripts/ssl_examples/admin.pfx |binary
scripts/ssl_examples/root.pem | 48 ++++++++++++++++++
scripts/ssl_examples/server.key | 15 +++++
scripts/ssl_examples/server.pem | 16 ++++++
scripts/ssl_examples/tenant.key | 15 +++++
scripts/ssl_examples/tenant.pem | 16 ++++++
22 files changed, 419 insertions(+), 118 deletions(-)
New commits:
commit 095f7664f5731ca79cebace7e7959fbbb6701202
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Mon May 16 15:20:02 2011 -0400
Added SSL support (disabled for now).
See scripts/README.ssl for more information.
diff --git a/ToDo b/ToDo
index 2fbbcb9..48a0dc9 100644
--- a/ToDo
+++ b/ToDo
@@ -2,7 +2,6 @@
(nothing left)
= Medium Priority =
-SSL
Sanitize volume/tenant names etc. to avoid XSS/injection
Add/document code to generate brick list for volume creation
Handle IPv6, multi-homed hosts, localhost in scan_gfs_volfiles
diff --git a/scripts/README.ssl b/scripts/README.ssl
new file mode 100644
index 0000000..3547f6b
--- /dev/null
+++ b/scripts/README.ssl
@@ -0,0 +1,84 @@
+= How to set up management SSL =
+
+The CloudFS management system uses three roles, each identified by a separate
+certificate. Only the commonName field of each certificate is checked, as
+follows.
+
+ * The servers themselves (commonName="The Server") represented by
+ PEM format certificate and key files.
+
+ * An administrator (commonName="Super User") represented by a PKCS#12
+ format file. This file is intended for loading into browsers; it
+ includes both certificate and key.
+
+ * A generic tenant (commonName="A Tenant") represented by PEM format
+ certificate and key. This is used only for management operations,
+ and is separate from certificates/keys used for the low-level SSL
+ transport (though for convenience this certificate might be used as
+ the signing key for those certificates).
+
+Here is an example of how to create the administrator certificate. The others
+are similar except that they don't require the last step.
+
+ # Generate key.
+ openssl genrsa 1024 > admin.key
+
+ # Generate PEM certificate.
+ openssl req -new -x509 -key admin.key -out admin.pem
+
+ # Convert to a PKCS#12 certificate.
+ openssl pkcs12 -export -in admin.pem -inkey admin.key \
+ -name "Super User" > admin.pfx
+
+There is a directory of sample certificates/keys included with the code, in
+.../scripts/ssl_examples. Note that the file names do matter, as they are
+embedded in the code.
+
+ * server.key, server.pem: servers' private key and certificate (PEM)
+
+ * admin.key, admin.pem: admin's private key and certificate (PEM)
+
+ * admin.pfx: admin's combined certificate (PKCS#12, export pw = "fubar")
+
+ * tenant.key, tenant.pem: tenants' private key and certificate (PEM)
+
+ * root.pem: "certificate authority" list for servers (multiple PEM).
+
+The last issue is who trusts whom. This is represented by which certificates
+are loaded into an endpoint's "certificate authority" file (which in our case
+actually includes the certificates themselves rather than the authorities).
+The servers need to trust each other for internal calls, admins for external
+ones, and tenants for the specific operations used when mounting. Thus, the
+easiest thing to do is:
+
+ cat server.pem admin.pem tenant.pem > root.pem
+
+Clients don't particularly need to trust anyone, so no action is needed there.
+
+== Appendix A: configuration for Firefox ==
+
+To load the admin certificate in Firefox, do the following:
+
+ * Select the "Edit/Preferences" menu item.
+
+ * Go to the "Advanced" section, "Encryption" tab.
+
+ * Click the "View Certificates" button.
+
+ * Click the "Import..." button and select the .pfx file.
+
+== Appendix B: configuration for Chrome ==
+
+To load the admin certificate in Chrome, do the following:
+
+ * Select the "Preferences" item from the configuration menu (the little
+ spanner at the top right).
+
+ * Click on "Under the Hood"
+
+ * Scroll down to "Security" and click on the "Manage Certificates..."
+ button.
+
+ * Click on the "Import..." button and select the .pfx file.
+
+
diff --git a/scripts/cfs_add_directory.py b/scripts/cfs_add_directory.py
index 1c9f0b9..a1b6585 100755
--- a/scripts/cfs_add_directory.py
+++ b/scripts/cfs_add_directory.py
@@ -6,7 +6,6 @@ import socket
import string
import sys
import urllib
-import urllib2
from bottle import request, template
import cfs_utils
@@ -101,19 +100,9 @@ def add_local (path):
def run_common (path):
node_list = cfs_utils.get_members()
blob = []
- for node in node_list:
- scratch = [node, []]
- if socket.gethostbyname(node) in cfs_utils.local_addrs:
- url_obj = [add_local(path)]
- else:
- url = "http://%s:%d/volumes/add_dir_local" % (
- node, cfs_paths.CLOUDFSD_PORT)
- data = urllib.urlencode([("path",path)])
- url_obj = urllib2.urlopen(url,data=data)
- for line in url_obj:
- scratch[1].append(line)
- blob.append(scratch)
- return blob
+ data = urllib.urlencode([("path",path)])
+ return cfs_utils.do_all(blob,node_list,"volumes/add_dir_local",data,
+ add_local, path)
def run_www ():
path = request.forms.get("path")
diff --git a/scripts/cfs_add_tenant.py b/scripts/cfs_add_tenant.py
index d460367..83982ce 100755
--- a/scripts/cfs_add_tenant.py
+++ b/scripts/cfs_add_tenant.py
@@ -6,7 +6,6 @@ import socket
import string
import sys
import urllib
-import urllib2
from bottle import request, template
import cfs_utils
@@ -22,20 +21,9 @@ def add_local (tn_name, tn_pw):
def run_common (tn_name, tn_pw):
node_list = cfs_utils.get_members()
blob = []
- for node in node_list:
- scratch = [node, []]
- if socket.gethostbyname(node) in cfs_utils.local_addrs:
- url_obj = [add_local(tn_name,tn_pw)]
- else:
- url = "http://%s:%d/tenants/add_local" % (
- node, cfs_paths.CLOUDFSD_PORT)
- data = urllib.urlencode([("tn_name",tn_name),
- ("tn_pw",tn_pw)])
- url_obj = urllib2.urlopen(url,data=data)
- for line in url_obj:
- scratch[1].append(line)
- blob.append(scratch)
- return blob
+ data = urllib.urlencode([("tn_name",tn_name), ("tn_pw",tn_pw)])
+ return cfs_utils.do_all(blob,node_list,"tenants/add_local",data,
+ add_local, tn_name, tn_pw)
def run_www ():
tn_name = request.forms.get("tn_name")
diff --git a/scripts/cfs_add_volume.py b/scripts/cfs_add_volume.py
index 32bdc60..a00e2bc 100755
--- a/scripts/cfs_add_volume.py
+++ b/scripts/cfs_add_volume.py
@@ -6,7 +6,6 @@ import socket
import string
import sys
import urllib
-import urllib2
from bottle import request, template
import cfs_paths
@@ -29,19 +28,10 @@ def run_common (vname, vtype, vcount, bricks):
if sts:
return [["gluster",["command failed with %d"%sts]]]
blob = []
- for node in cfs_utils.get_nodes_for_vol(vname):
- scratch = [node,[]]
- if socket.gethostbyname(node) in cfs_utils.local_addrs:
- url_obj = [add_local(vname)]
- else:
- url = "http://%s:%d/volumes/add_vol_local" % (
- node, cfs_paths.CLOUDFSD_PORT)
- data = urllib.urlencode([("vname",vname)])
- url_obj = urllib2.urlopen(url,data=data)
- for line in url_obj:
- scratch[1].append(line)
- blob.append(scratch)
- return blob
+ node_list = cfs_utils.get_nodes_for_vol(vname)
+ data = urllib.urlencode([("vname",vname)])
+ return cfs_utils.do_all(blob,node_list,"volumes/add_vol_local",data,
+ add_local, vname)
def run_www ():
volume_id = request.forms.get("VOLUMEID")
diff --git a/scripts/cfs_delete_tenant.py b/scripts/cfs_delete_tenant.py
index 92c11d7..e921783 100755
--- a/scripts/cfs_delete_tenant.py
+++ b/scripts/cfs_delete_tenant.py
@@ -5,7 +5,6 @@ import re
import socket
import string
import sys
-import urllib2
from bottle import request, template
import cfs_utils
@@ -21,18 +20,9 @@ def delete_local (tn_name):
def run_common (tn_name):
node_list = cfs_utils.get_members()
blob = []
- for node in node_list:
- scratch = [node, []]
- if socket.gethostbyname(node) in cfs_utils.local_addrs:
- url_obj = [delete_local(tn_name)]
- else:
- url = "http://%s:%d/tenants/%s/delete_local" % (
- node, cfs_paths.CLOUDFSD_PORT, tn_name)
- url_obj = urllib2.urlopen(url)
- for line in url_obj:
- scratch[1].append(line)
- blob.append(scratch)
- return blob
+ return cfs_utils.do_all(blob,node_list,
+ "tenants/%s/delete_local" % tn_name, None,
+ delete_local, tn_name)
def run_www (tn_name):
blob = run_common(tn_name)
diff --git a/scripts/cfs_enable_tenant.py b/scripts/cfs_enable_tenant.py
index 146c785..30eb848 100755
--- a/scripts/cfs_enable_tenant.py
+++ b/scripts/cfs_enable_tenant.py
@@ -6,7 +6,6 @@ import socket
import string
import sys
import urllib
-import urllib2
from bottle import request, template
import cfs_paths
@@ -36,20 +35,9 @@ def run_common (tn_name, vol_list):
vol_list = string.join(vol_list,",")
node_list = cfs_utils.get_members()
blob = []
- for node in node_list:
- scratch = [node, []]
- if socket.gethostbyname(node) in cfs_utils.local_addrs:
- url_obj = [enable_local(tn_name,vol_list)]
- else:
- url = "http://%s:%d/tenants/enable_local" % (
- node, cfs_paths.CLOUDFSD_PORT)
- data = urllib.urlencode([("tn_name",tn_name),
- ("vol_list",vol_list)])
- url_obj = urllib2.urlopen(url,data=data)
- for line in url_obj:
- scratch[1].append(line)
- blob.append(scratch)
- return blob
+ data = urllib.urlencode([("tn_name",tn_name), ("vol_list",vol_list)])
+ return cfs_utils.do_all(blob,node_list,"tenants/enable_local",data,
+ enable_local, tn_name, vol_list)
def run_www (tn_name):
vol_list = []
diff --git a/scripts/cfs_mount.py b/scripts/cfs_mount.py
index af4ad06..b3226e0 100755
--- a/scripts/cfs_mount.py
+++ b/scripts/cfs_mount.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python
+import httplib
import json
import os
import sys
@@ -9,6 +10,18 @@ import cfs_paths
import cfs_utils
import volfilter
+if cfs_utils.use_ssl:
+ # Copied from cfs_utils (see long "monkey patch" comment there,
+ # modified to add tenant certs instead of server.
+ def my_hacked_init (self, *args, **kwargs):
+ apply(httplib.HTTPConnection.__init__,(self,)+args,kwargs)
+ self.key_file = "tenant.key"
+ self.cert_file = "tenant.pem"
+ httplib.HTTPSConnection.__init__ = my_hacked_init
+ proto = "https"
+else:
+ proto = "http"
+
# Simple cache of brick-to-port mappings, so we don't have to keep re-fetching
# the maps from the same host if it has multiple bricks.
class mapper:
@@ -18,8 +31,8 @@ class mapper:
if self.cache.has_key(host):
mydict = self.cache[host]
else:
- url = "http://%s:%d/%s/map" % \
- (host, cfs_paths.CLOUDFSD_PORT, volume)
+ url = "%s://%s:%d/%s/map" % \
+ (proto, host, cfs_paths.CLOUDFSD_PORT, volume)
mydict = json.load(urllib2.urlopen(url))
self.cache[host] = mydict
if mydict.has_key(subv):
@@ -37,7 +50,8 @@ if __name__ == "__main__":
(host, volume, username, password, mount) = sys.argv[1:6]
# Fetch the GlusterFS client-side volfile.
- url = "http://%s:%d/%s/fetch" % (host, cfs_paths.CLOUDFSD_PORT, volume)
+ url = "%s://%s:%d/%s/fetch" % \
+ (proto, host, cfs_paths.CLOUDFSD_PORT, volume)
vol_file = urllib2.urlopen(url)
# Load the volfile and clean out some of the crud.
diff --git a/scripts/cfs_paths.py b/scripts/cfs_paths.py
index bf47ebd..2f9085b 100644
--- a/scripts/cfs_paths.py
+++ b/scripts/cfs_paths.py
@@ -10,4 +10,3 @@ idle_subdir = os.path.join(pid_dir,".idle_ports")
used_subdir = os.path.join(pid_dir,".used_ports")
volfile_re = re.compile("(?P<vol>[^.]+)\.(?P<node>.+)\.(?P<path>[^.]+)\.vol")
CLOUDFSD_PORT = 8080
-
diff --git a/scripts/cfs_rm_volume.py b/scripts/cfs_rm_volume.py
index bfab9b2..b99cc2a 100755
--- a/scripts/cfs_rm_volume.py
+++ b/scripts/cfs_rm_volume.py
@@ -6,7 +6,6 @@ import socket
import string
import sys
import urllib
-import urllib2
from bottle import request, template
import cfs_paths
@@ -27,18 +26,9 @@ def run_common (vname):
sts = kid.wait()
if sts:
return template("rm_vol_done.html", name=vname, blob=blob)
- for node in nodes_for_vol:
- scratch = [node,[]]
- if socket.gethostbyname(node) in cfs_utils.local_addrs:
- url_obj = [rm_local(vname)]
- else:
- url = "http://%s:%d/volumes/%s/rm_local" % (
- node, cfs_paths.CLOUDFSD_PORT, vname)
- url_obj = urllib2.urlopen(url)
- for line in url_obj:
- scratch[1].append(line)
- blob.append(scratch)
- return blob
+ return cfs_utils.do_all(blob,nodes_for_vol,
+ "volumes/%s/rm_local" % vname, None,
+ rm_local, vname)
def run_www (vname):
blob = run_common(vname)
diff --git a/scripts/cfs_start_volume.py b/scripts/cfs_start_volume.py
index 11c992b..a9112e6 100755
--- a/scripts/cfs_start_volume.py
+++ b/scripts/cfs_start_volume.py
@@ -7,7 +7,6 @@ import socket
import string
import subprocess
import sys
-import urllib2
from bottle import template
@@ -171,18 +170,9 @@ def start_local (vol_name):
def run_common (vol_name):
node_list = cfs_utils.get_nodes_for_vol(vol_name)
blob = []
- for node in node_list:
- scratch = [node, []]
- if socket.gethostbyname(node) in cfs_utils.local_addrs:
- url_obj = [start_local(vol_name)]
- else:
- url = "http://%s:%d/volumes/%s/start_local" % (
- node, cfs_paths.CLOUDFSD_PORT, vol_name)
- url_obj = urllib2.urlopen(url)
- for line in url_obj:
- scratch[1].append(line)
- blob.append(scratch)
- return blob
+ return cfs_utils.do_all(blob,node_list,
+ "volumes/%s/start_local" % vol_name, None,
+ start_local, vol_name)
def run_www (vol_name):
blob = run_common(vol_name)
diff --git a/scripts/cfs_stop_volume.py b/scripts/cfs_stop_volume.py
index b93b438..ec8126d 100755
--- a/scripts/cfs_stop_volume.py
+++ b/scripts/cfs_stop_volume.py
@@ -6,7 +6,6 @@ import os
import socket
import subprocess
import sys
-import urllib2
from bottle import template
@@ -46,18 +45,9 @@ def stop_local (vol_name):
def run_common (vol_name):
node_list = cfs_utils.get_nodes_for_vol(vol_name)
blob = []
- for node in node_list:
- scratch = [node, []]
- if socket.gethostbyname(node) in cfs_utils.local_addrs:
- url_obj = [stop_local(vol_name)]
- else:
- url = "http://%s:%d/volumes/%s/stop_local" % (
- node, cfs_paths.CLOUDFSD_PORT, vol_name)
- url_obj = urllib2.urlopen(url)
- for line in url_obj:
- scratch[1].append(line)
- blob.append(scratch)
- return blob
+ return cfs_utils.do_all(blob,node_list,
+ "volumes/%s/stop_local" % vol_name, None,
+ stop_local, vol_name)
def run_www (vol_name):
blob = run_common(vol_name)
diff --git a/scripts/cfs_utils.py b/scripts/cfs_utils.py
index 39a36e0..21db9ec 100644
--- a/scripts/cfs_utils.py
+++ b/scripts/cfs_utils.py
@@ -1,8 +1,10 @@
import dbm
import glob
+import httplib
import os
import socket
import subprocess
+import urllib2
import cfs_paths
@@ -15,6 +17,9 @@ good_fs_types = [ "ext2", "ext3", "ext4", "xfs", "btrfs" ]
# Sudo is kind of pointless if we're already running as root.
use_sudo = False
+# Disabled for developer convenience; turn on for production.
+use_ssl = False
+
# We use the class cache to avoid having to call through the shell to find
# executables on $PATH every single time. To do this, we use the shell *once*
# to find the local path and assume all nodes are configured similarly.
@@ -162,6 +167,50 @@ def open_db ():
db_path = os.path.join(cfs_paths.info_dir,"config.db")
return dbm.open(db_path,"c",0600)
+# Yes, this is nasty. My first choice would have been to pass the key/cert as
+# arguments to urllib2.urlopen, but it doesn't give me that option. My second
+# choice would have been to use a clean registration interface to have urllib2
+# create my httplib.HTTPSConnection the way I want, but that's created in
+# AbtractHTTPHandler.do_open with fixed arguments, and there's no "hook" to
+# add the needed values to the object before that very same code starts using
+# it. Subclassing HTTPSConnection would mean also subclassing HTTPSHandler to
+# use it, building a new OpenerDirector to use *that* subclass, and generally
+# reinventing half of urllib2 to get one little bit of functionality. A pox
+# on developers who use terms like "modular" and "abstract" in code that is
+# in fact marked by bad linkage and hidden dependencies, to the point where
+# even a monkey-patch like this is cleaner than their alternative.
+
+if use_ssl:
+ def my_hacked_init (self, *args, **kwargs):
+ apply(httplib.HTTPConnection.__init__,(self,)+args,kwargs)
+ self.key_file = "server.key"
+ self.cert_file = "server.pem"
+ httplib.HTTPSConnection.__init__ = my_hacked_init
+
+# Loop over a set of nodes, issuing a command either directly or via HTTP(S).
+# "Blob" should be a list of [node,text...] and hence mutable, but it's also
+# returned for convenience.
+def do_all (blob, node_list, rmt_cmd, data, lcl_cmd, *args):
+ for node in node_list:
+ scratch = [node, []]
+ if socket.gethostbyname(node) in local_addrs:
+ url_obj = [apply(lcl_cmd,args)]
+ else:
+ if use_ssl:
+ url = "https://%s:%d/%s" % (
+ node, cfs_paths.CLOUDFSD_PORT,rmt_cmd)
+ else:
+ url = "http://%s:%d/%s" % (
+ node, cfs_paths.CLOUDFSD_PORT,rmt_cmd)
+ if data != None:
+ url_obj = urllib2.urlopen(url,data=data)
+ else:
+ url_obj = urllib2.urlopen(url)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ return blob
+
# Print a "blob" of [node, [line1, line2]] tuples/lists for CLI/debugging.
def print_blob (blob):
for node, text in blob:
diff --git a/scripts/cloudfsd.py b/scripts/cloudfsd.py
index 8255a44..d081aed 100755
--- a/scripts/cloudfsd.py
+++ b/scripts/cloudfsd.py
@@ -1,11 +1,14 @@
#!/usr/bin/python
-from bottle import route, post, run, view, debug, request
import os
import socket
+import ssl
import string
+import wsgiref.simple_server as ss
+from bottle import route, post, run, view, debug, request, ServerAdapter, abort
+
import cfs_paths
import cfs_utils
import volmap
@@ -20,16 +23,34 @@ import cfs_add_tenant
import cfs_delete_tenant
import cfs_enable_tenant
+def authorized (request, *users):
+ if not cfs_utils.use_ssl:
+ return True
+ path = request.environ["PATH_INFO"]
+ cert = request.environ["wsgi.input"]._sock._sslobj.peer_certificate()
+ for part in cert["subject"]:
+ if part[0][0] == "commonName":
+ print "got %s from %s" % (path, part[0][1])
+ return (part[0][1] in users)
+ return False
+
+server_id = "The Server"
+admin_id = "Super User"
+
@route("/")
@route("/cfg")
@route("/cfgmain")
@view("cfgmain.html")
def cfg_main():
+ if not authorized(request,admin_id):
+ abort(401,"Forbidden")
return dict()
@route("/cluster")
@view("cluster.html")
def show_cluster():
+ if not authorized(request,admin_id):
+ abort(401,"Forbidden")
# TBD: handle glusterd presence/startup check sanely
cfs_utils.run_cmd("chkconfig","--add glusterd")
cfs_utils.run_cmd("chkconfig","glusterd on")
@@ -39,6 +60,8 @@ def show_cluster():
@post("/cluster/add_node")
def add_node():
+ if not authorized(request,admin_id):
+ abort(401,"Forbidden")
return cfs_add_node.run_www()
# TBD: implement remove_node
@@ -46,6 +69,8 @@ def add_node():
@route("/volumes")
@view("volumes.html")
def show_volumes():
+ if not authorized(request,admin_id):
+ abort(401,"Forbidden")
brick_list = cfs_utils.get_bricks()
mount_list = cfs_utils.get_mounts(brick_list)
# TBD: allow adding arbitrary directories instead of just mountpoints
@@ -53,53 +78,77 @@ def show_volumes():
@post("/volumes/add_directory")
def add_directory ():
+ if not authorized(request,admin_id):
+ abort(401,"Forbidden")
return cfs_add_directory.run_www()
@post("/volumes/add_dir_local")
def add_dir_local ():
+ if not authorized(request,server_id):
+ abort(401,"Forbidden")
path = request.forms.get("path")
return cfs_add_directory.add_local(path)
@post("/volumes/add_volume")
def add_volume():
+ if not authorized(request,admin_id):
+ abort(401,"Forbidden")
return cfs_add_volume.run_www()
@post("/volumes/add_vol_local")
def add_vol_local ():
+ if not authorized(request,server_id):
+ abort(401,"Forbidden")
vname = request.forms.get("vname")
return cfs_add_volume.add_local(vname)
@route("volumes/:name/remove")
def rm_volume (name):
+ if not authorized(request,admin_id):
+ abort(401,"Forbidden")
return cfs_rm_volume.run_www(name)
@route("/volumes/:name/rm_local")
def rm_vol_local (name):
+ if not authorized(request,server_id):
+ abort(401,"Forbidden")
return cfs_rm_volume.rm_local(name)
@route("/volumes/:vol_name/start")
def start_volume(vol_name):
+ if not authorized(request,admin_id):
+ abort(401,"Forbidden")
return cfs_start_volume.run_www(vol_name)
@route("/volumes/:vol_name/start_local")
def start_local (vol_name):
+ if not authorized(request,server_id):
+ abort(401,"Forbidden")
return cfs_start_volume.start_local(vol_name)
@route("/volumes/:vol_name/stop")
def stop_volume(vol_name):
+ if not authorized(request,admin_id):
+ abort(401,"Forbidden")
return cfs_stop_volume.run_www(vol_name)
@route("/volumes/:vol_name/stop_local")
def stop_local (vol_name):
+ if not authorized(request,server_id):
+ abort(401,"Forbidden")
return cfs_stop_volume.stop_local(vol_name)
# Used by mount.cloudfs
@route("/:vol_name/fetch")
def fetch_client_vf(vol_name):
+ if not authorized(request,"A Tenant"):
+ return "crap"
vf_path = "%s/vols/%s/%s-fuse.vol" % (cfs_paths.gfs_dir, vol_name, vol_name)
return open(vf_path,"r")
-# Used by mount.cloudfs
+# Used by mount.cloudfs; for these two we don't really care who they are, so
+# long as they do provide an identity that's part of this cluster. Merely
+# getting past the SSL handshake using the tenant ID is therefore sufficient.
@route("/:vol_name/map")
def map_paths(vol_name):
return volmap.vol_map(vol_name)
@@ -107,30 +156,42 @@ def map_paths(vol_name):
@route("/tenants")
@view("tenants.html")
def show_tenants ():
+ if not authorized(request,admin_id):
+ abort(401,"Forbidden")
db_obj = cfs_utils.open_db()
return dict(tenants=db_obj)
@post("/tenants/add")
def add_tenant():
+ if not authorized(request,admin_id):
+ abort(401,"Forbidden")
return cfs_add_tenant.run_www()
@post("/tenants/add_local")
def add_tenant_local ():
+ if not authorized(request,server_id):
+ abort(401,"Forbidden")
tn_name = request.forms.get("tn_name")
tn_pw = request.forms.get("tn_pw")
return cfs_add_tenant.add_local(tn_name,tn_pw)
@route("/tenants/:name/delete")
def delete_tenant (name):
+ if not authorized(request,admin_id):
+ abort(401,"Forbidden")
return cfs_delete_tenant.run_www(name)
@route("/tenants/:tn_name/delete_local")
def delete_tenant_local (tn_name):
+ if not authorized(request,server_id):
+ abort(401,"Forbidden")
return cfs_delete_tenant.delete_local(tn_name)
@route("/tenants/:tn_name/volumes")
@view("tenant_volumes.html")
def show_tenant_volumes (tn_name):
+ if not authorized(request,admin_id):
+ abort(401,"Forbidden")
db_obj = cfs_utils.open_db()
all_vols = [v[3:] for v in db_obj.keys() if v.startswith("vt_")]
print all_vols
@@ -141,20 +202,59 @@ def show_tenant_volumes (tn_name):
@post("/tenants/:tn_name/enable")
def enable_tenant_volumes (tn_name):
+ if not authorized(request,admin_id):
+ abort(401,"Forbidden")
return cfs_enable_tenant.run_www(tn_name)
@post("/tenants/enable_local")
def add_tenant_local ():
+ if not authorized(request,server_id):
+ abort(401,"Forbidden")
tn_name = request.forms.get("tn_name")
vol_list = request.forms.get("vol_list")
return cfs_enable_tenant.enable_local(tn_name,vol_list)
+# We don't really care if anyone sees our styles.
@route("/styles/:sheet")
def get_style (sheet):
return file("styles/%s"%sheet,"r")
+class SecureServer (ss.WSGIServer):
+ def server_activate (self):
+ ss.WSGIServer.server_activate(self)
+ self.socket = ssl.wrap_socket(self.socket,server_side=True,
+ certfile="server.pem", keyfile="server.key",
+ cert_reqs=ssl.CERT_REQUIRED, ca_certs="root.pem")
+
+class SecureHandler (ss.WSGIRequestHandler):
+ def handle (self):
+ for part in self.connection.getpeercert()["subject"]:
+ if part[0][0] == "commonName":
+ #print "### client is %s" % part[0][1]
+ break
+ else:
+ raise ssl.CertificateError, "no matching user"
+ ss.WSGIRequestHandler.handle(self)
+ # For some reason clients hang if we try to handle more than
+ # one request on a connection with WSGIServer (expecting a
+ # switch to non-SSL after a login page?) so we force a close
+ # here.
+ self.connection.shutdown(socket.SHUT_RDWR)
+ self.connection.close()
+
+class SecureAdapter (ServerAdapter):
+ def run (self, handler):
+ srv = ss.make_server(self.host,self.port,handler,
+ server_class=SecureServer,handler_class=SecureHandler,
+ **self.options)
+ srv.serve_forever()
+
if __name__ == "__main__":
debug(True)
#run(host='0.0.0.0',port=cfs_paths.CLOUDFSD_PORT,server=CherryPyServer)
- run(host='0.0.0.0',port=cfs_paths.CLOUDFSD_PORT)
+ if cfs_utils.use_ssl:
+ run(host='0.0.0.0',port=cfs_paths.CLOUDFSD_PORT,
+ server=SecureAdapter)
+ else:
+ run(host='0.0.0.0',port=cfs_paths.CLOUDFSD_PORT)
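To see how the pieces above fit together from the client side: any caller of cloudfsd
now has to present a certificate signed by a CA in root.pem during the TLS handshake.
Below is a minimal client sketch, assuming the example certificates shipped below and a
placeholder host name; the /:vol_name/map route and port 8080 come from the code above.

    import httplib

    # Present the tenant key/cert during the TLS handshake (Python 2 httplib).
    conn = httplib.HTTPSConnection("server1", 8080,
                                   key_file="tenant.key",
                                   cert_file="tenant.pem")
    conn.request("GET", "/myvol/map")
    print conn.getresponse().read()
    conn.close()

Because SecureHandler forces a close after each request, a client opens a fresh
connection per call; the sketch above reflects that one-request-per-connection model.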
diff --git a/scripts/ssl_examples/admin.key b/scripts/ssl_examples/admin.key
new file mode 100644
index 0000000..bd1011d
--- /dev/null
+++ b/scripts/ssl_examples/admin.key
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICXAIBAAKBgQDp/8qCWAKyJLDMPC+V6pDyngE+h+K/ZWA6hjIvjuOktRFEl36Y
+mfM03L0JN4rjNS7JQkH4e2YWP9IrR5fKwlMwz1xqNaPSCFLwbUs52VUAPRT+LtQL
+5bpi8VdwvqAlGJg1SazWjfsXyPSmhYhgCZi9aGApkUStnNAtJ7rgnOFfywIDAQAB
+AoGAWnym0/ayvC7CC4huolt9x8RgGM01WuwZ5SfFumxYDXZTgiHPO7W0vclqdGj9
+FVWYjQ1JM4yMcqglXsUlpqu0vRrdt+WmsKZWhbJdUKIcKO3ztW8f++mWhYJINf/S
+srHQzpDIIOTIUrTOsWqJtLZF2bOdeIShany3QhBlpCSobokCQQD859wvTppW3L9K
+IggG5/G0cDLl5NyeE86pwlVDhoXqH90pMBJKLXvpiUHQ8DJ1LOdTTYbeodnQpdKV
+/bB2da+lAkEA7Ny2eiZS4suULDYvX4ZKKUes5st++/ycp9oJWTaBsLkCdgTi7Gx9
+Lu8K87yzxGCF31d3Fsg2Iia1m07a5CO2rwJBAPyT44crjWCq3jB/hFzBUNfQJkUL
+KHqHdaJ8/wKNWzjT0eDMrbd/bX1zI1Q0T3BPBEhh6Qx3wD1tHr/FuXLVXjECQGVl
+SnxZ4YuS6JhQjdEPtLmdJEgcfvyyNAGq1eup6LTVTldlWhspiiFIVWDnu/Dp/QUR
+9Tn8dSgDeCTHCe811qMCQGscWcY5yA85Ax379RJd7/3WXeUOF8ZMwuZsmbWVMKkO
+QXiiLDMnCzumSaDRiUAZxAz1WQ2WmgyhXF3vbzmowAI=
+-----END RSA PRIVATE KEY-----
diff --git a/scripts/ssl_examples/admin.pem b/scripts/ssl_examples/admin.pem
new file mode 100644
index 0000000..2d76c95
--- /dev/null
+++ b/scripts/ssl_examples/admin.pem
@@ -0,0 +1,16 @@
+-----BEGIN CERTIFICATE-----
+MIICfDCCAeWgAwIBAgIJAKW4Tv+M90fKMA0GCSqGSIb3DQEBBQUAMFcxCzAJBgNV
+BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
+Q29tcGFueSBMdGQxEzARBgNVBAMMClN1cGVyIFVzZXIwHhcNMTEwNTE2MjAyMjEy
+WhcNMTEwNjE1MjAyMjEyWjBXMQswCQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVs
+dCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBhbnkgTHRkMRMwEQYDVQQDDApT
+dXBlciBVc2VyMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDp/8qCWAKyJLDM
+PC+V6pDyngE+h+K/ZWA6hjIvjuOktRFEl36YmfM03L0JN4rjNS7JQkH4e2YWP9Ir
+R5fKwlMwz1xqNaPSCFLwbUs52VUAPRT+LtQL5bpi8VdwvqAlGJg1SazWjfsXyPSm
+hYhgCZi9aGApkUStnNAtJ7rgnOFfywIDAQABo1AwTjAdBgNVHQ4EFgQUh/pgdfSK
+cnALftl4NALNBHZ84FQwHwYDVR0jBBgwFoAUh/pgdfSKcnALftl4NALNBHZ84FQw
+DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOBgQASXLjNSj91lbrWpn8FpU1F
+2X4EJEXNArKR+d1F/+WJxFSFQbyuhc0cmjuo30Z6Wzova1s2Icnw+EDgYVfXGdIC
+OJuDrtN9uonNPGeVIq+WrbHQF06FNiBgxCdJcpiQG5J6K6jAFC6cJHfWPFvDBSz6
+MkOGZT7Q9qApCepdUct3OA==
+-----END CERTIFICATE-----
diff --git a/scripts/ssl_examples/admin.pfx b/scripts/ssl_examples/admin.pfx
new file mode 100644
index 0000000..ac91a21
Binary files /dev/null and b/scripts/ssl_examples/admin.pfx differ
diff --git a/scripts/ssl_examples/root.pem b/scripts/ssl_examples/root.pem
new file mode 100644
index 0000000..ee2928a
--- /dev/null
+++ b/scripts/ssl_examples/root.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIICfDCCAeWgAwIBAgIJAIBSkSFer4KiMA0GCSqGSIb3DQEBBQUAMFcxCzAJBgNV
+BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
+Q29tcGFueSBMdGQxEzARBgNVBAMMClRoZSBTZXJ2ZXIwHhcNMTEwNTE2MjAyMTU3
+WhcNMTEwNjE1MjAyMTU3WjBXMQswCQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVs
+dCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBhbnkgTHRkMRMwEQYDVQQDDApU
+aGUgU2VydmVyMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDDNGcrorj2eI6D
+FUOu6fgOFAloTSrbfzhCosquyTznCi3MiswOrAWy3Y1aiL2aLSRURnmsaIoVC5FS
+1bZoE63cFkTvv2rKoVmBWmWy497nb/h8DMrsfOT2Xn3y+u79Wt4RCkunRJEfnWsg
+wrW6ULyxVAVeHJEtiudtHU0htAcR4wIDAQABo1AwTjAdBgNVHQ4EFgQUa2eD7AFo
+sPsSX5nMBACmReQBTJIwHwYDVR0jBBgwFoAUa2eD7AFosPsSX5nMBACmReQBTJIw
+DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOBgQBJM6ESfZzgb6qwuHB28niU
+Azvf3zSPO2ji5C/gcPZa/vknxiaydP9c3JmwzLN1Qx7c5dHxLD5n9in1vcnJKR2m
+THv9PkLYH0bIEehL+IBD8F4VfvL8ZAtHos1D+rnMeTtzsR4BqY0qzXjtu1/Q98qF
+nhBvjaJoutWy9+nwxvPHHA==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIICfDCCAeWgAwIBAgIJAKW4Tv+M90fKMA0GCSqGSIb3DQEBBQUAMFcxCzAJBgNV
+BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
+Q29tcGFueSBMdGQxEzARBgNVBAMMClN1cGVyIFVzZXIwHhcNMTEwNTE2MjAyMjEy
+WhcNMTEwNjE1MjAyMjEyWjBXMQswCQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVs
+dCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBhbnkgTHRkMRMwEQYDVQQDDApT
+dXBlciBVc2VyMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDp/8qCWAKyJLDM
+PC+V6pDyngE+h+K/ZWA6hjIvjuOktRFEl36YmfM03L0JN4rjNS7JQkH4e2YWP9Ir
+R5fKwlMwz1xqNaPSCFLwbUs52VUAPRT+LtQL5bpi8VdwvqAlGJg1SazWjfsXyPSm
+hYhgCZi9aGApkUStnNAtJ7rgnOFfywIDAQABo1AwTjAdBgNVHQ4EFgQUh/pgdfSK
+cnALftl4NALNBHZ84FQwHwYDVR0jBBgwFoAUh/pgdfSKcnALftl4NALNBHZ84FQw
+DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOBgQASXLjNSj91lbrWpn8FpU1F
+2X4EJEXNArKR+d1F/+WJxFSFQbyuhc0cmjuo30Z6Wzova1s2Icnw+EDgYVfXGdIC
+OJuDrtN9uonNPGeVIq+WrbHQF06FNiBgxCdJcpiQG5J6K6jAFC6cJHfWPFvDBSz6
+MkOGZT7Q9qApCepdUct3OA==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIICeDCCAeGgAwIBAgIJAPVYsIHSJcudMA0GCSqGSIb3DQEBBQUAMFUxCzAJBgNV
+BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
+Q29tcGFueSBMdGQxETAPBgNVBAMMCEEgVGVuYW50MB4XDTExMDUxNjIwMjIyM1oX
+DTExMDYxNTIwMjIyM1owVTELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQg
+Q2l0eTEcMBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDERMA8GA1UEAwwIQSBU
+ZW5hbnQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMNWfsmPx0y2zdrNCl3w
+Puki+AYyYOzGPZRZeRBz5dFXxvJ0mdQMl1Mv4cbgjfaEnT+qT2vV17GFf7XWARd2
+oGLYfHZO2iINRDlyG5g/FDNkeUnVTm0ZBtsBo5hpX49C/MB1cHzmWAu5nopi8fhB
+nej+rAP1GIZkUkWyWIG1jbbRAgMBAAGjUDBOMB0GA1UdDgQWBBTzqu/xb6WUQ2mz
+bDwEGW9XfUEJ1DAfBgNVHSMEGDAWgBTzqu/xb6WUQ2mzbDwEGW9XfUEJ1DAMBgNV
+HRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAHvZ0Az8LLfKc72FXxS6llSrlB40
+Y/D4XTHcBMaiFDZClJvYJ+UmqrxYCJH5py/YRd9GSp5Ry+ghPWg/+eFsSGFtQFTI
+r+r70ux4lRYUk+adSqIiyg0xNGMqgOJKNNN5ixrCvN9k5BeeWyiKLbgcD+oG6sZM
+ImJ71ZY4QkH1MLb+
+-----END CERTIFICATE-----
diff --git a/scripts/ssl_examples/server.key b/scripts/ssl_examples/server.key
new file mode 100644
index 0000000..b188d77
--- /dev/null
+++ b/scripts/ssl_examples/server.key
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICXwIBAAKBgQDDNGcrorj2eI6DFUOu6fgOFAloTSrbfzhCosquyTznCi3MiswO
+rAWy3Y1aiL2aLSRURnmsaIoVC5FS1bZoE63cFkTvv2rKoVmBWmWy497nb/h8DMrs
+fOT2Xn3y+u79Wt4RCkunRJEfnWsgwrW6ULyxVAVeHJEtiudtHU0htAcR4wIDAQAB
+AoGBAIxEuqIzcr/BT37IJ/OnspTDNyNY2CQT3eScQBKrDnVi7hgd4JXmbM3jwDA/
+NCd2qrVYUxRdpmOLBWlFoqZVBcdvms4AlaOy9f6tMIwVt1FMgYp5GgPWKv2kZddR
++zwPBuwhkR41aGFAypsS9b5ZAdn5GP6ZfTw3vDtxA7vwoqU5AkEA8NwasAlFJy1E
+ESlqG7GAe1YzjtkO+wu2YCCer5eXxE+G3b8s+QOnc6qaSnG6PABk5JptjUwFKOzo
+m7ZuFk3I5wJBAM95nvVYHZBXutNEOZIINsxvaEMfwbpeQqTwWrGJQY55NpeRIzvg
+lUGDiJNuOdRcWNA6gvSRAjHS1sH9FwSLI6UCQQDcVm4vlftmEHnRPVKtTN8ddUkr
+J5QVwqwvGggw1/vlgV7+IjKRBm+8V1hYO9vDohSqMD+B4AZkXv2X3PaufrAHAkEA
+rW4t+Uq6E9GyAy4xraeeHxA1qH6gU2i97uBX/7YLjcw3XUVenYvjWEtaXFs0jhbP
+yuhOVZ/tpLZo/OnSVuL0XQJBAKQgA9RpoaFqSb4vWStlO54mKmzRA/xXVhDSzYYb
+N0VC77hav1p527nfWAiMuSvuhOStcwGQ32sgWN5Ecgzrg/s=
+-----END RSA PRIVATE KEY-----
diff --git a/scripts/ssl_examples/server.pem b/scripts/ssl_examples/server.pem
new file mode 100644
index 0000000..2c08fe3
--- /dev/null
+++ b/scripts/ssl_examples/server.pem
@@ -0,0 +1,16 @@
+-----BEGIN CERTIFICATE-----
+MIICfDCCAeWgAwIBAgIJAIBSkSFer4KiMA0GCSqGSIb3DQEBBQUAMFcxCzAJBgNV
+BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
+Q29tcGFueSBMdGQxEzARBgNVBAMMClRoZSBTZXJ2ZXIwHhcNMTEwNTE2MjAyMTU3
+WhcNMTEwNjE1MjAyMTU3WjBXMQswCQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVs
+dCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBhbnkgTHRkMRMwEQYDVQQDDApU
+aGUgU2VydmVyMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDDNGcrorj2eI6D
+FUOu6fgOFAloTSrbfzhCosquyTznCi3MiswOrAWy3Y1aiL2aLSRURnmsaIoVC5FS
+1bZoE63cFkTvv2rKoVmBWmWy497nb/h8DMrsfOT2Xn3y+u79Wt4RCkunRJEfnWsg
+wrW6ULyxVAVeHJEtiudtHU0htAcR4wIDAQABo1AwTjAdBgNVHQ4EFgQUa2eD7AFo
+sPsSX5nMBACmReQBTJIwHwYDVR0jBBgwFoAUa2eD7AFosPsSX5nMBACmReQBTJIw
+DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOBgQBJM6ESfZzgb6qwuHB28niU
+Azvf3zSPO2ji5C/gcPZa/vknxiaydP9c3JmwzLN1Qx7c5dHxLD5n9in1vcnJKR2m
+THv9PkLYH0bIEehL+IBD8F4VfvL8ZAtHos1D+rnMeTtzsR4BqY0qzXjtu1/Q98qF
+nhBvjaJoutWy9+nwxvPHHA==
+-----END CERTIFICATE-----
diff --git a/scripts/ssl_examples/tenant.key b/scripts/ssl_examples/tenant.key
new file mode 100644
index 0000000..9124136
--- /dev/null
+++ b/scripts/ssl_examples/tenant.key
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICXQIBAAKBgQDDVn7Jj8dMts3azQpd8D7pIvgGMmDsxj2UWXkQc+XRV8bydJnU
+DJdTL+HG4I32hJ0/qk9r1dexhX+11gEXdqBi2Hx2TtoiDUQ5chuYPxQzZHlJ1U5t
+GQbbAaOYaV+PQvzAdXB85lgLuZ6KYvH4QZ3o/qwD9RiGZFJFsliBtY220QIDAQAB
+AoGAChPLRLDDMmT358LONxxu0m44Z3Fv6KcthBq9kSi5gXxKyjVTvknMAMGmnzD0
+gfRDfIZXxJNqpkDh3sqkkcZP7dJ0VVGk6swoV24kvt5vtai6dqVD9klmSApDE3Ec
+KsAwJShRp9O2sBrtXMrtq1Q79SfmufJE0vcE73cfqQLijvUCQQDhof+ynTPWHPf6
+LxU6kl/MUaEG1MDQdS2WmUUrwjImWmBJjaPkpnpVX+eFenQt+nd1IpZQwuBWbTeP
+pVZ6z6x/AkEA3aC1Tk6da2NBR8Ef9zZ7DDhdVkQZXVkouGohQNMOyxg9wrqhelkR
+dWWm2ykNqYlxZYEcZc87m5G6xDH8KvM0rwJBAK7WWAuw0r0EH4dmun1zdPYe/rcL
+Xwlo81VyGO5qgW/Esj3smmYQNlU3hnCgzavfHHfQwEd+alWuNdKCXLu3dsMCQDQ7
+DJ+AzX6ibJ8Rd4wWTddqbSzIbcXfHkaf1GhnlSPt+ZgrzaR82y10oGcj/LFIz+2h
+COVBeoXGSWK1eP1SRccCQQC+16Mrw/C9ZWCu+1LUtCWTjoP/S82ooRTF6Ug3XIo6
+sROrv9p85+vf0AUF7s07soh4XrVFa6Cy4u8C/vxsmokT
+-----END RSA PRIVATE KEY-----
diff --git a/scripts/ssl_examples/tenant.pem b/scripts/ssl_examples/tenant.pem
new file mode 100644
index 0000000..593c56f
--- /dev/null
+++ b/scripts/ssl_examples/tenant.pem
@@ -0,0 +1,16 @@
+-----BEGIN CERTIFICATE-----
+MIICeDCCAeGgAwIBAgIJAPVYsIHSJcudMA0GCSqGSIb3DQEBBQUAMFUxCzAJBgNV
+BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
+Q29tcGFueSBMdGQxETAPBgNVBAMMCEEgVGVuYW50MB4XDTExMDUxNjIwMjIyM1oX
+DTExMDYxNTIwMjIyM1owVTELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQg
+Q2l0eTEcMBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDERMA8GA1UEAwwIQSBU
+ZW5hbnQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMNWfsmPx0y2zdrNCl3w
+Puki+AYyYOzGPZRZeRBz5dFXxvJ0mdQMl1Mv4cbgjfaEnT+qT2vV17GFf7XWARd2
+oGLYfHZO2iINRDlyG5g/FDNkeUnVTm0ZBtsBo5hpX49C/MB1cHzmWAu5nopi8fhB
+nej+rAP1GIZkUkWyWIG1jbbRAgMBAAGjUDBOMB0GA1UdDgQWBBTzqu/xb6WUQ2mz
+bDwEGW9XfUEJ1DAfBgNVHSMEGDAWgBTzqu/xb6WUQ2mzbDwEGW9XfUEJ1DAMBgNV
+HRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAHvZ0Az8LLfKc72FXxS6llSrlB40
+Y/D4XTHcBMaiFDZClJvYJ+UmqrxYCJH5py/YRd9GSp5Ry+ghPWg/+eFsSGFtQFTI
+r+r70ux4lRYUk+adSqIiyg0xNGMqgOJKNNN5ixrCvN9k5BeeWyiKLbgcD+oG6sZM
+ImJ71ZY4QkH1MLb+
+-----END CERTIFICATE-----
12 years, 4 months
Branch 'cloudfsd' - doc/mgmt_manual.md
by Jeff Darcy
doc/mgmt_manual.md | 143 +++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 143 insertions(+)
New commits:
commit 519042c2751772e24192210cc40afd29e8d50f4a
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu May 12 16:30:21 2011 -0400
Added management manual.
diff --git a/doc/mgmt_manual.md b/doc/mgmt_manual.md
new file mode 100644
index 0000000..bfcbbe9
--- /dev/null
+++ b/doc/mgmt_manual.md
@@ -0,0 +1,143 @@
+= CloudFS Management Manual =
+
+The CloudFS management system consists of two parts: a very simple web-based
+management daemon called cloudfsd, and scripts to perform various discrete
+functions. The vast majority of the management functionality is in scripts
+that can be called either from cloudfsd or directly from the command line, but
+there is some functionality that is implemented directly in cloudfsd itself and
+there are a couple of command-line-only scripts. Because CloudFS is a
+distributed system, running cloudfsd on all servers is required even when using
+the CLI, because the scripts use the HTTP interface to perform actions on other
+nodes. If cloudfsd is not running, or if other nodes are unavailable, some
+scripts might appear to work but either configuration data or operational
+states might become inconsistent.
+
+This manual is divided into sections representing the major types of entities
+that can be managed via cloudfsd. Descriptions are given primarily in terms of
+the web interface, with CLI equivalents pointed out where necessary.
+
+== Main Page ==
+
+You can access the main web interface by connecting to port 8080 (default) of a
+node running cloudfsd. You will be presented with options to do one of the
+following:
+
+ * Manage Servers
+ * Manage Volumes
+ * Manage Tenants
+
+== Managing Servers ==
+
+The simplest entities to manage are servers. If you click on "Manage Servers"
+you will be shown a list of servers that are currently members of the
+CloudFS/GlusterFS cluster including the node where cloudfsd is running. There
+is also a form that you can use to add a node, which will start up the
+GlusterFS daemons (but not cloudfsd) on that node and invoke GlusterFS to have
+it join the cluster.
+
+The CLI equivalents for these functions are "gluster peer status" to see
+servers, and "cfs_add_node" to add one.
+
+== Managing Volumes ==
+
+When you click on "Manage Volumes" you will be shown a page containing several
+sections:
+
+ * A list of current volumes. For each volume, there are links to
+ perform various actions on that volume, followed by a list of "bricks"
+ which are part of the volume.
+
+ * A form to add "bricks" (server directories) from which volumes may
+ be composed.
+
+ * A form to create a volume, including selection of bricks and other
+ parameters.
+
+In the existing-volume list, the following actions are possible:
+
+ * Manage tenant access to the volume (TBD).
+
+ * Start the volume.
+
+ * Stop the volume.
+
+ * Remove the volume.
+
+In the brick-addition form, you can add one or more bricks. To add a single
+brick, simply type in the server and path separated by a colon (e.g.
+"server1:/bricks/xyz"). To add multiple bricks, you can use various kinds of
+wild-card patterns within either the server or path part of the input:
+
+ * Character ranges, such as [a-j]
+
+ * Numeric ranges, such as [5-11]
+
+ * List of alternatives, such as {foo,bar}
+
+Wild-card expansion is done until no more expansions are possible, so a
+specification like server[1-3]:/{big,small}_bricks/volume[1-4] would expand
+to 24 bricks.
+
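+To make the expansion rule concrete, here is a minimal sketch of the idea in
+Python (an illustration only, not the actual cfs_add_directory code):
+
+    import re
+
+    alt_re = re.compile(r"\{([^}]+)\}")
+    rng_re = re.compile(r"\[([0-9]+|[a-z])-([0-9]+|[a-z])\]")
+
+    def expand_spec(spec):
+        # Rewrite one {a,b} or [x-y] pattern per pass until none remain.
+        todo, done = [spec], []
+        while todo:
+            s = todo.pop()
+            m = alt_re.search(s) or rng_re.search(s)
+            if not m:
+                done.append(s)
+                continue
+            if m.re is alt_re:
+                choices = m.group(1).split(",")
+            elif m.group(1).isdigit():
+                choices = [str(i) for i in range(int(m.group(1)), int(m.group(2)) + 1)]
+            else:
+                choices = [chr(c) for c in range(ord(m.group(1)), ord(m.group(2)) + 1)]
+            todo.extend([s[:m.start()] + c + s[m.end():] for c in choices])
+        return sorted(done)
+
+    # expand_spec("server[1-3]:/{big,small}_bricks/volume[1-4]") -> 24 entries
+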
+In the volume-creation form, you can select which bricks will be part of a
+volume, plus the following parameters:
+
+ * Volume name
+
+ * Distribution type (plain/replicated/striped)
+
+ * Replica or stripe count
+
+Once these parameters have been selected, GlusterFS is invoked to create the
+"base" volume and then CloudFS takes additional configuration steps based on
+that.
+
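+Under the hood this amounts to assembling a "gluster volume create" command
+line, roughly as cfs_add_volume.py does (a sketch only; argument names are
+illustrative):
+
+    def build_create_command(name, vol_type, count, bricks):
+        # "plain" volumes take no type/count arguments; replica/stripe do.
+        cmd = "volume create %s" % name
+        if vol_type != "plain":
+            cmd = "%s %s %s" % (cmd, vol_type, count)
+        return "%s %s" % (cmd, " ".join(bricks))
+
+    # build_create_command("myvol", "replica", "2", ["s1:/b1", "s2:/b1"])
+    # -> "volume create myvol replica 2 s1:/b1 s2:/b1"
+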
+The CLI equivalents for these functions are:
+
+ * "cfs_list_volumes" to list volumes and associated bricks
+
+ * "cfs_add_directory" to add bricks/directories
+
+ * "cfs_add_volume" to create a new volume
+
+ * "cfs_rm_volume" to remove a volume
+
+ * "cfs_start_volume" and "cfs_stop_volume" to start/stop a volume
+
+== Managing Tenants ==
+
+When you click on "Manage Tenants" you will be shown a screen with two parts:
+
+ * A list of current tenants. For each tenant there is a link to
+ manage which volumes a tenant can access and a link to delete the
+ tenant.
+
+ * A form to add a new tenant.
+
+The tenant list also includes, for each tenant, the credentials the tenant uses
+to access CloudFS volumes. This is currently a plain-text password, which is
+Very Bad, but very soon it will be a certificate location instead. The form
+for adding a tenant lets you specify a name and credential.
+
+The CLI equivalents for these functions are:
+
+ * "cfs_list_tenants" to list tenants (including which volumes are
+ enabled for each)
+
+ * "cfs_add_tenant" to add a tenant
+
+ * "cfs_delete_tenant" to delete a tenant
+
+== Managing Tenant Access To Volumes ==
+
+There are two ways to manage the relationships between volumes and tenants:
+
+ * From the volume-management page, manage the list of tenants allowed
+ to use a particular volume (TBD).
+
+ * From the tenant-management page, manage the list of volumes to which
+ a particular tenant has access.
+
+In either case, the management is in the form of checkboxes which may be used
+to indicate which volume/tenant connections are valid, plus an "Update" button
+to have any changes take effect.
+
12 years, 4 months
16 commits - scripts/bottle.py scripts/cfs_add_directory.py scripts/cfs_add_node.py scripts/cfs_add_tenant.py scripts/cfs_add_volume.py scripts/cfs_delete_tenant.py scripts/cfs_enable_tenant.py scripts/cfs_list_tenants.py scripts/cfs_list_vols.py scripts/cfs_mount.py scripts/cfs_paths.py scripts/cfs_rm_volume.py scripts/cfs_start_volume.py scripts/cfs_stop_volume.py scripts/cfs_utils.py scripts/cloudfsd.py scripts/styles scripts/views scripts/volfilter.py scripts/volmap.py ToDo xlators/encryption
by Jeff Darcy
ToDo | 20
scripts/bottle.py | 1934 +++++++++++++++++++++++++++++++++++
scripts/cfs_add_directory.py | 128 ++
scripts/cfs_add_node.py | 58 +
scripts/cfs_add_tenant.py | 55
scripts/cfs_add_volume.py | 65 +
scripts/cfs_delete_tenant.py | 49
scripts/cfs_enable_tenant.py | 73 +
scripts/cfs_list_tenants.py | 14
scripts/cfs_list_vols.py | 16
scripts/cfs_mount.py | 72 +
scripts/cfs_paths.py | 13
scripts/cfs_rm_volume.py | 55
scripts/cfs_start_volume.py | 199 +++
scripts/cfs_stop_volume.py | 74 +
scripts/cfs_utils.py | 170 +++
scripts/cloudfsd.py | 160 ++
scripts/styles/cfgmain.css | 43
scripts/styles/provlist.css | 13
scripts/views/add_dir_done.html | 15
scripts/views/add_node_done.html | 7
scripts/views/add_vol_done.html | 15
scripts/views/add_vol_fail.html | 7
scripts/views/cfgmain.html | 16
scripts/views/cluster.html | 23
scripts/views/rm_vol_done.html | 15
scripts/views/start_done.html | 15
scripts/views/stop_done.html | 15
scripts/views/tenant_volumes.html | 25
scripts/views/tenants.html | 33
scripts/views/tn_act_done.html | 15
scripts/views/volumes.html | 69 +
scripts/volfilter.py | 51
scripts/volmap.py | 26
xlators/encryption/crypt/src/crypt.c | 851 +++------------
xlators/encryption/crypt/src/crypt.h | 43
36 files changed, 3737 insertions(+), 715 deletions(-)
New commits:
commit 04d9caaab692c3ca819e1b3038099c81eee058a5
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu May 12 15:42:11 2011 -0400
Added CLI programs to list volumes/tenants.
diff --git a/ToDo b/ToDo
index 696245b..2fbbcb9 100644
--- a/ToDo
+++ b/ToDo
@@ -1,5 +1,5 @@
= High Priority =
-Add CLI equivalents to list nodes/volumes/tenants
+(nothing left)
= Medium Priority =
SSL
diff --git a/scripts/cfs_list_tenants.py b/scripts/cfs_list_tenants.py
new file mode 100755
index 0000000..3446418
--- /dev/null
+++ b/scripts/cfs_list_tenants.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+
+import cfs_utils
+
+db_obj = cfs_utils.open_db()
+
+for tenant in [t for t in db_obj.keys() if t.startswith("tv_")]:
+ print "%s:" % tenant[3:]
+ vol_list = db_obj[tenant]
+ if vol_list == "":
+ print " (no volumes enabled)"
+ else:
+ for vol in vol_list.split(","):
+ print " %s enabled" % vol
diff --git a/scripts/cfs_list_vols.py b/scripts/cfs_list_vols.py
new file mode 100755
index 0000000..f26f3b8
--- /dev/null
+++ b/scripts/cfs_list_vols.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
+import cfs_utils
+
+brick_list = cfs_utils.get_bricks()
+vol_list = brick_list.keys()[:]
+vol_list.sort()
+db_obj = cfs_utils.open_db()
+
+for vol in vol_list:
+ if db_obj.has_key("vt_"+vol):
+ print "Volume %s (CloudFS):" % vol
+ else:
+ print "Volume %s (GlusterFS):" % vol
+ for brick in brick_list[vol]:
+ print " %s" % brick
commit 6a1ba723db3dee6496350f5a9e6f59741645ee6b
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu May 12 15:26:18 2011 -0400
Fixed CLI versions of most functions.
diff --git a/ToDo b/ToDo
index fec4353..696245b 100644
--- a/ToDo
+++ b/ToDo
@@ -1,5 +1,5 @@
= High Priority =
-Make sure CLI equivalents work
+Add CLI equivalents to list nodes/volumes/tenants
= Medium Priority =
SSL
diff --git a/scripts/cfs_add_directory.py b/scripts/cfs_add_directory.py
old mode 100644
new mode 100755
index 508f501..1c9f0b9
--- a/scripts/cfs_add_directory.py
+++ b/scripts/cfs_add_directory.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
import os
import re
@@ -78,12 +79,16 @@ def add_local (path):
xp_list = []
paths_by_node[k[3:]] = set(xp_list)
# Add the user's paths.
+ members = cfs_utils.get_members()
for p in expand(path):
parts = string.split(p,":")
if len(parts) != 2:
- return "add_local(%s) rejected %s on %s" %(
+ return "add_local(%s) rejected %s on %s (no node)" %(
path, p, socket.gethostname())
node, dir = parts
+ if node not in members:
+ return "add_local(%s) rejected %s on %s (bad node)" %(
+ path, p, socket.gethostname())
if paths_by_node.has_key(node):
paths_by_node[node].add(dir)
else:
@@ -108,13 +113,16 @@ def run_common (path):
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
- return template("add_dir_done.html",path=path,blob=blob);
+ return blob
def run_www ():
path = request.forms.get("path")
- return run_common(path)
+ blob = run_common(path)
+ return template("add_dir_done.html",path=path,blob=blob);
if __name__ == "__main__":
path = sys.argv[1]
- #run_common(tn_name,tn_pw)
- print add_local(path)
+ blob = run_common(path)
+ print "Directory %s added." % path
+ cfs_utils.print_blob(blob)
+
diff --git a/scripts/cfs_add_node.py b/scripts/cfs_add_node.py
old mode 100644
new mode 100755
index 497db36..b9bb7c4
--- a/scripts/cfs_add_node.py
+++ b/scripts/cfs_add_node.py
@@ -50,4 +50,9 @@ def run_www ():
return run_common(node_name)
if __name__ == "__main__":
- run_common(sys.argv[1])
+ if len(sys.argv) != 2:
+ print >> sys.stderr, "Usage: %s node_name_or_addr" % sys.argv[0]
+ sys.exit(1)
+ name = sys.argv[1]
+ run_common(name)
+ print "Node %s added." % name
diff --git a/scripts/cfs_add_tenant.py b/scripts/cfs_add_tenant.py
old mode 100644
new mode 100755
index d36837e..d460367
--- a/scripts/cfs_add_tenant.py
+++ b/scripts/cfs_add_tenant.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
import os
import re
@@ -34,15 +35,21 @@ def run_common (tn_name, tn_pw):
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
- return template("tn_act_done.html",name=tn_name,action="added",
- blob=blob);
+ return blob
def run_www ():
tn_name = request.forms.get("tn_name")
tn_pw = request.forms.get("tn_pw")
- return run_common(tn_name,tn_pw)
+ blob = run_common(tn_name,tn_pw)
+ return template("tn_act_done.html",name=tn_name,action="added",
+ blob=blob);
if __name__ == "__main__":
+ if len(sys.argv) != 3:
+ print >> sys.stderr, "Usage: %s name password" % sys.argv[0]
+ sys.exit(1)
tn_name = sys.argv[1]
tn_pw = sys.argv[2]
- run_common(tn_name,tn_pw)
+ blob = run_common(tn_name,tn_pw)
+ print "Tenant %s added." % tn_name
+ cfs_utils.print_blob(blob)
diff --git a/scripts/cfs_add_volume.py b/scripts/cfs_add_volume.py
old mode 100644
new mode 100755
index 4af663b..32bdc60
--- a/scripts/cfs_add_volume.py
+++ b/scripts/cfs_add_volume.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
import os
import re
@@ -23,10 +24,10 @@ def run_common (vname, vtype, vcount, bricks):
if vtype != "plain":
cmd = "%s %s %s" % (cmd, vtype, vcount)
cmd = "%s %s" % (cmd, string.join(bricks))
+ print cmd
sts = cfs_utils.run_cmd("gluster",cmd).wait()
if sts:
- return template("add_vol_fail.html", name=vname,
- action="gluster", status=sts)
+ return [["gluster",["command failed with %d"%sts]]]
blob = []
for node in cfs_utils.get_nodes_for_vol(vname):
scratch = [node,[]]
@@ -40,7 +41,7 @@ def run_common (vname, vtype, vcount, bricks):
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
- return template("add_vol_done.html",name=vname,blob=blob);
+ return blob
def run_www ():
volume_id = request.forms.get("VOLUMEID")
@@ -50,12 +51,15 @@ def run_www ():
for prop in request.forms.iterkeys():
if prop.startswith("VOLUME_"):
brick_list.append(prop[7:])
- return run_common(volume_id,volume_type,replica_or_stripe_count,
+ blob = run_common(volume_id,volume_type,replica_or_stripe_count,
brick_list)
+ return template("add_vol_done.html",name=volume_id,blob=blob);
if __name__ == "__main__":
volume_id = sys.argv[1]
volume_type = sys.argv[2]
replica_or_stripe_count = sys.argv[3]
brick_list = sys.argv[4:]
- run_common(volume_id,volume_type,replica_or_stripe_count,brick_list)
+ blob = run_common(volume_id,volume_type,replica_or_stripe_count,brick_list)
+ print "Volume %s added." % volume_id
+ cfs_utils.print_blob(blob)
diff --git a/scripts/cfs_delete_tenant.py b/scripts/cfs_delete_tenant.py
old mode 100644
new mode 100755
index ebbd35d..92c11d7
--- a/scripts/cfs_delete_tenant.py
+++ b/scripts/cfs_delete_tenant.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
import os
import re
@@ -31,12 +32,18 @@ def run_common (tn_name):
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
- return template("tn_act_done.html",name=tn_name,action="deleted",
- blob=blob);
+ return blob
def run_www (tn_name):
- return run_common(tn_name)
+ blob = run_common(tn_name)
+ return template("tn_act_done.html",name=tn_name,action="deleted",
+ blob=blob);
if __name__ == "__main__":
+ if len(sys.argv) != 2:
+ print >> sys.stderr, "Usage: %s name" % sys.argv[0]
+ sys.exit(1)
tn_name = sys.argv[1]
- run_common(tn_name)
+ blob = run_common(tn_name)
+ print "Tenant %s deleted." % tn_name
+ cfs_utils.print_blob(blob)
diff --git a/scripts/cfs_enable_tenant.py b/scripts/cfs_enable_tenant.py
old mode 100644
new mode 100755
index 3855546..146c785
--- a/scripts/cfs_enable_tenant.py
+++ b/scripts/cfs_enable_tenant.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
import os
import re
@@ -48,8 +49,7 @@ def run_common (tn_name, vol_list):
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
- return template("tn_act_done.html",name=tn_name,action="enabled",
- blob=blob)
+ return blob
def run_www (tn_name):
vol_list = []
@@ -57,8 +57,17 @@ def run_www (tn_name):
print prop
if prop.startswith("VOLUME_"):
vol_list.append(prop[7:])
- return run_common(tn_name,vol_list)
+ blob = run_common(tn_name,vol_list)
+ return template("tn_act_done.html",name=tn_name,action="enabled",
+ blob=blob)
if __name__ == "__main__":
- run_common(sys.argv[1],sys.argv[2:])
+ if len(sys.argv) < 2:
+ print >> sys.stderr, "Usage: %s name [volume...]" % sys.argv[0]
+ sys.exit(1)
+ name = sys.argv[1]
+ vols = sys.argv[2:]
+ blob = run_common(name,vols)
+ print "Volumes %s enabled for %s." % (string.join(vols,","), name)
+ cfs_utils.print_blob(blob)
diff --git a/scripts/cfs_paths.py b/scripts/cfs_paths.py
index c497fe7..bf47ebd 100644
--- a/scripts/cfs_paths.py
+++ b/scripts/cfs_paths.py
@@ -8,6 +8,6 @@ log_dir = "/var/log/cloudfs"
pid_dir = "/var/run/cloudfs"
idle_subdir = os.path.join(pid_dir,".idle_ports")
used_subdir = os.path.join(pid_dir,".used_ports")
-volfile_re = re.compile("[^.]+\.(.*)\.bricks-")
+volfile_re = re.compile("(?P<vol>[^.]+)\.(?P<node>.+)\.(?P<path>[^.]+)\.vol")
CLOUDFSD_PORT = 8080
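For reference, the new volfile_re captures the volume, node, and brick-path
components by name; cfs_start_volume.py and cfs_utils.py pick out the node with
m.group("node"). A quick illustration with a made-up filename in the
vol.node.path.vol shape the pattern expects:

    import re

    volfile_re = re.compile("(?P<vol>[^.]+)\.(?P<node>.+)\.(?P<path>[^.]+)\.vol")
    m = volfile_re.match("myvol.server1.bricks-b1.vol")
    print m.group("vol"), m.group("node"), m.group("path")
    # -> myvol server1 bricks-b1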
diff --git a/scripts/cfs_rm_volume.py b/scripts/cfs_rm_volume.py
old mode 100644
new mode 100755
index c871f65..bfab9b2
--- a/scripts/cfs_rm_volume.py
+++ b/scripts/cfs_rm_volume.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
import os
import re
@@ -17,7 +18,7 @@ def rm_local (vname):
db_obj["vt_"+vname] = ""
return "rm_local(%s) OK on %s" % (vname, socket.gethostname())
-def run_www (vname):
+def run_common (vname):
# TBD: all sorts of input-validity checking
nodes_for_vol = cfs_utils.get_nodes_for_vol(vname)
cmd = "volume delete %s" % vname
@@ -37,9 +38,18 @@ def run_www (vname):
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
+ return blob
+
+def run_www (vname):
+ blob = run_common(vname)
return template("rm_vol_done.html",name=vname,blob=blob);
if __name__ == "__main__":
+ if len(sys.argv) != 2:
+ print >> sys.stderr, "Usage: %s volume_name" % sys.argv[0]
+ sys.exit(0)
name = sys.argv[1]
- run_www(name)
+ blob = run_common(name)
+ print "Volume %s removed." % name
+ cfs_utils.print_blob(blob)
diff --git a/scripts/cfs_start_volume.py b/scripts/cfs_start_volume.py
old mode 100644
new mode 100755
index 031735a..11c992b
--- a/scripts/cfs_start_volume.py
+++ b/scripts/cfs_start_volume.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
import glob
import os
@@ -54,7 +55,7 @@ def scan_gfs_volfiles(vol_name):
for vf in glob.iglob(my_glob):
m = cfs_paths.volfile_re.match(os.path.basename(vf))
if m:
- this_host = m.groups(1)[0]
+ this_host = m.group("node")
this_addr = socket.getaddrinfo(this_host, 0)[0][4][0]
for addr in my_addrs:
if this_addr == addr[4][0]:
@@ -167,7 +168,7 @@ def start_local (vol_name):
return "start_local(%s) returned %d on %s\n" % (
vol_name, retcode, socket.gethostname())
-def run_www (vol_name):
+def run_common (vol_name):
node_list = cfs_utils.get_nodes_for_vol(vol_name)
blob = []
for node in node_list:
@@ -181,7 +182,18 @@ def run_www (vol_name):
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
+ return blob
+
+def run_www (vol_name):
+ blob = run_common(vol_name)
return template("start_done.html",name=vol_name,blob=blob)
if __name__ == "__main__":
- run_www(sys.argv[1])
+ if len(sys.argv) != 2:
+ print >> sys.stderr, "Usage: %s volume_name" % sys.argv[0]
+ sys.exit(1)
+ name = sys.argv[1]
+ blob = run_common(name)
+ print "Volume %s started." % name
+ cfs_utils.print_blob(blob)
+
diff --git a/scripts/cfs_stop_volume.py b/scripts/cfs_stop_volume.py
old mode 100644
new mode 100755
index 0851d6b..b93b438
--- a/scripts/cfs_stop_volume.py
+++ b/scripts/cfs_stop_volume.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
import fileinput
import glob
@@ -42,7 +43,7 @@ def stop_local (vol_name):
vol_name, retcode, socket.gethostname())
-def run_www (vol_name):
+def run_common (vol_name):
node_list = cfs_utils.get_nodes_for_vol(vol_name)
blob = []
for node in node_list:
@@ -56,7 +57,18 @@ def run_www (vol_name):
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
+ return blob
+
+def run_www (vol_name):
+ blob = run_common(vol_name)
return template("stop_done.html",name=vol_name,blob=blob)
if __name__ == "__main__":
+ if len(sys.argv) != 2:
+ print >> sys.stderr, "Usage: %s volume_name" % sys.argv[0]
+ sys.exit(1)
+ name = sys.argv[1]
+ blob = run_common(name)
+ print "Volume %s stopped." % name
+ cfs_utils.print_blob(blob)
run_www(sys.argv[1])
diff --git a/scripts/cfs_utils.py b/scripts/cfs_utils.py
index 9655b58..39a36e0 100644
--- a/scripts/cfs_utils.py
+++ b/scripts/cfs_utils.py
@@ -151,7 +151,7 @@ def get_nodes_for_vol (vol_name):
for vf in glob.iglob(my_glob):
m = cfs_paths.volfile_re.match(os.path.basename(vf))
if m:
- node_list.add(m.groups(1)[0])
+ node_list.add(m.group("node"))
return node_list
# Open our configuration database.
@@ -161,3 +161,10 @@ def get_nodes_for_vol (vol_name):
def open_db ():
db_path = os.path.join(cfs_paths.info_dir,"config.db")
return dbm.open(db_path,"c",0600)
+
+# Print a "blob" of [node, [line1, line2]] tuples/lists for CLI/debugging.
+def print_blob (blob):
+ for node, text in blob:
+ print "= %s =" % node
+ for line in text:
+ print line
commit dae24a26c655bdb57a38f95f72d7b92ccdb88b6d
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu May 12 14:12:51 2011 -0400
Interface to remove volumes.
diff --git a/ToDo b/ToDo
index 24fa991..fec4353 100644
--- a/ToDo
+++ b/ToDo
@@ -1,5 +1,4 @@
= High Priority =
-Interface to remove volumes
Make sure CLI equivalents work
= Medium Priority =
diff --git a/scripts/cfs_rm_volume.py b/scripts/cfs_rm_volume.py
new file mode 100644
index 0000000..c871f65
--- /dev/null
+++ b/scripts/cfs_rm_volume.py
@@ -0,0 +1,45 @@
+
+import os
+import re
+import socket
+import string
+import sys
+import urllib
+import urllib2
+
+from bottle import request, template
+import cfs_paths
+import cfs_utils
+
+def rm_local (vname):
+ db_obj = cfs_utils.open_db()
+ # Can't actually delete, but clearing is functionally equivalent.
+ db_obj["vt_"+vname] = ""
+ return "rm_local(%s) OK on %s" % (vname, socket.gethostname())
+
+def run_www (vname):
+ # TBD: all sorts of input-validity checking
+ nodes_for_vol = cfs_utils.get_nodes_for_vol(vname)
+ cmd = "volume delete %s" % vname
+ kid = cfs_utils.run_cmd("gluster",cmd)
+ blob = [["gluster",kid.communicate("y\n")]]
+ sts = kid.wait()
+ if sts:
+ return template("rm_vol_done.html", name=vname, blob=blob)
+ for node in nodes_for_vol:
+ scratch = [node,[]]
+ if socket.gethostbyname(node) in cfs_utils.local_addrs:
+ url_obj = [rm_local(vname)]
+ else:
+ url = "http://%s:%d/volumes/%s/rm_local" % (
+ node, cfs_paths.CLOUDFSD_PORT, vname)
+ url_obj = urllib2.urlopen(url)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ return template("rm_vol_done.html",name=vname,blob=blob);
+
+if __name__ == "__main__":
+ name = sys.argv[1]
+ run_www(name)
+
diff --git a/scripts/cloudfsd.py b/scripts/cloudfsd.py
index 2cc7259..8255a44 100755
--- a/scripts/cloudfsd.py
+++ b/scripts/cloudfsd.py
@@ -13,6 +13,7 @@ import cfs_utils
import cfs_add_node
import cfs_add_directory
import cfs_add_volume
+import cfs_rm_volume
import cfs_start_volume
import cfs_stop_volume
import cfs_add_tenant
@@ -68,6 +69,14 @@ def add_vol_local ():
vname = request.forms.get("vname")
return cfs_add_volume.add_local(vname)
+@route("volumes/:name/remove")
+def rm_volume (name):
+ return cfs_rm_volume.run_www(name)
+
+@route("/volumes/:name/rm_local")
+def rm_vol_local (name):
+ return cfs_rm_volume.rm_local(name)
+
@route("/volumes/:vol_name/start")
def start_volume(vol_name):
return cfs_start_volume.run_www(vol_name)
diff --git a/scripts/views/rm_vol_done.html b/scripts/views/rm_vol_done.html
new file mode 100644
index 0000000..cede10d
--- /dev/null
+++ b/scripts/views/rm_vol_done.html
@@ -0,0 +1,15 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+%for node, output in blob:
+ <p><b>{{node}}</b></p>
+ <pre>
+ %for line in output:
+ {{line}}
+ %end
+ </pre>
+%end
+<p>Volume {{name}} deleted.</p>
+<p><a href="/volumes">Back to volume configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
diff --git a/scripts/views/volumes.html b/scripts/views/volumes.html
index 7595052..cc54f92 100644
--- a/scripts/views/volumes.html
+++ b/scripts/views/volumes.html
@@ -14,7 +14,8 @@
<p><b>{{vol_name}}</b>
<a href="/volumes/{{vol_name}}/tenants">tenants</a>
<a href="/volumes/{{vol_name}}/start">start</a>
- <a href="/volumes/{{vol_name}}/stop">stop</a></p>
+ <a href="/volumes/{{vol_name}}/stop">stop</a>
+ <a href="/volumes/{{vol_name}}/remove">remove</a></p>
<ul>
%for brick in brick_list:
<li>{{brick}}</li>
commit 81fc70f17bddfa72c77487351927aae382075a6c
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu May 12 13:35:26 2011 -0400
Move log/pid/port directories.
diff --git a/ToDo b/ToDo
index db4cf21..24fa991 100644
--- a/ToDo
+++ b/ToDo
@@ -1,5 +1,4 @@
= High Priority =
-Create log/pid directories, move port links to /var/run
Interface to remove volumes
Make sure CLI equivalents work
diff --git a/scripts/cfs_paths.py b/scripts/cfs_paths.py
index 27e11b0..c497fe7 100644
--- a/scripts/cfs_paths.py
+++ b/scripts/cfs_paths.py
@@ -4,10 +4,10 @@ import os
gfs_dir = "/var/lib/glusterd"
info_dir = "/var/lib/cloudfs"
-idle_subdir = os.path.join(info_dir,".idle_ports")
-used_subdir = os.path.join(info_dir,".used_ports")
log_dir = "/var/log/cloudfs"
pid_dir = "/var/run/cloudfs"
+idle_subdir = os.path.join(pid_dir,".idle_ports")
+used_subdir = os.path.join(pid_dir,".used_ports")
volfile_re = re.compile("[^.]+\.(.*)\.bricks-")
CLOUDFSD_PORT = 8080
diff --git a/scripts/cfs_start_volume.py b/scripts/cfs_start_volume.py
index c0b8ce7..031735a 100644
--- a/scripts/cfs_start_volume.py
+++ b/scripts/cfs_start_volume.py
@@ -17,12 +17,18 @@ import cfs_utils
# Make sure the volume directory exists and has the right stuff in it.
def check_volume_directory(vol_name):
if not os.path.exists(cfs_paths.info_dir):
- os.mkdir(cfs_paths.info_dir)
- os.mkdir(cfs_paths.idle_subdir)
+ os.mkdir(cfs_paths.info_dir,0700)
+ if not os.path.exists(cfs_paths.log_dir):
+ os.mkdir(cfs_paths.log_dir,0700)
+ if not os.path.exists(cfs_paths.pid_dir):
+ os.mkdir(cfs_paths.pid_dir,0700)
+ if not os.path.exists(cfs_paths.idle_subdir):
+ os.mkdir(cfs_paths.idle_subdir,0700)
for i in range(24010, 24030):
fp = open("%s/%d" % (cfs_paths.idle_subdir, i), "w")
fp.close()
- os.mkdir(cfs_paths.used_subdir)
+ if not os.path.exists(cfs_paths.used_subdir):
+ os.mkdir(cfs_paths.used_subdir,0700)
vol_dir = "%s/%s" % (cfs_paths.info_dir, vol_name)
if not os.path.exists(vol_dir):
commit 70633f02811345a63e67f6767b1be444248678a3
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu May 12 12:59:00 2011 -0400
Allow user to add non-mountpoint directories.
diff --git a/ToDo b/ToDo
index e51d2bd..db4cf21 100644
--- a/ToDo
+++ b/ToDo
@@ -1,5 +1,4 @@
= High Priority =
-Use arbitrary directories, not just mountpoints
Create log/pid directories, move port links to /var/run
Interface to remove volumes
Make sure CLI equivalents work
@@ -7,6 +6,7 @@ Make sure CLI equivalents work
= Medium Priority =
SSL
Sanitize volume/tenant names etc. to avoid XSS/injection
+Add/document code to generate brick list for volume creation
Handle IPv6, multi-homed hosts, localhost in scan_gfs_volfiles
Add "bread crumbs" everywhere for navigation
Interface to remove servers
@@ -16,6 +16,7 @@ Sanity checking for volume configs
Handle zero-length tenant list without alice/bob filler
= Low Priority =
+Interface to remove non-mountpoint directories
Import GlusterFS volume to CloudFS
Eliminate clean_and_run in favor of cfs_utils.run_cmd
diff --git a/scripts/cfs_add_directory.py b/scripts/cfs_add_directory.py
new file mode 100644
index 0000000..508f501
--- /dev/null
+++ b/scripts/cfs_add_directory.py
@@ -0,0 +1,120 @@
+
+import os
+import re
+import socket
+import string
+import sys
+import urllib
+import urllib2
+
+from bottle import request, template
+import cfs_utils
+import cfs_paths
+
+c_range_re = re.compile("\[(?P<first>.)-(?P<last>.)\\]")
+n_range_re = re.compile("\[(?P<first>[1-9][0-9]*)-(?P<last>[1-9][0-9]*)\\]")
+a_range_re = re.compile("\{(?P<stuff>[^}]+)\}")
+
+def expand (path):
+ old_paths = [path]
+ # Keep going until there are no more expansions.
+ while True:
+ new_paths = []
+ n_subs = 0
+ for p in old_paths:
+ m = c_range_re.search(p)
+ if m:
+ pfx = p[:m.start()]
+ sfx = p[m.end():]
+ first = m.group("first")
+ last = m.group("last")
+ cur = first
+ while True:
+ munged = pfx + cur + sfx
+ new_paths.append(munged)
+ if cur == last:
+ break
+ cur = chr(ord(cur)+1)
+ n_subs += 1
+ continue
+ m = n_range_re.search(p)
+ if m:
+ pfx = p[:m.start()]
+ sfx = p[m.end():]
+ first = int(m.group("first"))
+ last = int(m.group("last"))
+ cur = first
+ while True:
+ munged = "%s%d%s" % (pfx, cur, sfx)
+ new_paths.append(munged)
+ if cur == last:
+ break
+ cur += 1
+ n_subs += 1
+ continue
+ m = a_range_re.search(p)
+ if m:
+ pfx = p[:m.start()]
+ sfx = p[m.end():]
+ for alt in m.group("stuff").split(","):
+ munged = pfx + alt + sfx
+ new_paths.append(munged)
+ n_subs += 1
+ continue
+ new_paths.append(p)
+ if not n_subs:
+ return new_paths
+ old_paths = new_paths
+ # Go back and do it again.
+
+def add_local (path):
+ # TBD: all sorts of input-validity checking
+ db_obj = cfs_utils.open_db()
+ # Get the current list of extra paths.
+ paths_by_node = {}
+ for node in [k for k in db_obj.keys() if k.startswith("xp_")]:
+ xp_list = db_obj[k].split(",")
+ if xp_list == [""]:
+ xp_list = []
+ paths_by_node[k[3:]] = set(xp_list)
+ # Add the user's paths.
+ for p in expand(path):
+ parts = string.split(p,":")
+ if len(parts) != 2:
+ return "add_local(%s) rejected %s on %s" %(
+ path, p, socket.gethostname())
+ node, dir = parts
+ if paths_by_node.has_key(node):
+ paths_by_node[node].add(dir)
+ else:
+ paths_by_node[node] = set([dir])
+ # Store back the results.
+ for key, value in paths_by_node.iteritems():
+ db_obj["xp_"+key] = string.join(value,",")
+ return "add_local(%s) OK on %s" % (path, socket.gethostname())
+
+def run_common (path):
+ node_list = cfs_utils.get_members()
+ blob = []
+ for node in node_list:
+ scratch = [node, []]
+ if socket.gethostbyname(node) in cfs_utils.local_addrs:
+ url_obj = [add_local(path)]
+ else:
+ url = "http://%s:%d/volumes/add_dir_local" % (
+ node, cfs_paths.CLOUDFSD_PORT)
+ data = urllib.urlencode([("path",path)])
+ url_obj = urllib2.urlopen(url,data=data)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ return template("add_dir_done.html",path=path,blob=blob);
+
+def run_www ():
+ path = request.forms.get("path")
+ return run_common(path)
+
+if __name__ == "__main__":
+ path = sys.argv[1]
+ #run_common(tn_name,tn_pw)
+ print add_local(path)
diff --git a/scripts/cfs_add_volume.py b/scripts/cfs_add_volume.py
index 1835b51..4af663b 100644
--- a/scripts/cfs_add_volume.py
+++ b/scripts/cfs_add_volume.py
@@ -1,14 +1,22 @@
import os
import re
+import socket
import string
import sys
+import urllib
+import urllib2
from bottle import request, template
+import cfs_paths
import cfs_utils
-def run_common (vname, vtype, vcount, bricks):
+def add_local (vname):
+ db_obj = cfs_utils.open_db()
+ db_obj["vt_"+vname] = ""
+ return "add_local(%s) OK on %s" % (vname, socket.gethostname())
+def run_common (vname, vtype, vcount, bricks):
# TBD: all sorts of input-validity checking
# TBD: construct sane brick list
cmd = "volume create %s" % vname
@@ -19,18 +27,20 @@ def run_common (vname, vtype, vcount, bricks):
if sts:
return template("add_vol_fail.html", name=vname,
action="gluster", status=sts)
-
- db_obj = cfs_utils.open_db()
- db_obj["vt_"+vname] = ""
-
- # TBD: generating the cloudfs volfiles (client+server) and starting the
- # glusterfsd daemons should be part of a separate "start" action, so
- # that it's done with a tenant list that's as up-to-date as possible.
- # It should also be done using the cloudfsd fetch/map infrastructure so
- # that cfs_mount.py/mount.cloudfs can actually work. Doing it as a
- # hack on top of Gluster's fetching/portmapping mess, even if it seems
- # to work in some bogus test environment, is a total waste of time.
- return template("add_vol_done.html",name=vname);
+ blob = []
+ for node in cfs_utils.get_nodes_for_vol(vname):
+ scratch = [node,[]]
+ if socket.gethostbyname(node) in cfs_utils.local_addrs:
+ url_obj = [add_local(vname)]
+ else:
+ url = "http://%s:%d/volumes/add_vol_local" % (
+ node, cfs_paths.CLOUDFSD_PORT)
+ data = urllib.urlencode([("vname",vname)])
+ url_obj = urllib2.urlopen(url,data=data)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ return template("add_vol_done.html",name=vname,blob=blob);
def run_www ():
volume_id = request.forms.get("VOLUMEID")
diff --git a/scripts/cfs_start_volume.py b/scripts/cfs_start_volume.py
index 26f0980..c0b8ce7 100644
--- a/scripts/cfs_start_volume.py
+++ b/scripts/cfs_start_volume.py
@@ -122,6 +122,9 @@ def create_tenant_dirs(vol_file):
for opt_dir_line in opt_dir_lines:
tokens = re.split(' ', string.lstrip(opt_dir_line))
path = string.rstrip(tokens[2])
+ ppath = os.path.dirname(path)
+ if not os.path.exists(ppath):
+ os.mkdir(ppath)
if not os.path.exists(path):
os.mkdir(path)
opt_dir_lines.close()
diff --git a/scripts/cfs_utils.py b/scripts/cfs_utils.py
index 94be076..9655b58 100644
--- a/scripts/cfs_utils.py
+++ b/scripts/cfs_utils.py
@@ -128,8 +128,19 @@ def get_mounts (brick_list):
if bits[4] not in good_fs_types:
continue
mount = "%s:%s" % (node_ip, bits[2])
- scratch.append((bits[2],dir_in_use(brick_list,mount)))
+ inuse = dir_in_use(brick_list,mount)
+ scratch.append((bits[2],inuse))
volumes_on_nodes[node_ip] = scratch
+ # Add user-defined non-mountpoint directories.
+ db_obj = open_db()
+ for node in [k[3:] for k in db_obj.keys() if k.startswith("xp_")]:
+ dirs = db_obj["xp_"+node].split(",")
+ if dirs == [""]:
+ dirs = []
+ for dir in dirs:
+ mount = "%s:%s" % (node, dir)
+ inuse = dir_in_use(brick_list,mount)
+ volumes_on_nodes[node].append((dir,inuse))
return volumes_on_nodes
# Get the list of nodes that are serving (any part of) a specific volume.
diff --git a/scripts/cloudfsd.py b/scripts/cloudfsd.py
index 99e715a..2cc7259 100755
--- a/scripts/cloudfsd.py
+++ b/scripts/cloudfsd.py
@@ -11,6 +11,7 @@ import cfs_utils
import volmap
import cfs_utils
import cfs_add_node
+import cfs_add_directory
import cfs_add_volume
import cfs_start_volume
import cfs_stop_volume
@@ -49,10 +50,24 @@ def show_volumes():
# TBD: allow adding arbitrary directories instead of just mountpoints
return dict(bricks=brick_list,mounts=mount_list)
+@post("/volumes/add_directory")
+def add_directory ():
+ return cfs_add_directory.run_www()
+
+@post("/volumes/add_dir_local")
+def add_dir_local ():
+ path = request.forms.get("path")
+ return cfs_add_directory.add_local(path)
+
@post("/volumes/add_volume")
def add_volume():
return cfs_add_volume.run_www()
+@post("/volumes/add_vol_local")
+def add_vol_local ():
+ vname = request.forms.get("vname")
+ return cfs_add_volume.add_local(vname)
+
@route("/volumes/:vol_name/start")
def start_volume(vol_name):
return cfs_start_volume.run_www(vol_name)
diff --git a/scripts/styles/cfgmain.css b/scripts/styles/cfgmain.css
index dc79620..82043e8 100644
--- a/scripts/styles/cfgmain.css
+++ b/scripts/styles/cfgmain.css
@@ -25,16 +25,19 @@ body {
table {
border: 2px black solid;
+ background-color: #cccccc;
}
th {
+ text-align: left;
border: 1px black solid;
- padding: 3px;
+ padding: 5px;
background-color: #b0b0ff;
}
td {
+ text-align: left;
border: 1px black solid;
- padding: 2px;
+ padding: 5px;
background-color: #d0d0ff;
}
diff --git a/scripts/styles/provlist.css b/scripts/styles/provlist.css
index 70132a4..2bd771f 100644
--- a/scripts/styles/provlist.css
+++ b/scripts/styles/provlist.css
@@ -1,25 +1,9 @@
-.header {
- float: left;
- width: 100%;
- background-color: #f4f4f4;
-}
-
-.wrapper {
- position: relative;
- float: left;
- left: 0.00%;
- width: 100.00%;
- background-color: #cccccc;
-}
-
tr.d0 td {
background-color: #CC9999;
- color: black;
}
tr.d1 td {
background-color: #9999CC;
- color: black;
}
.footer {
diff --git a/scripts/views/add_dir_done.html b/scripts/views/add_dir_done.html
new file mode 100644
index 0000000..122e4e5
--- /dev/null
+++ b/scripts/views/add_dir_done.html
@@ -0,0 +1,15 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+<p>Directory {{path}} added.</p>
+%for node, output in blob:
+ <p><b>{{node}}</b></p>
+ <pre>
+ %for line in output:
+ {{line}}
+ %end
+ </pre>
+%end
+<p><a href="/volumes">Back to volume configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
diff --git a/scripts/views/add_vol_done.html b/scripts/views/add_vol_done.html
index d6ddf58..6044fa6 100644
--- a/scripts/views/add_vol_done.html
+++ b/scripts/views/add_vol_done.html
@@ -1,6 +1,14 @@
<html><head>
<meta http-equiv="pragma" content="no-cache">
</head><body>
+%for node, output in blob:
+ <p><b>{{node}}</b></p>
+ <pre>
+ %for line in output:
+ {{line}}
+ %end
+ </pre>
+%end
<p>Volume {{name}} created.</p>
<p><a href="/volumes">Back to volume configuration</a></p>
<p><a href="/cfgmain">Back to main menu</a></p>
diff --git a/scripts/views/tenants.html b/scripts/views/tenants.html
index c55dc0a..6b5d069 100644
--- a/scripts/views/tenants.html
+++ b/scripts/views/tenants.html
@@ -10,7 +10,7 @@
</div>
<div class="content">
<h2>Existing Tenants:</h2>
- <table cellspacing="0">
+ <table>
<tr><th>Name</th><th>Password</th><th></th></tr>
%tn_list = tenants.keys()
%tn_list.sort()
diff --git a/scripts/views/volumes.html b/scripts/views/volumes.html
index 489778f..7595052 100644
--- a/scripts/views/volumes.html
+++ b/scripts/views/volumes.html
@@ -22,10 +22,21 @@
</ul>
%end
<hr>
-<h2>Provision a Volume From Available Bricks:</h2>
+<h2>Add Directories</h2>
+<p>Add non-mountpoint directories here. The format is host:path, with wildcard support in the following forms:</p>
+<ul>
+<li>character ranges, e.g. server[a-j]:/bricks/brick1</li>
+<li>number ranges, e.g. server1:/bricks/brick[9-10]</li>
+<li>alternatives, e.g. {alpha,bravo}:/bricks/brick20</li>
+</ul>
+<p>Expansions are attempted in the order shown, and the results are stored persistently as alternatives to the server mount points that are used as a default.</p>
+<form method="post" action="/volumes/add_directory">
+ <input type="text" name="path" size="40" />
+ <input type="submit" value="Add" />
+</form>
+<hr />
+<h2>Provision a Volume From Available Directories:</h2>
<form method="post" name="provision" action="/volumes/add_volume">
-<div class="header"><hr></div>
-<div class="wrapper">
<table>
%color_index = 0
%for node, mount_list in mounts.iteritems():
@@ -45,7 +56,6 @@
</tr>
%end
</table>
-</div>
<div class="footer"><hr></div>
Volume Type: <input type="radio" name="TYPE" value="plain" checked />Plain
<input type="radio" name="TYPE" value="replica" />Replicated
commit 399e471f61b471d0531f8b1cf06502304ec035a3
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed May 11 17:13:54 2011 -0400
Make volume-start code use same DB as tenant add/remove/enable.
diff --git a/ToDo b/ToDo
index 8ed0469..e51d2bd 100644
--- a/ToDo
+++ b/ToDo
@@ -1,5 +1,4 @@
= High Priority =
-Use real tenant list for start/mount (no default, handle zero)
Use arbitrary directories, not just mountpoints
Create log/pid directories, move port links to /var/run
Interface to remove volumes
diff --git a/scripts/cfs_enable_tenant.py b/scripts/cfs_enable_tenant.py
index 26ca099..3855546 100644
--- a/scripts/cfs_enable_tenant.py
+++ b/scripts/cfs_enable_tenant.py
@@ -17,13 +17,17 @@ def enable_local (tn_name, vol_list):
db_obj = cfs_utils.open_db()
db_obj["tv_"+tn_name] = vol_list
for vol in [v[3:] for v in db_obj.keys() if v.startswith("vt_")]:
- vt_list = set(db_obj["vt_"+vol].split(","))
+ vt_list = db_obj["vt_"+vol].split(",")
+ if vt_list == ['']:
+ # This is one of the stupider split() behaviors.
+ vt_list = []
+ vt_list = set(vt_list)
if vol in vol_list.split(","):
print "enabling %s" % vol
- vt_list.add(vol)
+ vt_list.add(tn_name)
else:
print "disabling %s" % vol
- vt_list.discard(vol)
+ vt_list.discard(tn_name)
db_obj["vt_"+vol] = string.join(vt_list,",")
return "Volumes enabled for %s on %s" % (tn_name, socket.gethostname())
diff --git a/scripts/cfs_start_volume.py b/scripts/cfs_start_volume.py
index 4c4e7bb..26f0980 100644
--- a/scripts/cfs_start_volume.py
+++ b/scripts/cfs_start_volume.py
@@ -18,12 +18,6 @@ import cfs_utils
def check_volume_directory(vol_name):
if not os.path.exists(cfs_paths.info_dir):
os.mkdir(cfs_paths.info_dir)
- user_file = open("%s/%s" % (cfs_paths.info_dir, "default_users"), "w")
- # TBD: big gaping security hole until other code can deal
- # with having zero users defined.
- user_file.write("alice password1\nbob password2\n")
- user_file.flush()
- user_file.close()
os.mkdir(cfs_paths.idle_subdir)
for i in range(24010, 24030):
fp = open("%s/%d" % (cfs_paths.idle_subdir, i), "w")
@@ -79,18 +73,16 @@ def allocate_port(vol_file):
# everything that uses this is in Python we could just make it a pickle/shelf
# or whatever, but it would all go away with a real volume database so it's not
# worth the trouble to re-do it now.
-def parse_user_file(vol_name):
- try:
- user_file = open("%s/%s/users" % (cfs_paths.info_dir, vol_name), "r")
- except IOError:
- user_file = open("%s/default_users" % cfs_paths.info_dir, "r")
-
+def parse_user_file (vol_name):
+ db_obj = cfs_utils.open_db()
users = []
- for line in user_file.readlines():
- space = line.find(" ")
- if space == -1:
- print >> sys.stderr, "Bad line in userfile: %s" % line
- users.append([line[:space],line[space+1:-1]])
+ user_list = db_obj["vt_"+vol_name].split(",")
+ if user_list == ['']:
+ user_list = []
+ for name in user_list:
+ print "found user %s" % name
+ pw = db_obj["tp_"+name]
+ users.append([name,pw])
return users
def cloudify_server (input, output, users, port):
@@ -137,6 +129,8 @@ def create_tenant_dirs(vol_file):
def start_local (vol_name):
vol_base = check_volume_directory(vol_name)
users = parse_user_file(vol_name)
+ if not len(users):
+ return "no users for %s on %s" % (vol_name, socket.gethostname())
# TBD: deal with more than one brick on the same server
vf = scan_gfs_volfiles(vol_name)
new_vf = "%s/%s" % (vol_base, os.path.basename(vf))
commit 6713c3178f4ca54f7959eb663e7f0b3795ad77e7
Author: Jeff Darcy <jdarcy@redhat.com>
Date: Wed May 11 16:21:01 2011 -0400
More progress on volume and tenant interfaces.
Volume interfaces now work across nodes, without inducing reentrancy, and
also actually start the glusterfs daemons. Tenant interfaces include list,
add, remove, and enable volumes.
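The "without inducing reentrancy" remark describes the dispatch pattern the diffs below add to the run_www functions: when the target node is this machine, the *_local worker is called directly instead of issuing an HTTP request back into the same cloudfsd process, which a single-threaded server could never answer. A condensed, illustrative sketch of that pattern (local_worker and the route suffix are parameters here purely for brevity; the real scripts hard-code start_local/stop_local):

import socket
import urllib2

import cfs_paths
import cfs_utils

def run_www(vol_name, local_worker, action="start_local"):
    # local_worker is the per-node function, e.g. cfs_start_volume.start_local;
    # action is the matching cloudfsd route suffix.
    blob = []
    for node in cfs_utils.get_nodes_for_vol(vol_name):
        if socket.gethostbyname(node) in cfs_utils.local_addrs:
            # This node: call the worker directly, no HTTP request to self.
            lines = [local_worker(vol_name)]
        else:
            url = "http://%s:%d/volumes/%s/%s" % (
                    node, cfs_paths.CLOUDFSD_PORT, vol_name, action)
            lines = urllib2.urlopen(url)
        blob.append([node, [l for l in lines]])
    return blob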
diff --git a/ToDo b/ToDo
index c769c94..8ed0469 100644
--- a/ToDo
+++ b/ToDo
@@ -1,19 +1,20 @@
= High Priority =
-Interface to start/stop volumes (includes volfile generation and mkdir)
-Get mount.cloudfs to work
-Interface to add/remove tenants
-Interface to map between tenants and volumes
+Use real tenant list for start/mount (no default, handle zero)
Use arbitrary directories, not just mountpoints
+Create log/pid directories, move port links to /var/run
Interface to remove volumes
-Handle IPv6, multi-homed hosts, localhost in scan_gfs_volfiles
-SSL
+Make sure CLI equivalents work
= Medium Priority =
+SSL
+Sanitize volume/tenant names etc. to avoid XSS/injection
+Handle IPv6, multi-homed hosts, localhost in scan_gfs_volfiles
+Add "bread crumbs" everywhere for navigation
Interface to remove servers
+Add mapping from volumes to tenants (already have vice versa)
Deal with glusterd startup consistently
Sanity checking for volume configs
Handle zero-length tenant list without alice/bob filler
-Handle multiple bricks per server when generating volfiles
= Low Priority =
Import GlusterFS volume to CloudFS
diff --git a/scripts/cfs_add_tenant.py b/scripts/cfs_add_tenant.py
new file mode 100644
index 0000000..d36837e
--- /dev/null
+++ b/scripts/cfs_add_tenant.py
@@ -0,0 +1,48 @@
+
+import os
+import re
+import socket
+import string
+import sys
+import urllib
+import urllib2
+
+from bottle import request, template
+import cfs_utils
+import cfs_paths
+
+def add_local (tn_name, tn_pw):
+ # TBD: all sorts of input-validity checking
+ db_obj = cfs_utils.open_db()
+ db_obj["tp_"+tn_name] = tn_pw
+ db_obj["tv_"+tn_name] = ""
+ return "add_local(%s) OK on %s" % (tn_name, socket.gethostname())
+
+def run_common (tn_name, tn_pw):
+ node_list = cfs_utils.get_members()
+ blob = []
+ for node in node_list:
+ scratch = [node, []]
+ if socket.gethostbyname(node) in cfs_utils.local_addrs:
+ url_obj = [add_local(tn_name,tn_pw)]
+ else:
+ url = "http://%s:%d/tenants/add_local" % (
+ node, cfs_paths.CLOUDFSD_PORT)
+ data = urllib.urlencode([("tn_name",tn_name),
+ ("tn_pw",tn_pw)])
+ url_obj = urllib2.urlopen(url,data=data)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ return template("tn_act_done.html",name=tn_name,action="added",
+ blob=blob);
+
+def run_www ():
+ tn_name = request.forms.get("tn_name")
+ tn_pw = request.forms.get("tn_pw")
+ return run_common(tn_name,tn_pw)
+
+if __name__ == "__main__":
+ tn_name = sys.argv[1]
+ tn_pw = sys.argv[2]
+ run_common(tn_name,tn_pw)
diff --git a/scripts/cfs_add_volume.py b/scripts/cfs_add_volume.py
index a9e7617..1835b51 100644
--- a/scripts/cfs_add_volume.py
+++ b/scripts/cfs_add_volume.py
@@ -1,6 +1,6 @@
-import re
import os
+import re
import string
import sys
@@ -20,36 +20,8 @@ def run_common (vname, vtype, vcount, bricks):
return template("add_vol_fail.html", name=vname,
action="gluster", status=sts)
- ## make the cloudfs vol files with cloudfs
- # TBD: this should go away in favor of generating these at start
- # time; see big TBD near the end of the function
- cmd = "init %s /var/lib/glusterd/cloudfs.tenants" % vname
- for node in cfs_utils.get_members()[1:]:
- sts = cfs_utils.run_cmd("cloudfs",cmd,host=node).wait()
- if sts:
- act = "cloudfs on %s" % node
- return template("add_vol_fail.html", name=vname,
- action=act, status=sts)
-
- ## make the dirs on each node/volume
- ## first get all the tenants
- tenants = []
- tenants.append("junk")
- tenant_file = open("/var/lib/glusterd/cloudfs.tenants","r")
- for tenantline in tenant_file:
- scratch = re.split(' ', tenantline)
- tenants.append(scratch[0])
- ## now make the dirs on every volume
- for b in bricks:
- scratch = re.split(':', b)
- for tenant in tenants :
- node = scratch[0]
- cmd = "%s/%s" % (scratch[1], tenant)
- sts = cfs_utils.run_cmd("mkdir",cmd,host=node).wait()
- if sts:
- act = "mkdir on %s" % node
- return template("add_vol_fail.html", name=vname,
- action=act, status=sts)
+ db_obj = cfs_utils.open_db()
+ db_obj["vt_"+vname] = ""
# TBD: generating the cloudfs volfiles (client+server) and starting the
# glusterfsd daemons should be part of a separate "start" action, so
diff --git a/scripts/cfs_delete_tenant.py b/scripts/cfs_delete_tenant.py
new file mode 100644
index 0000000..ebbd35d
--- /dev/null
+++ b/scripts/cfs_delete_tenant.py
@@ -0,0 +1,42 @@
+
+import os
+import re
+import socket
+import string
+import sys
+import urllib2
+
+from bottle import request, template
+import cfs_utils
+import cfs_paths
+
+def delete_local (tn_name):
+ # TBD: all sorts of input-validity checking
+ db_obj = cfs_utils.open_db()
+ del db_obj["tp_"+tn_name]
+ del db_obj["tv_"+tn_name]
+ return "delete_local(%s) OK on %s" % (tn_name, socket.gethostname())
+
+def run_common (tn_name):
+ node_list = cfs_utils.get_members()
+ blob = []
+ for node in node_list:
+ scratch = [node, []]
+ if socket.gethostbyname(node) in cfs_utils.local_addrs:
+ url_obj = [delete_local(tn_name)]
+ else:
+ url = "http://%s:%d/tenants/%s/delete_local" % (
+ node, cfs_paths.CLOUDFSD_PORT, tn_name)
+ url_obj = urllib2.urlopen(url)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ return template("tn_act_done.html",name=tn_name,action="deleted",
+ blob=blob);
+
+def run_www (tn_name):
+ return run_common(tn_name)
+
+if __name__ == "__main__":
+ tn_name = sys.argv[1]
+ run_common(tn_name)
diff --git a/scripts/cfs_enable_tenant.py b/scripts/cfs_enable_tenant.py
new file mode 100644
index 0000000..26ca099
--- /dev/null
+++ b/scripts/cfs_enable_tenant.py
@@ -0,0 +1,60 @@
+
+import os
+import re
+import socket
+import string
+import sys
+import urllib
+import urllib2
+
+from bottle import request, template
+import cfs_paths
+import cfs_utils
+
+def enable_local (tn_name, vol_list):
+
+ # TBD: all sorts of input-validity checking
+ db_obj = cfs_utils.open_db()
+ db_obj["tv_"+tn_name] = vol_list
+ for vol in [v[3:] for v in db_obj.keys() if v.startswith("vt_")]:
+ vt_list = set(db_obj["vt_"+vol].split(","))
+ if vol in vol_list.split(","):
+ print "enabling %s" % vol
+ vt_list.add(vol)
+ else:
+ print "disabling %s" % vol
+ vt_list.discard(vol)
+ db_obj["vt_"+vol] = string.join(vt_list,",")
+ return "Volumes enabled for %s on %s" % (tn_name, socket.gethostname())
+
+def run_common (tn_name, vol_list):
+ vol_list = string.join(vol_list,",")
+ node_list = cfs_utils.get_members()
+ blob = []
+ for node in node_list:
+ scratch = [node, []]
+ if socket.gethostbyname(node) in cfs_utils.local_addrs:
+ url_obj = [enable_local(tn_name,vol_list)]
+ else:
+ url = "http://%s:%d/tenants/enable_local" % (
+ node, cfs_paths.CLOUDFSD_PORT)
+ data = urllib.urlencode([("tn_name",tn_name),
+ ("vol_list",vol_list)])
+ url_obj = urllib2.urlopen(url,data=data)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ return template("tn_act_done.html",name=tn_name,action="enabled",
+ blob=blob)
+
+def run_www (tn_name):
+ vol_list = []
+ for prop in request.forms.iterkeys():
+ print prop
+ if prop.startswith("VOLUME_"):
+ vol_list.append(prop[7:])
+ return run_common(tn_name,vol_list)
+
+if __name__ == "__main__":
+ run_common(sys.argv[1],sys.argv[2:])
+
diff --git a/scripts/cfs_mount.py b/scripts/cfs_mount.py
index b6d64d1..af4ad06 100755
--- a/scripts/cfs_mount.py
+++ b/scripts/cfs_mount.py
@@ -1,7 +1,12 @@
+#!/usr/bin/env python
+
import json
+import os
import sys
import urllib2
+import cfs_paths
+import cfs_utils
import volfilter
# Simple cache of brick-to-port mappings, so we don't have to keep re-fetching
@@ -56,7 +61,11 @@ if __name__ == "__main__":
xl.opts["username"] = username
xl.opts["password"] = password
- volfilter.generate(graph,last,sys.stdout)
+ outfile = os.path.join(cfs_paths.info_dir,"%s.vol"%volume)
+ volfilter.generate(graph,last,open(outfile,"w"))
+ cmd = "-f %s %s" % (outfile, mount)
+ print "running glusterfs %s" % cmd
+ cfs_utils.run_cmd("glusterfs",cmd).wait()
diff --git a/scripts/cfs_start_volume.py b/scripts/cfs_start_volume.py
index 989bff9..4c4e7bb 100644
--- a/scripts/cfs_start_volume.py
+++ b/scripts/cfs_start_volume.py
@@ -69,8 +69,8 @@ def allocate_port(vol_file):
for pf in glob.iglob("%s/*" % cfs_paths.idle_subdir):
base = os.path.basename(pf)
new_name = "%s/%s" % (cfs_paths.used_subdir, base)
- os.symlink(vol_file, new_name)
os.remove(pf)
+ os.symlink(vol_file, new_name)
return base
else:
raise RuntimeError, "no ports available"
@@ -168,16 +168,16 @@ def run_www (vol_name):
node_list = cfs_utils.get_nodes_for_vol(vol_name)
blob = []
for node in node_list:
- url = "http://%s:%d/volumes/%s/start_local" % (
- node, cfs_paths.CLOUDFSD_PORT, vol_name)
scratch = [node, []]
- print "opening %s" % url
- url_obj = urllib2.urlopen(url)
+ if socket.gethostbyname(node) in cfs_utils.local_addrs:
+ url_obj = [start_local(vol_name)]
+ else:
+ url = "http://%s:%d/volumes/%s/start_local" % (
+ node, cfs_paths.CLOUDFSD_PORT, vol_name)
+ url_obj = urllib2.urlopen(url)
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
- print "done with %s" % url
- print blob
return template("start_done.html",name=vol_name,blob=blob)
if __name__ == "__main__":
diff --git a/scripts/cfs_stop_volume.py b/scripts/cfs_stop_volume.py
index ebfa47a..0851d6b 100644
--- a/scripts/cfs_stop_volume.py
+++ b/scripts/cfs_stop_volume.py
@@ -46,16 +46,16 @@ def run_www (vol_name):
node_list = cfs_utils.get_nodes_for_vol(vol_name)
blob = []
for node in node_list:
- url = "http://%s:%d/volumes/%s/stop_local" % (
- node, cfs_paths.CLOUDFSD_PORT, vol_name)
scratch = [node, []]
- print "opening %s" % url
- url_obj = urllib2.urlopen(url)
+ if socket.gethostbyname(node) in cfs_utils.local_addrs:
+ url_obj = [stop_local(vol_name)]
+ else:
+ url = "http://%s:%d/volumes/%s/stop_local" % (
+ node, cfs_paths.CLOUDFSD_PORT, vol_name)
+ url_obj = urllib2.urlopen(url)
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
- print "done with %s" % url
- print blob
return template("stop_done.html",name=vol_name,blob=blob)
if __name__ == "__main__":
diff --git a/scripts/cfs_utils.py b/scripts/cfs_utils.py
index fd981cd..94be076 100644
--- a/scripts/cfs_utils.py
+++ b/scripts/cfs_utils.py
@@ -1,3 +1,4 @@
+import dbm
import glob
import os
import socket
@@ -43,16 +44,28 @@ def run_cmd (program, args, host=None, filters=[]):
if host:
cmd = make_remote(host,cmd)
if len(filters):
- print "executing %s using shell" % cmd
- child = subprocess.Popen(cmd,shell=True,
+ #print "executing %s using shell" % cmd
+ child = subprocess.Popen(cmd,close_fds=True,shell=True,
stdout=subprocess.PIPE)
else:
- print "executing %s without shell" % cmd
- child = subprocess.Popen(cmd.split(" "),shell=False,
+ #print "executing %s without shell" % cmd
+ cmd = cmd.split()
+ child = subprocess.Popen(cmd,close_fds=True,shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
return child
+# Figure out what our local IP addresses are, for future reference.
+# TBD: IPv6 yadda yadda
+local_addrs = []
+for line in run_cmd("ip","addr").stdout:
+ parts = line.lstrip().split(" ")
+ if parts[0] == "inet":
+ addr = parts[1]
+ if not addr.startswith("127."):
+ local_addrs.append(addr.split("/")[0])
+#print local_addrs
+
# NB this node is always first in the list
def get_members ():
peer_ips = run_cmd("gluster","peer status",
@@ -129,3 +142,11 @@ def get_nodes_for_vol (vol_name):
if m:
node_list.add(m.groups(1)[0])
return node_list
+
+# Open our configuration database.
+# We use dbm explicitly because we don't want to get into a situation where
+# a DB was created using some more "advanced" library and then restored/moved
+# without that library present.
+def open_db ():
+ db_path = os.path.join(cfs_paths.info_dir,"config.db")
+ return dbm.open(db_path,"c",0600)
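The local-address discovery added to cfs_utils.py above works by scraping "ip addr" output for "inet" lines and dropping the prefix length. A stand-alone illustration against a sample line (the address is a placeholder consistent with examples used elsewhere in the series):

# A typical (abbreviated) "ip addr" line looks like:
#     inet 192.168.122.55/24 brd 192.168.122.255 scope global eth0
sample = "    inet 192.168.122.55/24 brd 192.168.122.255 scope global eth0\n"

parts = sample.lstrip().split(" ")
if parts[0] == "inet":
    addr = parts[1]                  # "192.168.122.55/24"
    if not addr.startswith("127."):
        print addr.split("/")[0]     # strip the prefix length -> 192.168.122.55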
diff --git a/scripts/cloudfsd.py b/scripts/cloudfsd.py
index 15d0a5c..99e715a 100755
--- a/scripts/cloudfsd.py
+++ b/scripts/cloudfsd.py
@@ -1,39 +1,22 @@
#!/usr/bin/python
-from bottle import route, post, run, view, debug, CherryPyServer
+from bottle import route, post, run, view, debug, request
import os
import socket
import string
import cfs_paths
+import cfs_utils
import volmap
import cfs_utils
import cfs_add_node
import cfs_add_volume
import cfs_start_volume
import cfs_stop_volume
-
-@route("/:vol_name/fetch")
-def fetch_client_vf(vol_name):
- vf_path = "%s/vols/%s/%s-fuse.vol" % (cfs_paths.gfs_dir, vol_name, vol_name)
- return open(vf_path,"r")
-
-@route("/:vol_name/map")
-def map_paths(vol_name):
- return volmap.vol_map(vol_name)
-
-@route("/:user_name/adduser")
-def add_user(user_name):
- print "add user: " + user_name
-
-@route("/:user_name/deleteuser")
-def delete_user(user_name):
- print "delete user: " + user_name
-
-@route("/listusers")
-def list_users():
- print "list users"
+import cfs_add_tenant
+import cfs_delete_tenant
+import cfs_enable_tenant
@route("/")
@route("/cfg")
@@ -86,17 +69,61 @@ def stop_volume(vol_name):
def stop_local (vol_name):
return cfs_stop_volume.stop_local(vol_name)
-@route("/wwwaddtenant")
-def www_addtenant():
- print "www addtenant"
+# Used by mount.cloudfs
+@route("/:vol_name/fetch")
+def fetch_client_vf(vol_name):
+ vf_path = "%s/vols/%s/%s-fuse.vol" % (cfs_paths.gfs_dir, vol_name, vol_name)
+ return open(vf_path,"r")
-@post("/wwwdoaddtenant")
-def www_doaddtenant():
- print "www doaddtenant"
+# Used by mount.cloudfs
+@route("/:vol_name/map")
+def map_paths(vol_name):
+ return volmap.vol_map(vol_name)
-@route("/wwwlisttenants")
-def www_listtenants():
- print "www listtenants"
+@route("/tenants")
+@view("tenants.html")
+def show_tenants ():
+ db_obj = cfs_utils.open_db()
+ return dict(tenants=db_obj)
+
+@post("/tenants/add")
+def add_tenant():
+ return cfs_add_tenant.run_www()
+
+@post("/tenants/add_local")
+def add_tenant_local ():
+ tn_name = request.forms.get("tn_name")
+ tn_pw = request.forms.get("tn_pw")
+ return cfs_add_tenant.add_local(tn_name,tn_pw)
+
+@route("/tenants/:name/delete")
+def delete_tenant (name):
+ return cfs_delete_tenant.run_www(name)
+
+@route("/tenants/:tn_name/delete_local")
+def delete_tenant_local (tn_name):
+ return cfs_delete_tenant.delete_local(tn_name)
+
+@route("/tenants/:tn_name/volumes")
+@view("tenant_volumes.html")
+def show_tenant_volumes (tn_name):
+ db_obj = cfs_utils.open_db()
+ all_vols = [v[3:] for v in db_obj.keys() if v.startswith("vt_")]
+ print all_vols
+ active = db_obj["tv_"+tn_name].split(",")
+ active.sort()
+ print active
+ return dict(tn_name=tn_name,all_vols=all_vols,active=active)
+
+@post("/tenants/:tn_name/enable")
+def enable_tenant_volumes (tn_name):
+ return cfs_enable_tenant.run_www(tn_name)
+
+@post("/tenants/enable_local")
+def add_tenant_local ():
+ tn_name = request.forms.get("tn_name")
+ vol_list = request.forms.get("vol_list")
+ return cfs_enable_tenant.enable_local(tn_name,vol_list)
@route("/styles/:sheet")
def get_style (sheet):
@@ -104,5 +131,6 @@ def get_style (sheet):
if __name__ == "__main__":
debug(True)
- run(host='0.0.0.0',port=cfs_paths.CLOUDFSD_PORT,server=CherryPyServer)
+ #run(host='0.0.0.0',port=cfs_paths.CLOUDFSD_PORT,server=CherryPyServer)
+ run(host='0.0.0.0',port=cfs_paths.CLOUDFSD_PORT)
diff --git a/scripts/styles/cfgmain.css b/scripts/styles/cfgmain.css
index 364d88b..dc79620 100644
--- a/scripts/styles/cfgmain.css
+++ b/scripts/styles/cfgmain.css
@@ -21,3 +21,20 @@ body {
.content {
padding: 1em 5em;
}
+
+
+table {
+ border: 2px black solid;
+}
+
+th {
+ border: 1px black solid;
+ padding: 3px;
+ background-color: #b0b0ff;
+}
+
+td {
+ border: 1px black solid;
+ padding: 2px;
+ background-color: #d0d0ff;
+}
diff --git a/scripts/views/cfgmain.html b/scripts/views/cfgmain.html
index 3f46b78..059a8f9 100644
--- a/scripts/views/cfgmain.html
+++ b/scripts/views/cfgmain.html
@@ -10,7 +10,7 @@
<div class="content">
<p><a href="/cluster">Manage Servers</a></p>
<p><a href="/volumes">Manage Volumes</a></p>
- <p><a href="/wwwlisttenants">Manage Tenants</a></p>
+ <p><a href="/tenants">Manage Tenants</a></p>
</div>
</body></html>
diff --git a/scripts/views/tenant_volumes.html b/scripts/views/tenant_volumes.html
new file mode 100644
index 0000000..dd4eb09
--- /dev/null
+++ b/scripts/views/tenant_volumes.html
@@ -0,0 +1,25 @@
+<html><head>
+<title>_CloudFS Tenant Access_</title>
+<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
+<meta http-equiv="Pragma" content="no-cache" />
+<link href="/styles/cfgmain.css" rel="stylesheet" type="text/css" />
+</head><body>
+<div class="banner">
+<h1>Configure Volumes for {{tn_name}}</h1>
+</div>
+<div class="content">
+<h2>Volume List</h2>
+ <form method="post" action="/tenants/{{tn_name}}/enable">
+ <table cellspacing="0">
+ <tr><th>Volume Name</th><th></th></tr>
+ %for vol in all_vols:
+ <tr>
+ <td>{{vol}}</td>
+ %value = "checked" if (vol in active) else ""
+ <td><input type="checkbox" name="VOLUME_{{vol}}" {{value}} /></td>
+ </tr>
+ %end
+ </table>
+ <input type="submit" value="Update" />
+</div>
+</body></html>
diff --git a/scripts/views/tenants.html b/scripts/views/tenants.html
new file mode 100644
index 0000000..c55dc0a
--- /dev/null
+++ b/scripts/views/tenants.html
@@ -0,0 +1,33 @@
+<html><head>
+<title>_CloudFS Tenant Management_</title>
+<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
+<meta http-equiv="Pragma" content="no-cache" />
+<link href="/styles/cfgmain.css" rel="stylesheet" type="text/css" />
+<link href="/styles/provlist.css" rel="stylesheet" type="text/css" />
+</head><body>
+<div class="banner">
+<h1>CloudFS Tenant Management</h1>
+</div>
+<div class="content">
+<h2>Existing Tenants:</h2>
+ <table cellspacing="0">
+ <tr><th>Name</th><th>Password</th><th></th></tr>
+ %tn_list = tenants.keys()
+ %tn_list.sort()
+ %for tn_name in [t[3:] for t in tn_list if t.startswith("tp_")]:
+ %tn_pw = tenants["tp_"+tn_name]
+ <tr><td>{{tn_name}}</td><td>{{tn_pw}}</td>
+ <td><a href="/tenants/{{tn_name}}/volumes">volumes</a>
+ <a href="/tenants/{{tn_name}}/delete">delete</a></td>
+ </tr>
+ %end
+ </table>
+<hr>
+<h2>Add a Tenant:</h2>
+<form method="post" action="/tenants/add">
+ Tenant Name: <input type="text" name="tn_name" size="20" />
+ Tenant Password: <input type="text" name="tn_pw" size="20" />
+ <input type="submit" name="PROVISION" value="Add" />
+</form>
+</div>
+</body></html>
diff --git a/scripts/views/tn_act_done.html b/scripts/views/tn_act_done.html
new file mode 100644
index 0000000..5fb27ae
--- /dev/null
+++ b/scripts/views/tn_act_done.html
@@ -0,0 +1,15 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+<p>Tenant {{name}} {{action}}.</p>
+%for node, output in blob:
+ <p><b>{{node}}</b></p>
+ <pre>
+ %for line in output:
+ {{line}}
+ %end
+ </pre>
+%end
+<p><a href="/tenants">Back to tenant configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
diff --git a/scripts/views/volumes.html b/scripts/views/volumes.html
index f4d7436..489778f 100644
--- a/scripts/views/volumes.html
+++ b/scripts/views/volumes.html
@@ -12,6 +12,7 @@
<h2>Existing Volumes:</h2>
%for vol_name, brick_list in bricks.iteritems():
<p><b>{{vol_name}}</b>
+ <a href="/volumes/{{vol_name}}/tenants">tenants</a>
<a href="/volumes/{{vol_name}}/start">start</a>
<a href="/volumes/{{vol_name}}/stop">stop</a></p>
<ul>
commit 491da1780e466cfd60c3df0c0b063094f39bd104
Author: Jeff Darcy <jdarcy@redhat.com>
Date: Tue May 10 13:57:17 2011 -0400
Got volume start/stop interfaces to work.
diff --git a/ToDo b/ToDo
new file mode 100644
index 0000000..c769c94
--- /dev/null
+++ b/ToDo
@@ -0,0 +1,21 @@
+= High Priority =
+Interface to start/stop volumes (includes volfile generation and mkdir)
+Get mount.cloudfs to work
+Interface to add/remove tenants
+Interface to map between tenants and volumes
+Use arbitrary directories, not just mountpoints
+Interface to remove volumes
+Handle IPv6, multi-homed hosts, localhost in scan_gfs_volfiles
+SSL
+
+= Medium Priority =
+Interface to remove servers
+Deal with glusterd startup consistently
+Sanity checking for volume configs
+Handle zero-length tenant list without alice/bob filler
+Handle multiple bricks per server when generating volfiles
+
+= Low Priority =
+Import GlusterFS volume to CloudFS
+Eliminate clean_and_run in favor of cfs_utils.run_cmd
+
diff --git a/scripts/cfs_mount.py b/scripts/cfs_mount.py
index af0715d..b6d64d1 100755
--- a/scripts/cfs_mount.py
+++ b/scripts/cfs_mount.py
@@ -4,8 +4,6 @@ import urllib2
import volfilter
-CLOUDFSD_PORT = 8080
-
# Simple cache of brick-to-port mappings, so we don't have to keep re-fetching
# the maps from the same host if it has multiple bricks.
class mapper:
@@ -16,7 +14,7 @@ class mapper:
mydict = self.cache[host]
else:
url = "http://%s:%d/%s/map" % \
- (host, CLOUDFSD_PORT, volume)
+ (host, cfs_paths.CLOUDFSD_PORT, volume)
mydict = json.load(urllib2.urlopen(url))
self.cache[host] = mydict
if mydict.has_key(subv):
@@ -34,7 +32,7 @@ if __name__ == "__main__":
(host, volume, username, password, mount) = sys.argv[1:6]
# Fetch the GlusterFS client-side volfile.
- url = "http://%s:%d/%s/fetch" % (host, CLOUDFSD_PORT, volume)
+ url = "http://%s:%d/%s/fetch" % (host, cfs_paths.CLOUDFSD_PORT, volume)
vol_file = urllib2.urlopen(url)
# Load the volfile and clean out some of the crud.
diff --git a/scripts/cfs_paths.py b/scripts/cfs_paths.py
new file mode 100644
index 0000000..27e11b0
--- /dev/null
+++ b/scripts/cfs_paths.py
@@ -0,0 +1,13 @@
+
+import re
+import os
+
+gfs_dir = "/var/lib/glusterd"
+info_dir = "/var/lib/cloudfs"
+idle_subdir = os.path.join(info_dir,".idle_ports")
+used_subdir = os.path.join(info_dir,".used_ports")
+log_dir = "/var/log/cloudfs"
+pid_dir = "/var/run/cloudfs"
+volfile_re = re.compile("[^.]+\.(.*)\.bricks-")
+CLOUDFSD_PORT = 8080
+
diff --git a/scripts/cfs_start_volume.py b/scripts/cfs_start_volume.py
new file mode 100644
index 0000000..989bff9
--- /dev/null
+++ b/scripts/cfs_start_volume.py
@@ -0,0 +1,184 @@
+
+import glob
+import os
+import re
+import socket
+import string
+import subprocess
+import sys
+import urllib2
+
+from bottle import template
+
+import volfilter
+import cfs_paths
+import cfs_utils
+
+# Make sure the volume directory exists and has the right stuff in it.
+def check_volume_directory(vol_name):
+ if not os.path.exists(cfs_paths.info_dir):
+ os.mkdir(cfs_paths.info_dir)
+ user_file = open("%s/%s" % (cfs_paths.info_dir, "default_users"), "w")
+ # TBD: big gaping security hole until other code can deal
+ # with having zero users defined.
+ user_file.write("alice password1\nbob password2\n")
+ user_file.flush()
+ user_file.close()
+ os.mkdir(cfs_paths.idle_subdir)
+ for i in range(24010, 24030):
+ fp = open("%s/%d" % (cfs_paths.idle_subdir, i), "w")
+ fp.close()
+ os.mkdir(cfs_paths.used_subdir)
+
+ vol_dir = "%s/%s" % (cfs_paths.info_dir, vol_name)
+ if not os.path.exists(vol_dir):
+ os.mkdir(vol_dir)
+ return vol_dir
+
+# Figure out which of the GlusterFS volfiles belong to us. Volfiles might have
+# names based on partial host names, fully qualified names, or addresses, or
+# even a mix thanks to "gluster peer probe" silliness. To deal with all of
+# these possibilities, we resolve everything to addresses and compare those.
+# ### bear in mind that depending on how a machine is set up, the IP addrs
+# ### for a node might include 127.0.0.1 and ::1 first
+def scan_gfs_volfiles(vol_name):
+ ret = ""
+ my_name = os.uname()[1]
+ # Getaddrinfo returns a list of tuples, each:
+ # family, socktype, proto, canonname, sockaddr
+ # We extract the sockaddr of the first item, and the IP addr from that
+ # TBD: handle IPv6, multi-homed hosts, etc.
+ # TBD: skip loopback addresses based on note above
+ my_addrs = socket.getaddrinfo(my_name, 0)
+ my_glob = "%s/vols/%s/%s.*.vol" % (cfs_paths.gfs_dir, vol_name, vol_name)
+ for vf in glob.iglob(my_glob):
+ m = cfs_paths.volfile_re.match(os.path.basename(vf))
+ if m:
+ this_host = m.groups(1)[0]
+ this_addr = socket.getaddrinfo(this_host, 0)[0][4][0]
+ for addr in my_addrs:
+ if this_addr == addr[4][0]:
+ ret = vf
+ break
+ return ret
+
+# Allocate a port for a server to run on. Right now we do this in a very
+# "clever" way, by creating files to match ports and then grabbing a file here.
+# When we have a real volume database such games will be unnecessary.
+def allocate_port(vol_file):
+ for pf in glob.iglob("%s/*" % cfs_paths.idle_subdir):
+ base = os.path.basename(pf)
+ new_name = "%s/%s" % (cfs_paths.used_subdir, base)
+ os.symlink(vol_file, new_name)
+ os.remove(pf)
+ return base
+ else:
+ raise RuntimeError, "no ports available"
+
+# Parse the user file into a list of [name,password] sub-lists. Since
+# everything that uses this is in Python we could just make it a pickle/shelf
+# or whatever, but it would all go away with a real volume database so it's not
+# worth the trouble to re-do it now.
+def parse_user_file(vol_name):
+ try:
+ user_file = open("%s/%s/users" % (cfs_paths.info_dir, vol_name), "r")
+ except IOError:
+ user_file = open("%s/default_users" % cfs_paths.info_dir, "r")
+
+ users = []
+ for line in user_file.readlines():
+ space = line.find(" ")
+ if space == -1:
+ print >> sys.stderr, "Bad line in userfile: %s" % line
+ users.append([line[:space],line[space+1:-1]])
+ return users
+
+def cloudify_server (input, output, users, port):
+ print "# Cloudifying server %s" % input
+ graph, last = volfilter.load(input)
+ last = volfilter.cleanup(last,graph)
+
+ if last.type != "protocol/server":
+ print >> sys.stderr, "Top translator must be protocol/server"
+ sys.exit(1)
+ old_stack = last.subvols[0]
+
+ bad_opts = []
+ for opt in last.opts.iterkeys():
+ if opt[:9] == "auth.addr":
+ bad_opts.append(opt)
+ elif opt[:10] == "auth.login":
+ bad_opts.append(opt)
+ for opt in bad_opts:
+ print "# stripping auth option %s = %s" % (opt, last.opts[opt])
+ del last.opts[opt]
+
+ last.subvols = []
+ for user, pw in users:
+ new_stack = volfilter.copy_stack(old_stack,user)
+ last.subvols.append(new_stack)
+ last.opts["auth.login.%s.allow"%new_stack.name] = user
+ last.opts["auth.login.%s.password"%new_stack.name] = pw
+
+ last.opts["transport.socket.listen-port"] = port
+ volfilter.generate(graph,last,output)
+
+def create_tenant_dirs(vol_file):
+ cmd = "/bin/grep \"option directory\" %s" % vol_file
+ path = ""
+ opt_dir_lines = os.popen(cmd)
+ for opt_dir_line in opt_dir_lines:
+ tokens = re.split(' ', string.lstrip(opt_dir_line))
+ path = string.rstrip(tokens[2])
+ if not os.path.exists(path):
+ os.mkdir(path)
+ opt_dir_lines.close()
+
+def start_local (vol_name):
+ vol_base = check_volume_directory(vol_name)
+ users = parse_user_file(vol_name)
+ # TBD: deal with more than one brick on the same server
+ vf = scan_gfs_volfiles(vol_name)
+ new_vf = "%s/%s" % (vol_base, os.path.basename(vf))
+ outfile = open(new_vf, "w")
+ port = allocate_port(new_vf)
+ cloudify_server(vf, outfile, users, port)
+ outfile.flush()
+ outfile.close()
+ v_key = string.replace(os.path.basename(new_vf), ".vol", "")
+ # print "v_key: %s" % v_key
+ # make dirs for each of the users
+ create_tenant_dirs(new_vf)
+ # actually start the server
+ # note: pid file in /var/lib/glusterd/vols/<vol_name>/... so that
+ # gluster can find it
+ logfile = os.path.join(cfs_paths.log_dir,"%s.log"%v_key)
+ pidfile = os.path.join(cfs_paths.pid_dir,"%s.pid"%v_key)
+ cmd = "--volfile %s" % new_vf
+ cmd += (" --log-file %s" % logfile)
+ cmd += (" --pid-file %s" % pidfile)
+ cmd += (" --xlator-option %s-server.transport.socket.listen-port=%s" % (
+ vol_name, port))
+ print "whole command = glusterfsd %s" % cmd
+ retcode = cfs_utils.run_cmd("glusterfsd",cmd).wait()
+ return "start_local(%s) returned %d on %s\n" % (
+ vol_name, retcode, socket.gethostname())
+
+def run_www (vol_name):
+ node_list = cfs_utils.get_nodes_for_vol(vol_name)
+ blob = []
+ for node in node_list:
+ url = "http://%s:%d/volumes/%s/start_local" % (
+ node, cfs_paths.CLOUDFSD_PORT, vol_name)
+ scratch = [node, []]
+ print "opening %s" % url
+ url_obj = urllib2.urlopen(url)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ print "done with %s" % url
+ print blob
+ return template("start_done.html",name=vol_name,blob=blob)
+
+if __name__ == "__main__":
+ run_www(sys.argv[1])
diff --git a/scripts/cfs_stop_volume.py b/scripts/cfs_stop_volume.py
new file mode 100644
index 0000000..ebfa47a
--- /dev/null
+++ b/scripts/cfs_stop_volume.py
@@ -0,0 +1,62 @@
+
+import fileinput
+import glob
+import os
+import socket
+import subprocess
+import sys
+import urllib2
+
+from bottle import template
+
+import cfs_paths
+import cfs_utils
+
+def kill_daemon (vol_name):
+ myglob = os.path.join(cfs_paths.pid_dir,"%s.*"%vol_name)
+ for f in glob.iglob(myglob):
+ fp = open(f,"r")
+ pid = fp.read()[:-1]
+ print "killing %s" % pid
+ cfs_utils.run_cmd("kill",pid).wait()
+ break
+ return 0
+
+def recycle_port (path):
+ print "recycling %s" % path
+ port_num = os.path.basename(path)
+ os.unlink(path)
+ fp = open("%s/%s" % (cfs_paths.idle_subdir, port_num), "w")
+ fp.close()
+
+def stop_local (vol_name):
+ retcode = kill_daemon(vol_name)
+ for symlink in glob.glob(cfs_paths.used_subdir + "/*"):
+ vol_link = os.readlink(symlink)
+ vol = os.path.basename(vol_link)
+ tokens = vol.split(".")
+ if vol_name == tokens[0]:
+ recycle_port(symlink)
+ break
+ return "stop_local(%s) returned %d on %s\n" % (
+ vol_name, retcode, socket.gethostname())
+
+
+def run_www (vol_name):
+ node_list = cfs_utils.get_nodes_for_vol(vol_name)
+ blob = []
+ for node in node_list:
+ url = "http://%s:%d/volumes/%s/stop_local" % (
+ node, cfs_paths.CLOUDFSD_PORT, vol_name)
+ scratch = [node, []]
+ print "opening %s" % url
+ url_obj = urllib2.urlopen(url)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ print "done with %s" % url
+ print blob
+ return template("stop_done.html",name=vol_name,blob=blob)
+
+if __name__ == "__main__":
+ run_www(sys.argv[1])
diff --git a/scripts/cfs_utils.py b/scripts/cfs_utils.py
index 09e5ace..fd981cd 100644
--- a/scripts/cfs_utils.py
+++ b/scripts/cfs_utils.py
@@ -1,7 +1,10 @@
+import glob
import os
import socket
import subprocess
+import cfs_paths
+
# The list of filesystems that work is much shorter than the list of
# filesystems that don't support xattrs, already-remote filesystems (e.g. NFS),
# pseudo-filesystems (e.g. devfs) or other things that won't work for one
@@ -116,3 +119,13 @@ def get_mounts (brick_list):
volumes_on_nodes[node_ip] = scratch
return volumes_on_nodes
+# Get the list of nodes that are serving (any part of) a specific volume.
+def get_nodes_for_vol (vol_name):
+ node_list = set()
+ my_glob = "%s/vols/%s/%s.*.vol" % (cfs_paths.gfs_dir,
+ vol_name, vol_name)
+ for vf in glob.iglob(my_glob):
+ m = cfs_paths.volfile_re.match(os.path.basename(vf))
+ if m:
+ node_list.add(m.groups(1)[0])
+ return node_list
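get_nodes_for_vol, like scan_gfs_volfiles earlier in this commit, recovers the serving host from GlusterFS brick-volfile names using volfile_re from cfs_paths.py above. Assuming the usual <volname>.<host>.<brick-path-with-slashes-as-dashes>.vol naming of that era, the match works like this (the file name is a hypothetical example consistent with the server1:/bricks/A style brick lists shown later):

import re

# volfile_re as defined in cfs_paths.py
volfile_re = re.compile("[^.]+\.(.*)\.bricks-")

# Hypothetical brick volfile for volume vol_A, brick server1:/bricks/A
name = "vol_A.server1.bricks-A.vol"
m = volfile_re.match(name)
if m:
    print m.groups(1)[0]    # -> server1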
diff --git a/scripts/cloudfsd.py b/scripts/cloudfsd.py
index e510823..15d0a5c 100755
--- a/scripts/cloudfsd.py
+++ b/scripts/cloudfsd.py
@@ -1,32 +1,22 @@
#!/usr/bin/python
-from bottle import route, post, run, view, debug
+from bottle import route, post, run, view, debug, CherryPyServer
import os
import socket
import string
-import paths
-import volstart
-import volstop
+import cfs_paths
import volmap
import cfs_utils
import cfs_add_node
import cfs_add_volume
-
-CLOUDFSD_PORT = 8080
-
-@route("/:vol_name/start")
-def start_server(vol_name):
- volstart.vol_start(vol_name)
-
-@route("/:vol_name/stop")
-def stop_server(vol_name):
- volstop.vol_stop(vol_name)
+import cfs_start_volume
+import cfs_stop_volume
@route("/:vol_name/fetch")
def fetch_client_vf(vol_name):
- vf_path = "%s/vols/%s/%s-fuse.vol" % (paths.gfs_dir, vol_name, vol_name)
+ vf_path = "%s/vols/%s/%s-fuse.vol" % (cfs_paths.gfs_dir, vol_name, vol_name)
return open(vf_path,"r")
@route("/:vol_name/map")
@@ -80,6 +70,22 @@ def show_volumes():
def add_volume():
return cfs_add_volume.run_www()
+@route("/volumes/:vol_name/start")
+def start_volume(vol_name):
+ return cfs_start_volume.run_www(vol_name)
+
+@route("/volumes/:vol_name/start_local")
+def start_local (vol_name):
+ return cfs_start_volume.start_local(vol_name)
+
+@route("/volumes/:vol_name/stop")
+def stop_volume(vol_name):
+ return cfs_stop_volume.run_www(vol_name)
+
+@route("/volumes/:vol_name/stop_local")
+def stop_local (vol_name):
+ return cfs_stop_volume.stop_local(vol_name)
+
@route("/wwwaddtenant")
def www_addtenant():
print "www addtenant"
@@ -98,4 +104,5 @@ def get_style (sheet):
if __name__ == "__main__":
debug(True)
- run(host='',port=CLOUDFSD_PORT)
+ run(host='0.0.0.0',port=cfs_paths.CLOUDFSD_PORT,server=CherryPyServer)
+
diff --git a/scripts/paths.py b/scripts/paths.py
deleted file mode 100644
index 51bdafd..0000000
--- a/scripts/paths.py
+++ /dev/null
@@ -1,10 +0,0 @@
-
-import re
-import os
-
-gfs_dir = "/var/lib/glusterd"
-info_dir = "/var/lib/cloudfs"
-idle_subdir = "/var/lib/cloudfs/.idle_ports"
-used_subdir = "/var/lib/cloudfs/.used_ports"
-volfile_re = re.compile("[^.]+\.(.*)\.bricks-")
-
diff --git a/scripts/views/start_done.html b/scripts/views/start_done.html
new file mode 100644
index 0000000..5e35b6c
--- /dev/null
+++ b/scripts/views/start_done.html
@@ -0,0 +1,15 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+<p>Volume {{name}} started.</p>
+%for node, output in blob:
+ <p><b>{{node}}</b></p>
+ <pre>
+ %for line in output:
+ {{line}}
+ %end
+ </pre>
+%end
+<p><a href="/volumes">Back to volume configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
diff --git a/scripts/views/stop_done.html b/scripts/views/stop_done.html
new file mode 100644
index 0000000..69616c0
--- /dev/null
+++ b/scripts/views/stop_done.html
@@ -0,0 +1,15 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+<p>Volume {{name}} stopped.</p>
+%for node, output in blob:
+ <p><b>{{node}}</b></p>
+ <pre>
+ %for line in output:
+ {{line}}
+ %end
+ </pre>
+%end
+<p><a href="/volumes">Back to volume configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
diff --git a/scripts/views/volumes.html b/scripts/views/volumes.html
index 11c964c..f4d7436 100644
--- a/scripts/views/volumes.html
+++ b/scripts/views/volumes.html
@@ -11,11 +11,14 @@
<div class="content">
<h2>Existing Volumes:</h2>
%for vol_name, brick_list in bricks.iteritems():
- <p>{{vol_name}}
+ <p><b>{{vol_name}}</b>
+ <a href="/volumes/{{vol_name}}/start">start</a>
+ <a href="/volumes/{{vol_name}}/stop">stop</a></p>
+ <ul>
%for brick in brick_list:
- <br /> {{brick}}
+ <li>{{brick}}</li>
%end
- </p>
+ </ul>
%end
<hr>
<h2>Provision a Volume From Available Bricks:</h2>
diff --git a/scripts/volmap.py b/scripts/volmap.py
index 92b77e9..f5a81b2 100644
--- a/scripts/volmap.py
+++ b/scripts/volmap.py
@@ -4,13 +4,13 @@ import os
import re
import volfilter
-import paths
+import cfs_paths
def vol_map (vol_name):
- vol_dir = "%s/%s" % (paths.info_dir, vol_name)
+ vol_dir = "%s/%s" % (cfs_paths.info_dir, vol_name)
vol_re = re.compile(vol_dir+"/")
map = {}
- for link in glob.iglob("%s/*" % paths.used_subdir):
+ for link in glob.iglob("%s/*" % cfs_paths.used_subdir):
real_file = os.readlink(link)
m = vol_re.match(real_file)
if not m:
diff --git a/scripts/volstart.py b/scripts/volstart.py
deleted file mode 100644
index 3c699f7..0000000
--- a/scripts/volstart.py
+++ /dev/null
@@ -1,173 +0,0 @@
-
-import glob
-import os
-import re
-import socket
-import string
-import subprocess
-import sys
-
-import volfilter
-import paths
-
-# Make sure the volume directory exists and has the right stuff in it.
-def check_volume_directory(vol_name):
- if not os.path.exists(paths.info_dir):
- os.mkdir(paths.info_dir)
- user_file = open("%s/%s" % (paths.info_dir, "default_users"), "w")
- # TBD: big gaping security hole until other code can deal
- # with having zero users defined.
- user_file.write("alice password1\nbob password2\n")
- user_file.flush()
- user_file.close()
- os.mkdir(paths.idle_subdir)
- for i in range(24010, 24030):
- fp = open("%s/%d" % (paths.idle_subdir, i), "w")
- fp.close()
- os.mkdir(paths.used_subdir)
-
- vol_dir = "%s/%s" % (paths.info_dir, vol_name)
- if not os.path.exists(vol_dir):
- os.mkdir(vol_dir)
- return vol_dir
-
-# Figure out which of the GlusterFS volfiles belong to us. Volfiles might have
-# names based on partial host names, fully qualified names, or addresses, or
-# even a mix thanks to "gluster peer probe" silliness. To deal with all of
-# these possibilities, we resolve everything to addresses and compare those.
-# ### bear in mind that depending on how a machine is set up, the IP addrs
-# ### for a node might include 127.0.0.1 and ::1 first
-def scan_gfs_volfiles(vol_name):
- ret = ""
- my_name = os.uname()[1]
- # Getaddrinfo returns a list of tuples, each:
- # family, socktype, proto, canonname, sockaddr
- # We extract the sockaddr of the first item, and the IP addr from that
- # TBD: handle IPv6, multi-homed hosts, etc.
- # TBD: skip loopback addresses based on note above
- my_addrs = socket.getaddrinfo(my_name, 0)
- my_glob = "%s/vols/%s/%s.*.vol" % (paths.gfs_dir, vol_name, vol_name)
- for vf in glob.iglob(my_glob):
- m = paths.volfile_re.match(os.path.basename(vf))
- if m:
- this_host = m.groups(1)[0]
- this_addr = socket.getaddrinfo(this_host, 0)[0][4][0]
- for addr in my_addrs:
- if this_addr == addr[4][0]:
- ret = vf
- break
- return ret
-
-# Allocate a port for a server to run on. Right now we do this in a very
-# "clever" way, by creating files to match ports and then grabbing a file here.
-# When we have a real volume database such games will be unnecessary.
-def allocate_port(vol_file):
- for pf in glob.iglob("%s/*" % paths.idle_subdir):
- base = os.path.basename(pf)
- new_name = "%s/%s" % (paths.used_subdir, base)
- os.symlink(vol_file, new_name)
- os.remove(pf)
- return base
- else:
- raise RuntimeError, "no ports available"
-
-# Parse the user file into a list of [name,password] sub-lists. Since
-# everything that uses this is in Python we could just make it a pickle/shelf
-# or whatever, but it would all go away with a real volume database so it's not
-# worth the trouble to re-do it now.
-def parse_user_file(vol_name):
- try:
- user_file = open("%s/%s/users" % (paths.info_dir, vol_name), "r")
- except IOError:
- user_file = open("%s/default_users" % paths.info_dir, "r")
-
- users = []
- for line in user_file.readlines():
- space = line.find(" ")
- if space == -1:
- print >> sys.stderr, "Bad line in userfile: %s" % line
- users.append([line[:space],line[space+1:-1]])
- return users
-
-# Convert a single GlusterFS server volfile to its CloudFS form, with one
-# translator stack per tenant and "evil" translators stripped out. Some day
-# this will also involve adding translators (e.g. UID mapping) at the top of
-# each stack.
-def cloudify_volfile(input, output, users, port):
- graph, last = volfilter.load(input)
- last = volfilter.cleanup(last, graph)
-
- if last.type != "protocol/server":
- print >> sys.stderr, "Top translator must be protocol/server"
- sys.exit(1)
- old_stack = last.subvols[0]
-
- bad_opts = []
- for opt in last.opts.iterkeys():
- if opt[:9] == "auth.addr":
- bad_opts.append(opt)
- elif opt[:10] == "auth.login":
- bad_opts.append(opt)
- for opt in bad_opts:
- print "# stripping auth option %s = %s" % (opt, last.opts[opt])
- del last.opts[opt]
-
- last.subvols = []
- for user, pw in users:
- new_stack = volfilter.copy_stack(old_stack, user)
- last.subvols.append(new_stack)
- last.opts["auth.login.%s.allow" % new_stack.name] = user
- last.opts["auth.login.%s.password" % new_stack.name] = pw
-
- last.opts["transport.socket.listen-port"] = port
- volfilter.generate(graph, last, output)
-
-def create_tenant_dirs(vol_file):
- cmd = "/bin/grep \"option directory\" %s" % vol_file
- path = ""
- opt_dir_lines = os.popen(cmd)
- for opt_dir_line in opt_dir_lines:
- tokens = re.split(' ', string.lstrip(opt_dir_line))
- path = string.rstrip(tokens[2])
- if not os.path.exists(path):
- os.mkdir(path)
- opt_dir_lines.close()
- junkdir = os.path.dirname(path) + "/junk"
- if not os.path.exists(junkdir):
- os.mkdir(junkdir)
-
-def vol_start(vol_name):
- vol_base = check_volume_directory(vol_name)
- users = parse_user_file(vol_name)
- # TBD: deal with more than one brick on the same server
- vf = scan_gfs_volfiles(vol_name)
- new_vf = "%s/%s" % (vol_base, os.path.basename(vf))
- outfile = open(new_vf, "w")
- port = allocate_port(new_vf)
- cloudify_volfile(vf, outfile, users, port)
- outfile.flush()
- outfile.close()
- v_key = string.replace(os.path.basename(new_vf), ".vol", "")
- # print "v_key: %s" % v_key
- # make dirs for each of the users
- create_tenant_dirs(new_vf)
- # actually start the server
- # note: pid file in /var/lib/glusterd/vols/<vol_name>/... so that
- # gluster can find it
- cmd = "/usr/sbin/glusterfsd --volfile=%s --xlator-option %s-server.listen-port=%s --pid-file=/var/lib/glusterd/vols/%s/run/%s.pid --socket-file=/tmp/%s.socket --log-file=/var/log/glusterfs/bricks/%s.log" % (new_vf, vol_name, port, vol_name, v_key, v_key, vol_name)
- # before 3.1.4 there were --brick-name and --brick-port for use by
- # the gluster port mapper. These were secret/hidden cmdline options.
- try:
- p = subprocess.Popen(cmd, close_fds=True, shell=True)
- retcode = os.waitpid(p.pid, 0)[1]
- if retcode < 0:
- print >>sys.stderr, "killed ", -retcode
- except ValueError, v:
- print >>sys.stderr, "value error: ", v
- except OSError, o:
- print >>sys.stderr, "os error: ", o
- except NameError, n:
- print >>sys.stderr, "name error: ", n
- except:
- print >>sys.stderr, "unknown error: ", sys.exc_info()[0]
-
diff --git a/scripts/volstop.py b/scripts/volstop.py
deleted file mode 100644
index 068baeb..0000000
--- a/scripts/volstop.py
+++ /dev/null
@@ -1,45 +0,0 @@
-
-import fileinput
-import glob
-import os
-import subprocess
-import sys
-
-import paths
-
-def kill_daemon(vol_name, vol_id):
- cooked = vol_id.rsplit(".", 1)
- pid_file_name = paths.gfs_dir + "/vols/" + vol_name + "/run/" + cooked[0] + ".pid"
- for pid in fileinput.input(pid_file_name):
- cmd = "/bin/kill " + pid
- try:
- p = subprocess.Popen(cmd, close_fds=True, shell=True);
- retcode = os.waitpid(p.pid, 0)[1]
- if retcode < 0:
- print >>sys.stderr, "killed ", -retcode
- except ValueError, v:
- print >>sys.stderr, "value error: ", v
- except OSError, o:
- print >>sys.stderr, "os error: ", o
- except NameError, n:
- print >>sys.stderr, "name error: ", n
- except:
- print >>sys.stderr, "unknown error: ", sys.exc_info()[0]
- fileinput.close()
-
-def recycle_port(path):
- port_num = os.path.basename(path)
- os.unlink(path)
- fp = open("%s/%s" % (paths.idle_subdir, port_num), "w")
- fp.close()
-
-def vol_stop(vol_name):
- for symlink in glob.glob(paths.used_subdir + "/*"):
- vol_link = os.readlink(symlink)
- vol = os.path.basename(vol_link)
- tokens = vol.split(".")
- if vol_name == tokens[0]:
- kill_daemon(vol_name, vol)
- recycle_port(symlink)
- break
-
commit 152e08b73f136a05c26fd90190d0654c9f0993ea
Author: Jeff Darcy <jdarcy@redhat.com>
Date: Tue May 10 12:40:29 2011 -0400
Main task was to use routing and templates instead of commingling logic
with presentation, and coalesce common functionality into a library instead
of duplicating it, all for maintainability. Many other fixes/changes have
been rolled in, including:
form handling that actually works (i.e. not parsing stdin)
consistently include self when getting cluster member list
include last brick in each volume
add missing parameters (e.g. brick_in_use could never have worked)
use "mount" and explicit list of valid fs types
don't let glusterd start our daemons, which it will do the wrong way
Still a long way to go, but it's a start.
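The routing-and-templates structure this message describes is bottle's decorator-based dispatch plus its template views, which cloudfsd.py and scripts/views/ adopt in the diffs below. A minimal, self-contained sketch of the pattern (route paths and form field names taken from the diffs; the template file would live under scripts/views/):

from bottle import route, post, view, run, request

@route("/volumes")
@view("volumes.html")          # rendered from scripts/views/volumes.html
def show_volumes():
    # The handler returns a dict of template variables instead of inline HTML.
    return dict(bricks={}, mounts={})

@post("/volumes/add_volume")
def add_volume():
    # Form fields arrive via request.forms rather than hand-parsed stdin.
    return "got %s" % request.forms.get("VOLUMEID")

if __name__ == "__main__":
    run(host="0.0.0.0", port=8080)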
diff --git a/scripts/cfs_add_node.py b/scripts/cfs_add_node.py
new file mode 100644
index 0000000..497db36
--- /dev/null
+++ b/scripts/cfs_add_node.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+import fileinput
+import re
+import os
+import socket
+import sys
+
+from bottle import request, template
+
+# TBD: move this into a library if we're going to re-use it elsewhere
+# (or remove it entirely since it seems rather pointless)
+def clean_and_run (cmd):
+ clean_cmd = cmd.replace("|", "").replace("&", "").replace(">", " ")
+ return os.system(clean_cmd + " > /dev/null 2>&1")
+
+def run_common (node_name):
+ host_name = socket.gethostname()
+
+ # derive the "real" node addr, e.g. if the user enters, e.g.,
+ # 192.168.122.55 and that is this IP for 'this' node then when we're
+ # done can_node_addr will either be 192.168.122.55 or 127.0.0.1.
+ # similarly if the user enters, e.g., <principalnode>.foo.bar.baz.com,
+ # the result will be the same
+ host_addr = socket.gethostbyname(host_name)
+ node_addr = socket.gethostbyname(node_name)
+ can_node_name = socket.gethostbyaddr(node_addr)
+ can_node_addr = socket.gethostbyname(can_node_name[0])
+
+
+ # now we can do the right thing if we're on the principal node or not
+ if node_addr == host_addr or "127.0.0.1" == can_node_addr :
+ sts = os.system("/usr/bin/sudo /sbin/chkconfig --add glusterd; \
+ /usr/bin/sudo /sbin/chkconfig glusterd on; \
+ /usr/bin/sudo /sbin/service glusterd start")
+ else :
+ # Remote actions.
+ pfx = "/usr/bin/sudo /usr/bin/ssh %s " % can_node_addr
+ clean_and_run(pfx+"/sbin/chkconfig --add glusterd")
+ clean_and_run(pfx+"/sbin/chkconfig glusterd on")
+ clean_and_run(pfx+"/sbin/service glusterd start")
+ # Local actions.
+ pfx = "/usr/bin/sudo "
+ clean_and_run(pfx+"/usr/sbin/gluster peer probe %s"%node_name)
+
+ return template("add_node_done.html",node_name=node_name)
+
+def run_www ():
+ node_name = request.forms.get("NODENAME")
+ return run_common(node_name)
+
+if __name__ == "__main__":
+ run_common(sys.argv[1])
diff --git a/scripts/cfs_add_volume.py b/scripts/cfs_add_volume.py
new file mode 100644
index 0000000..a9e7617
--- /dev/null
+++ b/scripts/cfs_add_volume.py
@@ -0,0 +1,79 @@
+
+import re
+import os
+import string
+import sys
+
+from bottle import request, template
+import cfs_utils
+
+def run_common (vname, vtype, vcount, bricks):
+
+ # TBD: all sorts of input-validity checking
+ # TBD: construct sane brick list
+ cmd = "volume create %s" % vname
+ if vtype != "plain":
+ cmd = "%s %s %s" % (cmd, vtype, vcount)
+ cmd = "%s %s" % (cmd, string.join(bricks))
+ sts = cfs_utils.run_cmd("gluster",cmd).wait()
+ if sts:
+ return template("add_vol_fail.html", name=vname,
+ action="gluster", status=sts)
+
+ ## make the cloudfs vol files with cloudfs
+ # TBD: this should go away in favor of generating these at start
+ # time; see big TBD near the end of the function
+ cmd = "init %s /var/lib/glusterd/cloudfs.tenants" % vname
+ for node in cfs_utils.get_members()[1:]:
+ sts = cfs_utils.run_cmd("cloudfs",cmd,host=node).wait()
+ if sts:
+ act = "cloudfs on %s" % node
+ return template("add_vol_fail.html", name=vname,
+ action=act, status=sts)
+
+ ## make the dirs on each node/volume
+ ## first get all the tenants
+ tenants = []
+ tenants.append("junk")
+ tenant_file = open("/var/lib/glusterd/cloudfs.tenants","r")
+ for tenantline in tenant_file:
+ scratch = re.split(' ', tenantline)
+ tenants.append(scratch[0])
+ ## now make the dirs on every volume
+ for b in bricks:
+ scratch = re.split(':', b)
+ for tenant in tenants :
+ node = scratch[0]
+ cmd = "%s/%s" % (scratch[1], tenant)
+ sts = cfs_utils.run_cmd("mkdir",cmd,host=node).wait()
+ if sts:
+ act = "mkdir on %s" % node
+ return template("add_vol_fail.html", name=vname,
+ action=act, status=sts)
+
+ # TBD: generating the cloudfs volfiles (client+server) and starting the
+ # glusterfsd daemons should be part of a separate "start" action, so
+ # that it's done with a tenant list that's as up-to-date as possible.
+ # It should also be done using the cloudfsd fetch/map infrastructure so
+ # that cfs_mount.py/mount.cloudfs can actually work. Doing it as a
+ # hack on top of Gluster's fetching/portmapping mess, even if it seems
+ # to work in some bogus test environment, is a total waste of time.
+ return template("add_vol_done.html",name=vname);
+
+def run_www ():
+ volume_id = request.forms.get("VOLUMEID")
+ volume_type = request.forms.get("TYPE")
+ replica_or_stripe_count = request.forms.get("COUNT")
+ brick_list = []
+ for prop in request.forms.iterkeys():
+ if prop.startswith("VOLUME_"):
+ brick_list.append(prop[7:])
+ return run_common(volume_id,volume_type,replica_or_stripe_count,
+ brick_list)
+
+if __name__ == "__main__":
+ volume_id = sys.argv[1]
+ volume_type = sys.argv[2]
+ replica_or_stripe_count = sys.argv[3]
+ brick_list = sys.argv[4:]
+ run_common(volume_id,volume_type,replica_or_stripe_count,brick_list)
diff --git a/scripts/cfs_utils.py b/scripts/cfs_utils.py
new file mode 100644
index 0000000..09e5ace
--- /dev/null
+++ b/scripts/cfs_utils.py
@@ -0,0 +1,118 @@
+import os
+import socket
+import subprocess
+
+# The list of filesystems that work is much shorter than the list of
+# filesystems that don't support xattrs, already-remote filesystems (e.g. NFS),
+# pseudo-filesystems (e.g. devfs) or other things that won't work for one
+# reason or another.
+good_fs_types = [ "ext2", "ext3", "ext4", "xfs", "btrfs" ]
+
+# Sudo is kind of pointless if we're already running as root.
+use_sudo = False
+
+# We use the class cache to avoid having to call through the shell to find
+# executables on $PATH every single time. To do this, we use the shell *once*
+# to find the local path and assume all nodes are configured similarly.
+class_cache = {}
+
+def get_path (program):
+ if not class_cache.has_key(program):
+ for dir in os.getenv("PATH").split(":"):
+ maybe = os.path.join(dir,program)
+ if os.access(maybe,os.X_OK):
+ class_cache[program] = maybe
+ break
+ # KeyError is as good as any exception we might throw ourselves.
+ return class_cache[program]
+
+# This lets users change access methods (e.g. pdsh, slurm, some future method
+# that proxies the command through cloudfsd's own secure connection.
+def make_remote (host, cmd):
+ return "%s %s %s" % (get_path("ssh"), host, cmd)
+
+def run_cmd (program, args, host=None, filters=[]):
+ cmd = "%s %s" % (get_path(program), args)
+ for prog2, args2 in filters:
+ cmd = "%s | %s %s" % (cmd, get_path(prog2), args2)
+ if use_sudo:
+ cmd = "%s %s" % cmd
+ if host:
+ cmd = make_remote(host,cmd)
+ if len(filters):
+ print "executing %s using shell" % cmd
+ child = subprocess.Popen(cmd,shell=True,
+ stdout=subprocess.PIPE)
+ else:
+ print "executing %s without shell" % cmd
+ child = subprocess.Popen(cmd.split(" "),shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ return child
+
+# NB this node is always first in the list
+def get_members ():
+ peer_ips = run_cmd("gluster","peer status",
+ filters=[("grep","Hostname:")]).stdout
+ my_ip = socket.gethostbyname(socket.gethostname())
+ node_list = [socket.gethostbyaddr(my_ip)[0]]
+ # For each line: strip off the trailing newline, split at colon, take
+ # item #1
+ for peer in [line[:-1].split(": ")[1] for line in peer_ips]:
+ node_list.append(peer)
+ return node_list
+
+# A brick list looks something like this:
+# {
+# "vol_A": [ "server1:/bricks/A", "server2:/bricks/A" ],
+# "vol_B": [ "server2:/bricks/B", "server3:/bricks/B: ]
+# }
+def get_bricks ():
+ vol_list = {}
+ brick_list = []
+ for line in run_cmd("gluster","volume info all").stdout:
+ parts = line[:-1].split(": ")
+ if len(parts) != 2:
+ continue
+ if parts[0] == "Volume Name":
+ if brick_list:
+ vol_list[vol_name] = brick_list
+ vol_name = parts[1]
+ brick_list = []
+ elif parts[0].startswith("Brick"):
+ brick_list.append(parts[1])
+ if brick_list:
+ vol_list[vol_name] = brick_list
+ return vol_list
+
+def dir_in_use (brick_list, dir):
+ for vol_bricks in brick_list.itervalues():
+ for brick in vol_bricks:
+ if brick == dir:
+ return True
+ return False
+
+# A mount list looks something like this:
+# {
+# "server1": [ ( "/bricks/A", True ),
+# ( "/some/other/mount", False) ]
+# "server2": [ ( "/bricks/A", True ),
+# ( "/bricks/B", True ) ],
+# "server3": [ ( "/bricks/B", True ),
+# ( "/what/ever", False ) ]
+# }
+# The second part of each tuple is the "in-use" flag, true iff the mount is
+# part of a volume.
+def get_mounts (brick_list):
+ volumes_on_nodes = {}
+ for node_ip in get_members() :
+ scratch = []
+ for line in run_cmd("mount","",host=node_ip).stdout:
+ bits = line.split(" ")
+ if bits[4] not in good_fs_types:
+ continue
+ mount = "%s:%s" % (node_ip, bits[2])
+ scratch.append((bits[2],dir_in_use(brick_list,mount)))
+ volumes_on_nodes[node_ip] = scratch
+ return volumes_on_nodes
+
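run_cmd above resolves the executable via get_path, optionally appends grep-style filters (which forces shell execution for the pipe), and wraps the whole command in ssh when a host is given. A short usage sketch mirroring how get_members() consumes it ("server2" is a placeholder host name):

import cfs_utils

# Local command, output filtered through grep (runs via the shell because
# of the pipe), matching how get_members() collects peer hostnames.
peers = cfs_utils.run_cmd("gluster", "peer status",
                          filters=[("grep", "Hostname:")]).stdout
for line in peers:
    print line[:-1].split(": ")[1]

# Same command on another node: run_cmd prefixes it with "ssh <host>".
remote = cfs_utils.run_cmd("gluster", "volume info all", host="server2")
print remote.stdout.read()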
diff --git a/scripts/cloudfsd.py b/scripts/cloudfsd.py
index f68d76e..e510823 100755
--- a/scripts/cloudfsd.py
+++ b/scripts/cloudfsd.py
@@ -1,19 +1,18 @@
#!/usr/bin/python
-from bottle import route, post, run, request
+from bottle import route, post, run, view, debug
+
+import os
+import socket
+import string
import paths
import volstart
import volstop
import volmap
-import wwwroot
-import wwwcfgroot
-import wwwcfgmain
-import wwwinitcluster
-import wwwdoinitcluster
-import wwwprovision
-import wwwdoprovision
-import wwwconfirmprovision
+import cfs_utils
+import cfs_add_node
+import cfs_add_volume
CLOUDFSD_PORT = 8080
@@ -47,39 +46,39 @@ def list_users():
print "list users"
@route("/")
-def www_root():
- return wwwroot.www_root()
-
@route("/cfg")
-def www_cfgroot():
- return wwwcfgroot.www_cfgroot()
-
@route("/cfgmain")
-def www_cfgmain():
- return wwwcfgmain.www_cfgmain()
-
-@route("/wwwprovision")
-def www_provision():
- return wwwprovision.www_provision()
-
-@post("/wwwconfirmprovision")
-def www_confirmprovision():
- return wwwconfirmprovision.www_confirmprovision(request.body)
-
-@post("/wwwdoprovision")
-def www_doprovision():
- print "www doprovision"
- return wwwdoprovision.www_doprovision(request.body)
-
-@route("/wwwinitcluster")
-def www_initcluster():
- print "www initcluster"
- return wwwinitcluster.www_initcluster()
-
-@post("/wwwdoinitcluster")
-def www_doinitcluster():
- print "www doinitcluster"
- return wwwdoinitcluster.www_doinitcluster()
+@view("cfgmain.html")
+def cfg_main():
+ return dict()
+
+@route("/cluster")
+@view("cluster.html")
+def show_cluster():
+ # TBD: handle glusterd presence/startup check sanely
+ cfs_utils.run_cmd("chkconfig","--add glusterd")
+ cfs_utils.run_cmd("chkconfig","glusterd on")
+ cfs_utils.run_cmd("service","glusterd start")
+ node_list = cfs_utils.get_members()
+ return dict(node_list=string.join(node_list,"<br />"))
+
+@post("/cluster/add_node")
+def add_node():
+ return cfs_add_node.run_www()
+
+# TBD: implement remove_node
+
+@route("/volumes")
+@view("volumes.html")
+def show_volumes():
+ brick_list = cfs_utils.get_bricks()
+ mount_list = cfs_utils.get_mounts(brick_list)
+ # TBD: allow adding arbitrary directories instead of just mountpoints
+ return dict(bricks=brick_list,mounts=mount_list)
+
+@post("/volumes/add_volume")
+def add_volume():
+ return cfs_add_volume.run_www()
@route("/wwwaddtenant")
def www_addtenant():
@@ -93,5 +92,10 @@ def www_doaddtenant():
def www_listtenants():
print "www listtenants"
+@route("/styles/:sheet")
+def get_style (sheet):
+ return file("styles/%s"%sheet,"r")
+
if __name__ == "__main__":
+ debug(True)
run(host='',port=CLOUDFSD_PORT)
diff --git a/scripts/styles/cfgmain.css b/scripts/styles/cfgmain.css
new file mode 100644
index 0000000..364d88b
--- /dev/null
+++ b/scripts/styles/cfgmain.css
@@ -0,0 +1,23 @@
+body {
+ background-color: #fff;
+ color: #000;
+ font-size: 0.9em;
+ font-family: sans-serif,helvetica;
+ margin: 0;
+ padding: 0;
+}
+
+.banner {
+ text-align: center;
+ margin: 0;
+ padding: 0.6em 2em 0.4em;
+ background-color: #900;
+ color: #fff;
+ font-weight: bold;
+ font-size: 1.75em;
+ border-bottom: 2px solid #000;
+}
+
+.content {
+ padding: 1em 5em;
+}
diff --git a/scripts/styles/provlist.css b/scripts/styles/provlist.css
new file mode 100644
index 0000000..70132a4
--- /dev/null
+++ b/scripts/styles/provlist.css
@@ -0,0 +1,29 @@
+.header {
+ float: left;
+ width: 100%;
+ background-color: #f4f4f4;
+}
+
+.wrapper {
+ position: relative;
+ float: left;
+ left: 0.00%;
+ width: 100.00%;
+ background-color: #cccccc;
+}
+
+tr.d0 td {
+ background-color: #CC9999;
+ color: black;
+}
+
+tr.d1 td {
+ background-color: #9999CC;
+ color: black;
+}
+
+.footer {
+ float: left;
+ width: 100%;
+ background-color: #f4f4f4;
+}
diff --git a/scripts/views/add_node_done.html b/scripts/views/add_node_done.html
new file mode 100644
index 0000000..23308e3
--- /dev/null
+++ b/scripts/views/add_node_done.html
@@ -0,0 +1,7 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+<p>Node {{node_name}} added.</p>
+<p><a href="/cluster">Back to cluster configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
diff --git a/scripts/views/add_vol_done.html b/scripts/views/add_vol_done.html
new file mode 100644
index 0000000..d6ddf58
--- /dev/null
+++ b/scripts/views/add_vol_done.html
@@ -0,0 +1,7 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+<p>Volume {{name}} created.</p>
+<p><a href="/volumes">Back to volume configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
diff --git a/scripts/views/add_vol_fail.html b/scripts/views/add_vol_fail.html
new file mode 100644
index 0000000..5ecf1c5
--- /dev/null
+++ b/scripts/views/add_vol_fail.html
@@ -0,0 +1,7 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+<p>Creating volume {{name}} failed (stage {{action}}, status {{status}}).</p>
+<p><a href="/volumes">Back to volume configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
diff --git a/scripts/views/cfgmain.html b/scripts/views/cfgmain.html
new file mode 100644
index 0000000..3f46b78
--- /dev/null
+++ b/scripts/views/cfgmain.html
@@ -0,0 +1,16 @@
+<html><head>
+<title>_Red Hat CloudFS Configuration_</title>
+<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
+<meta http-equiv="Pragma" content="no-cache" />
+<link href="/styles/cfgmain.css" rel="stylesheet" type="text/css" />
+</head><body>
+<div class="banner">
+ <h1>Red Hat CloudFS Configuration Main</h1>
+</div>
+<div class="content">
+ <p><a href="/cluster">Manage Servers</a></p>
+ <p><a href="/volumes">Manage Volumes</a></p>
+ <p><a href="/wwwlisttenants">Manage Tenants</a></p>
+</div>
+</body></html>
+
diff --git a/scripts/views/cluster.html b/scripts/views/cluster.html
new file mode 100644
index 0000000..58e83d2
--- /dev/null
+++ b/scripts/views/cluster.html
@@ -0,0 +1,23 @@
+<html><head>
+<title>Initialize CloudFS Cluster</title>
+<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
+<meta http-equiv="pragma" content="no-cache">
+<link rel="stylesheet" type="text/css" href="/styles/cfgmain.css" />
+</head><body>
+<div class="banner">
+ <h1>Initialize CloudFS Cluster</h1>
+</div>
+<div class="content">
+<h2>Cluster Nodes</h2>
+<p>{{!node_list}}</p>
+<br />
+Enter the hostname of a node to add to the cluster
+<form method="post" action="cluster/add_node">
+ Node Name: <input type="text" name="NODENAME" />
+ <input type="submit" name="ACTION" value="Add" />
+</form>
+<br />
+<form method="get" action="/cfgmain">
+ <input type="submit" value="Done" />
+</form>
+</body></html>
diff --git a/scripts/views/volumes.html b/scripts/views/volumes.html
new file mode 100644
index 0000000..11c964c
--- /dev/null
+++ b/scripts/views/volumes.html
@@ -0,0 +1,54 @@
+<html><head>
+<title>_Provision CloudFS Volume_</title>
+<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
+<meta http-equiv="Pragma" content="no-cache" />
+<link href="/styles/cfgmain.css" rel="stylesheet" type="text/css" />
+<link href="/styles/provlist.css" rel="stylesheet" type="text/css" />
+</head><body>
+<div class="banner">
+<h1>Provision CloudFS Volume</h1>
+</div>
+<div class="content">
+<h2>Existing Volumes:</h2>
+ %for vol_name, brick_list in bricks.iteritems():
+ <p>{{vol_name}}
+ %for brick in brick_list:
+ <br /> {{brick}}
+ %end
+ </p>
+ %end
+<hr>
+<h2>Provision a Volume From Available Bricks:</h2>
+<form method="post" name="provision" action="/volumes/add_volume">
+<div class="header"><hr></div>
+<div class="wrapper">
+<table>
+ %color_index = 0
+ %for node, mount_list in mounts.iteritems():
+ %color = "d%d" % color_index
+ %color_index = (color_index + 1) % 2
+ <tr class="{{color}}">
+ <td>{{node}}</td>
+ %for mpath, minuse in mount_list:
+ %full_path = "%s:%s" % (node,mpath)
+ %if minuse:
+ <td><input type="checkbox" name="VOLUME" value=0 disabled />{{mpath}} (in use)</td>
+ %else:
+ <td><input type="checkbox" name="VOLUME_{{full_path}}" value=0 />{{mpath}}</td>
+ %end
+
+ %end
+ </tr>
+ %end
+</table>
+</div>
+<div class="footer"><hr></div>
+Volume Type: <input type="radio" name="TYPE" value="plain" checked />Plain
+<input type="radio" name="TYPE" value="replica" />Replicated
+<input type="radio" name="TYPE" value="stripe" />Striped
+<br>Replica or Stripe count: <input type="text" name="COUNT" size="2" />
+<br>Volume ID: <input type="text" name="VOLUMEID" />
+<input type="submit" name="PROVISION" value="Provision" />
+</form>
+</div>
+</body></html>
diff --git a/scripts/volstart.py b/scripts/volstart.py
index a8f0655..3c699f7 100644
--- a/scripts/volstart.py
+++ b/scripts/volstart.py
@@ -44,6 +44,7 @@ def scan_gfs_volfiles(vol_name):
# family, socktype, proto, canonname, sockaddr
# We extract the sockaddr of the first item, and the IP addr from that
# TBD: handle IPv6, multi-homed hosts, etc.
+ # TBD: skip loopback addresses based on note above
my_addrs = socket.getaddrinfo(my_name, 0)
my_glob = "%s/vols/%s/%s.*.vol" % (paths.gfs_dir, vol_name, vol_name)
for vf in glob.iglob(my_glob):
@@ -138,6 +139,7 @@ def create_tenant_dirs(vol_file):
def vol_start(vol_name):
vol_base = check_volume_directory(vol_name)
users = parse_user_file(vol_name)
+ # TBD: deal with more than one brick on the same server
vf = scan_gfs_volfiles(vol_name)
new_vf = "%s/%s" % (vol_base, os.path.basename(vf))
outfile = open(new_vf, "w")
diff --git a/scripts/wwwcfgmain.py b/scripts/wwwcfgmain.py
deleted file mode 100644
index b577a4a..0000000
--- a/scripts/wwwcfgmain.py
+++ /dev/null
@@ -1,19 +0,0 @@
-
-import wwwcss
-
-def www_cfgmain() :
-
- ret = "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" \"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">"
- ret = ret + "<html><head>"
- ret = ret + "<title>_Red Hat CloudFS Configuration_</title>"
- ret = ret + "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />"
- ret = ret + "<meta http-equiv=\"Pragma\" content=\"no-cache\" />"
- ret = ret + wwwcss.css
- ret = ret + "</head><body>"
- ret = ret + "<h1>Red Hat CloudFS Configuration Main<h1/>"
- ret = ret + "<p><a href=\"wwwinitcluster\">Initialize Cluster</a></p>"
- ret = ret + "<p><a href=\"wwwlisttenants\">Tenant Management</a></p>"
- ret = ret + "<p><a href=\"wwwprovision\">Provision Storage</a></p>"
- ret = ret + "</body></html>"
- return ret
-
diff --git a/scripts/wwwcfgroot.py b/scripts/wwwcfgroot.py
deleted file mode 100644
index b9faede..0000000
--- a/scripts/wwwcfgroot.py
+++ /dev/null
@@ -1,34 +0,0 @@
-
-import wwwcss
-
-def www_cfgroot() :
-
- ret = "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" \"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">"
- ret = ret + "<html><head>"
- ret = ret + "<title>_Red Hat CloudFS Configuration_</title>"
- ret = ret + "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />"
- ret = ret + "<meta http-equiv=\"Pragma\" content=\"no-cache\" />"
- ret = ret + wwwcss.css
- ret = ret + "<meta http-equiv=\"Refresh\" content=\"0; url=/cfgmain\" />"
- ret = ret + "</head><body>"
- ret = ret + "<h1>Red Hat CloudFS Configuration<h1/>"
-# authentication disabled for now
-# <h2>Sign On<h2/>
-# <form method="post" action="/cgi-bin/authenticate">
-# <p>
-# <strong>Please enter user ID and password:</strong>
-# <br>
-# <strong>User ID</strong>
-# <input type="text" size="20" name="USERNAME">
-# <strong>Password</strong>
-# <input type="password" size="20" name="PASSWORD">
-# </p>
-# <p>
-# <input type="submit" name="signon" value="Sign On">
-# </p>
-# </form>
- ret = ret + "<p>Please follow <a href=\"/cfgmain\">link</a>!</p>"
- ret = ret + "</body></html>"
-
- return ret
-
diff --git a/scripts/wwwconfirmprovision.py b/scripts/wwwconfirmprovision.py
deleted file mode 100644
index 8b3ccba..0000000
--- a/scripts/wwwconfirmprovision.py
+++ /dev/null
@@ -1,59 +0,0 @@
-
-import fileinput
-import re
-import os
-
-def www_confirmprovision(body):
-
- # f = fileinput.input()
- line = body.readline()
- # fileinput.close()
-
- tokens = re.split('&', line)
-
- volume_id = ""
- volume_type = ""
- replica_or_stripe_count = ""
- command = ""
- volumes = []
-
- # assert PROVISION= will be the last one, thus we may also assert that
- # all options will be extracted before we try to cons up a cmd
- for index in range(len(tokens)) :
- if tokens[index].startswith("TYPE=") :
- scratch = re.split('=', tokens[index])
- volume_type = scratch[1]
- continue
- if tokens[index].startswith("COUNT=") :
- scratch = re.split('=', tokens[index])
- replica_or_stripe_count = scratch[1]
- continue
- elif tokens[index].startswith("VOLUMEID=") :
- scratch = re.split('=', tokens[index])
- volume_id = scratch[1]
- continue
- elif tokens[index].startswith("VOLUME=") :
- scratch = re.split('=', tokens[index])
- volumes.append(scratch[1])
- continue
-
- ret = "Content-type: text/html\n"
- ret = ret + "<html><head>"
- ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\">"
- ret = ret + "<title>Confirm Provision Volume</title>"
- ret = ret + "</head><body>"
- ret = ret + "<h2>Confirm Provision Volume</h2>"
- cmd = "/usr/sbin/gluster volume create " + volume_id
- if volume_type != "plain" :
- cmd = cmd + " " + volume_type + " " + replica_or_stripe_count
- cmd = cmd + " transport tcp"
- for volume in volumes :
- cmd = cmd + " " + volume.replace("%3A", ":").replace("%2F", "/")
-
- ret = ret + "<form method=\"post\" name=\"provision\" action=\"wwwdoprovision\">"
- ret = ret + "<input type=\"hidden\" name=\"COMMAND\" value=\"" + cmd + "\" />"
- ret = ret + cmd + " <br><input type=\"submit\" name=\"PROVISION\" value=\"Confirm\" />"
- ret = ret + "</form>"
- ret = ret + "</body></html>"
- return ret
-
diff --git a/scripts/wwwcss.py b/scripts/wwwcss.py
deleted file mode 100644
index af378a8..0000000
--- a/scripts/wwwcss.py
+++ /dev/null
@@ -1,79 +0,0 @@
-
-css = """<style type=\"text/css\">
-/*<![CDATA[*/
-body {
-background-color: #fff;
-color: #000;
-font-size: 0.9em;
-font-family: sans-serif,helvetica;
-margin: 0;
-padding: 0;
-}
-:link {
-color: #fff;" + +
-}
-:visited {
-color: #fff;
-}
-a:hover {
-color: #f50;
-}
-h1 {
-text-align: center;
-margin: 0;
-padding: 0.6em 2em 0.4em;
-background-color: #900;
-color: #fff;
-font-weight: normal;
-font-size: 1.75em;
-border-bottom: 2px solid #000;
-}
-h1 strong {
-font-weight: bold;
-}
-h2 {
-font-size: 1.1em;
-font-weight: bold;
-}
-hr {
-display: none;
-}
-.content {
-padding: 1em 5em;
-}
-.content-columns {
-/* Setting relative positioning allows for
-absolute positioning for sub-classes */
-position: relative;
-padding-top: 1em;
-}
-.content-column-left {
-/* Value for IE/Win; will be overwritten for other browsers */
-width: 47%;
-padding-right: 3%;
-float: left;
-padding-bottom: 2em;
-}
-.content-column-left hr {
-display: none;
-}
-.content-column-right {
-/* Values for IE/Win; will be overwritten for other browsers */
-width: 47%;
-padding-left: 3%;
-float: left;
-padding-bottom: 2em;
-}
-.content-columns>.content-column-left, .content-columns>.content-column-right {
-/* Non-IE/Win */
-}
-img {
-border: 2px solid #fff;
-padding: 2px;
-margin: 2px;
-}
-a:hover img {
-border: 2px solid #f50;
-}
-/*]]>*/
-</style>"""
diff --git a/scripts/wwwdoinitcluster.py b/scripts/wwwdoinitcluster.py
deleted file mode 100644
index 29526a5..0000000
--- a/scripts/wwwdoinitcluster.py
+++ /dev/null
@@ -1,59 +0,0 @@
-
-import fileinput
-import re
-import os
-import socket
-
-def www_doinitcluster():
-
- f = fileinput.input()
- line = f.readline()
- fileinput.close()
-
- host_name = socket.gethostname()
-
- tokens = re.split('&', line)
- node_name = ""
-
- # all options will be extracted before we try to cons up a cmd
- for index in range(len(tokens)) :
- if tokens[index].startswith("NODENAME=") :
- scratch = re.split('=', tokens[index])
- node_name = scratch[1]
-
- # derive the "real" node addr, e.g. if the user enters, e.g., 192.168.122.55
- # and that is this IP for 'this' node then when we're done can_node_addr will
- # either be 192.168.122.55 or 127.0.0.1.
- # similarly if the user enters, e.g., <principalnode>.foo.bar.baz.com, the
- # result will be the same
- host_addr = socket.gethostbyname(host_name)
- node_addr = socket.gethostbyname(node_name)
- can_node_name = socket.gethostbyaddr(node_addr)
- can_node_addr = socket.gethostbyname(can_node_name[0])
-
-
- # now we can do the right thing if we're on the principal node or not
- if node_addr == host_addr or "127.0.0.1" == can_node_addr :
- sts = os.system("/usr/bin/sudo /sbin/chkconfig --add glusterd; /usr/bin/sudo /sbin/chkconfig glusterd on; /usr/bin/sudo /sbin/service glusterd start")
- else :
- cmd = "/usr/bin/sudo /usr/bin/ssh " + can_node_addr + " /sbin/chkconfig --add glusterd"
- clean_cmd = cmd.replace("|", "").replace("&", "").replace(">", "")
- sts = os.system(clean_cmd + " > /dev/null 2>&1");
- cmd = "/usr/bin/sudo /usr/bin/ssh " + can_node_addr + " /sbin/chkconfig glusterd on"
- clean_cmd = cmd.replace("|", "").replace("&", "").replace(">", "")
- sts = os.system(clean_cmd + " > /dev/null 2>&1");
- cmd = "/usr/bin/sudo /usr/bin/ssh " + can_node_addr + " /sbin/service glusterd start"
- clean_cmd = cmd.replace("|", "").replace("&", "").replace(">", "")
- sts = os.system(clean_cmd + " > /dev/null 2>&1");
- cmd = "/usr/bin/sudo /usr/sbin/gluster peer probe " + node_name
- clean_cmd = cmd.replace("|", "").replace("&", "").replace(">", "")
- sts = os.system(clean_cmd + " > /dev/null 2>&1");
-
- ret = "Content-type: text/html\n"
- ret = ret + "<html><head>"
- ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\">"
- ret = ret + "<meta http-equiv=\"Refresh\" content=\"0; url=wwwinitcluster\" />"
- ret = ret + "</head><body>"
- ret = ret + "<p>Please follow <a href=\"wwwinitcluster\">link</a>!</p>"
- ret = ret + "</body></html>"
- return ret
diff --git a/scripts/wwwdoprovision.py b/scripts/wwwdoprovision.py
deleted file mode 100644
index 6535c50..0000000
--- a/scripts/wwwdoprovision.py
+++ /dev/null
@@ -1,114 +0,0 @@
-
-import fileinput
-import re
-import os
-
-def www_doprovision(body):
-
- # f = fileinput.input()
- line = body.readline()
- # fileinput.close()
-
- tokens = re.split('&', line)
-
- volume_id = ""
- volume_type = ""
- replica_or_stripe_count = ""
- command = ""
- volumes = []
-
- # assert PROVISION= will be the only one, thus we may also assert that
- # all options will be extracted before we try to cons up a cmd
- for index in range(len(tokens)) :
- if tokens[index].startswith("COMMAND=") :
- scratch = re.split('=', tokens[index])
- command = scratch[1]
- continue
-
- ret = "Content-type: text/html\n"
- ret = ret + "<html><head>"
- ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\">"
- ret = ret + "<title>Provisioned Volume</title>"
- ret = ret + "</head><body>"
- ret = ret + "<h2>Provisioned Volume</h2>"
- ## make the underlying volume with gluster
- decode_cmd = command.replace("%3A", ":").replace("%2F", "/").replace("+", " ")
- clean_cmd = decode_cmd.replace("|", "").replace("&", "").replace(">", "")
- gluster_sts = os.system("/usr/bin/sudo " + clean_cmd + " > /dev/null 2>&1")
- ## make the cloudfs vol files with cloudfs
- cloudfs_sts = -1
- cmd_tokens = re.split(' ', clean_cmd)
- if gluster_sts != -1 and cmd_tokens[0] == "/usr/sbin/gluster" and cmd_tokens[1] == "volume" and cmd_tokens[2] == "create" :
- cloudfs_cmd = "/usr/bin/sudo /usr/bin/cloudfs init " + cmd_tokens[3] + " /var/lib/glusterd/cloudfs.tenants > /dev/null 2>&1"
- cloudfs_sts = os.system(cloudfs_cmd)
-
- ## make the dirs on each node/volume
- if cloudfs_sts != -1 :
- ## first get all the tenants
- tenants = []
- tenants.append("junk")
- for tenantline in fileinput.input("/var/lib/glusterd/cloudfs.tenants") :
- scratch = re.split(' ', tenantline)
- tenants.append(scratch[0])
- fileinput.close()
- ## now make the dirs on every volume
- first_node = 6
- if cmd_tokens[4] != "transport" :
- first_node = first_node + 2
- nodes = []
- for ii in range(first_node, len(cmd_tokens)) :
- scratch = re.split(':', cmd_tokens[ii])
- nodes.append(scratch[0])
- for tenant in tenants :
- mkdir_cmd = "/usr/bin/sudo /usr/bin/ssh " + scratch[0] + " /bin/mkdir -p " + scratch[1] + "/" + tenant
- clean_cmd = mkdir_cmd.replace("|", "").replace("&", "").replace(">", "")
- mkdir_sts = os.system(clean_cmd + " > /dev/null 2>&1")
- if mkdir_sts != 0 :
- ret = ret + "<br> fail: " + clean_cmd
- # copy the modified vol files to the peers
- unique_nodes = set(nodes)
- this_ip = ""
- ifconfig_pipe = os.popen("/sbin/ifconfig eth0")
- for line in ifconfig_pipe :
- line = line.lstrip()
- tokens = re.split('[: ]', line)
- if tokens[0] == "inet" and tokens[1] == "addr" :
- this_ip = tokens[2]
- scratch = []
- scratch.append(this_ip)
-
- unique_nodes = unique_nodes.difference(scratch)
- ifconfig_pipe.close()
- # ssh and scp (i.e. pull). could just scp
- # (i.e. push) but then we would have to add
- # scp to sudoers-- (Would like to minimize
- # the number of things added to sudoers
- vol_name = cmd_tokens[3].replace("|", "").replace("&", "").replace(">", "")
- for node in unique_nodes :
- scp_cmd = "/usr/bin/sudo /usr/bin/ssh " + node + " 'cd /var/lib/glusterd/vols/" + vol_name + " && /usr/bin/scp -q -r " + this_ip + ":/var/lib/glusterd/vols/" + vol_name + "/* .'"
- scp_sts = os.system(scp_cmd)
- if scp_sts != 0 :
- ret = ret + "<br>fail: " + scp_cmd
-
- # now start the volume
- start_cmd = "/usr/bin/sudo /usr/sbin/gluster volume start " + vol_name
- start_sts = os.system(start_cmd + " > /dev/null 2>&1")
- if start_sts != 0 :
- ret = ret + "<br> fail: " + start_cmd
- else :
- ret = ret + "<br> started " + vol_name
-
- # list FUSE volume specs for download
- mkdir_sts = os.system("/usr/bin/sudo /bin/mkdir -p scratch")
- cp_sts = os.system("/usr/bin/sudo /bin/cp /var/lib/cloudfs/vols/" + vol_name + "/" + vol_name + "-fuse.vol.* /var/www/html/cfg/scratch/")
- ret = ret + "<hr><br> client/tenant volume files (right-click to save-as):"
- for tenant in tenants :
- if tenant != "junk" :
- ret = ret + "<br><a href=\"/cfg/scratch/%s-fuse.vol.%s\">%s</a>" % (vol_name, tenant, tenant)
- ret = ret + "<hr>"
- ret = ret + "<form method=\"post\" action=\"cfgmain\">"
- ret = ret + "<input type=\"submit\" name=\"DONE\" value=\"Done\"></form>"
-
- ret = ret + "</body></html>"
- return ret
-
diff --git a/scripts/wwwinitcluster.py b/scripts/wwwinitcluster.py
deleted file mode 100644
index 2858d6a..0000000
--- a/scripts/wwwinitcluster.py
+++ /dev/null
@@ -1,40 +0,0 @@
-
-import datetime
-import os
-import re
-import string
-import socket
-import sys
-
-def www_initcluster():
-
- hostname = socket.gethostname()
-
- node_ips = []
-
- sts = os.system("/usr/bin/sudo /sbin/chkconfig --add glusterd; /usr/bin/sudo /sbin/chkconfig glusterd on; /usr/bin/sudo /sbin/service glusterd start")
- peer_ips = os.popen("/usr/bin/sudo /usr/sbin/gluster peer status | /bin/grep Hostname:")
- for line in peer_ips :
- tokens = re.split(':', line)
- node_ips.append(socket.gethostbyname(string.strip(tokens[1])))
- peer_ips.close()
-
- ret = "Content-type: text/html\n"
- ret = ret + "<html><head>"
- ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\"><title>Initialize CloudFS Cluster</title>"
- ret = ret + "</head><body>"
- ret = ret + "<h2>Initialize CloudFS Cluster</h2>"
- ret = ret + "<hr>"
- ret = ret + "<h2>Cluster Nodes</h2>"
- for node_ip in node_ips :
- ret = ret + node_ip + "<br>"
- ret = ret + "<hr><br>"
- ret = ret + "Enter the hostname of a node to add to the cluster"
- ret = ret + "<form method=\"post\" action=\"wwwdoinitcluster\">"
- ret = ret + "Node Name: <input type=\"text\" name=\"NODENAME\">"
- ret = ret + "<input type=\"submit\" name=\"ADDNODE\" value=\"Add Node\"></form>"
- ret = ret + "<hr>"
- ret = ret + "<form method=\"post\" action=\"/cfgmain\">"
- ret = ret + "<input type=\"submit\" name=\"DONE\" value=\"Done\"></form>"
- ret = ret + "</body></html>"
- return ret
diff --git a/scripts/wwwprovision.py b/scripts/wwwprovision.py
deleted file mode 100644
index 72dada3..0000000
--- a/scripts/wwwprovision.py
+++ /dev/null
@@ -1,119 +0,0 @@
-
-import datetime
-import os
-import re
-import string
-import socket
-import sys
-
-def brick_used(needle, haystack) :
- for vol in haystack :
- for brick in vol :
- if brick == needle :
- return True
- return False
-
-def www_provision() :
-
- hostname = socket.gethostname()
-
- existing_vols = []
- bricks_by_vol = []
- bbv_index = -1
-
- volinfo_pipe = os.popen("/usr/bin/sudo /usr/sbin/gluster volume info all")
- for line in volinfo_pipe :
- line = line.lstrip()
- if line.startswith("Volume") :
- tokens = re.split(':', line)
- if tokens[0].strip() == "Volume Name" :
- existing_vols.append(tokens[1].strip())
- elif line.startswith("Bricks:") :
- bricks = []
- bricks_by_vol.append(bricks)
- bbv_index = bbv_index + 1
- elif line.startswith("Brick") :
- tokens = re.split(' ', line)
- bricks_by_vol[bbv_index].append(tokens[1].strip())
- volinfo_pipe.close()
-
- node_ips = []
-
- ifconfig_pipe = os.popen("/sbin/ifconfig eth0")
- for line in ifconfig_pipe :
- line = line.lstrip()
- tokens = re.split('[: ]', line)
- if tokens[0] == "inet" and tokens[1] == "addr" :
- node_ips.append(tokens[2])
- ifconfig_pipe.close()
-
- peer_ips = os.popen("/usr/bin/sudo /usr/sbin/gluster peer status | /bin/grep Hostname:")
- for line in peer_ips :
- tokens = re.split(':', line)
- node_ips.append(socket.gethostbyname(string.strip(tokens[1])))
- peer_ips.close()
-
- volumes_on_nodes = []
-
- for node_ip in node_ips :
- # ret = ret +("<p>%s</p>") % (node_ip)
- cmd = "/usr/bin/sudo /usr/bin/ssh " + node_ip + " df -H"
- volumes_on_node = os.popen(cmd)
- scratch = []
- for line in volumes_on_node :
- line = line.rstrip()
- if False == line.startswith("Filesystem") and False == line.startswith("/dev/mapper") and False == line.startswith("tmpfs") and False == line.endswith(" /") and False == line.endswith("/boot") :
- scratch.append(line)
- volumes_on_node.close()
- volumes_on_nodes.append(scratch)
-
- ret = "Content-type: text/html\n"
- ret = ret + "<html><head><meta http-equiv=\"pragma\" content=\"no-cache\"><title>Provision CloudFS Volume</title>"
- ret = ret + "<style type=\"text/css\">"
- ret = ret + ".colmask{ position: relative; overflow: hidden; margin: 0px auto; width: 100%; background-color: #b4d2f7 }"
- ret = ret + ".header{ float: left; width: 100%; background-color: #f4f4f4}"
- ret = ret + ".wrapper{ position: relative; float: left; left: 0.00%; width: 100.00%; background-color: #cccccc}"
- ret = ret + "tr.d0 td { background-color: #CC9999; color: black; }"
- ret = ret + "tr.d1 td { background-color: #9999CC; color: black; }"
- ret = ret + ".footer{ float: left; width: 100%; background-color: #f4f4f4}"
- ret = ret + "body { border-width: 0px; padding: 0px; margin: 0px; font-size: 90%; background-color: #e7e7de}"
- ret = ret + "</style></head><body>"
- ret = ret + "<h1>Provision CloudFS Volume</h1>"
- ret = ret + "<h2>Existing Volumes:</h2>"
- bbv_index = 0
- for existing_vol in existing_vols :
- ret = ret + "<p>" + existing_vol + ":"
- for brick in bricks_by_vol[bbv_index] :
- ret = ret + "<br> " + brick
- bbv_index = bbv_index + 1
- ret = ret + "</p>"
- ret = ret + "<hr>"
- ret = ret + "<h2>Provision a Volume From Available Bricks:</h2>"
- ret = ret + "<form method=\"post\" name=\"provision\" action=\"wwwconfirmprovision\">"
- ret = ret + "<div class=\"header\"><hr></div>"
- ret = ret + "<div class=\"wrapper\">"
- ret = ret + "<table>"
- node_index = 0
- for node_ip in node_ips :
- ret = ret + "<tr class=\"d%d\">" % (node_index % 2)
- ret = ret + "<td>%s</td>" % (node_ip)
- for volumes_on_node in volumes_on_nodes[node_index] :
- tokens = volumes_on_node.rpartition(" ")
- if brick_used(node_ip + ":" + tokens[2], bricks_by_vol) == True :
- ret = ret + "<td><input type=\"checkbox\" name=\"VOLUME\" value=\"%s:%s\" disabled />%s</td>" % (node_ip, tokens[2], tokens[2])
- else :
- ret = ret + "<td><input type=\"checkbox\" name=\"VOLUME\" value=\"%s:%s\" />%s</td>" % (node_ip, tokens[2], tokens[2])
- ret = ret + "</tr>"
- node_index = node_index + 1
- ret = ret + "</table>"
- ret = ret + "</div>"
- ret = ret + "<div class=\"footer\"><hr></div>"
- ret = ret + "Volume Type: <input type=\"radio\" name=\"TYPE\" value=\"plain\" checked />Plain"
- ret = ret + "<input type=\"radio\" name=\"TYPE\" value=\"replica\" />Replicated"
- ret = ret + "<input type=\"radio\" name=\"TYPE\" value=\"stripe\" />Striped"
- ret = ret + "<br>Replica or Stripe count: <input type=\"text\" name=\"COUNT\" size=\"2\" />"
- ret = ret + "<br>Volume ID: <input type=\"text\" name=\"VOLUMEID\" />"
- ret = ret + "<input type=\"submit\" name=\"PROVISION\" value=\"Provision\" />"
- ret = ret + "</form>"
- ret = ret + "</body></html>"
- return ret
diff --git a/scripts/wwwroot.py b/scripts/wwwroot.py
deleted file mode 100644
index 6e9a910..0000000
--- a/scripts/wwwroot.py
+++ /dev/null
@@ -1,10 +0,0 @@
-
-def www_root() :
- ret = "Content-type: text/html\n"
- ret = ret + "<html><head>"
- ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\">"
- ret = ret + "<meta http-equiv=\"Refresh\" content=\"0; url=/cfg\" />"
- ret = ret + "</head><body>"
- ret = ret + "<p>Please follow <a href=\"/cfg\">link</a>!</p>"
- ret = ret + "</body></html>"
- return ret
commit 0d1a9f4cc5a7aa2074cb34efcdea8bf0c1517f09
Author: Kaleb S. KEITHLEY <kkeithle@cloudfs-node01.kkeithle.redhat.com>
Date: Tue May 3 12:51:46 2011 -0400
checkpoint wip
flow from page to page in www gui works
things remaining:
+ use curl loopback to start volumes, i.e. including creating the
cloudified config files (versus using gluster)
+ switch os.popen and os.system to use subprocess.Popen
but that's enough for today — I need to work on other things.
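
The os.popen()/os.system() -> subprocess.Popen switch flagged in the list above could look roughly like the sketch below. The helper names (run_cmd, pipe_cmd) are illustrative stand-ins, not necessarily the interface the later cfs_utils module actually exposes.

    import subprocess

    def run_cmd(program, args):
        # Rough equivalent of os.system("program arg1 arg2 ..."), but with no
        # shell involved, so "|", "&" and ">" in arguments need no manual
        # scrubbing before the call.
        argv = [program] + args.split()
        return subprocess.Popen(argv).wait()

    def pipe_cmd(program, args):
        # Rough equivalent of os.popen(...): returns the command's stdout as
        # a list of lines.
        argv = [program] + args.split()
        proc = subprocess.Popen(argv, stdout=subprocess.PIPE)
        out, _ = proc.communicate()
        return out.splitlines()

    # e.g.:
    #   run_cmd("/sbin/service", "glusterd start")
    #   for line in pipe_cmd("/usr/sbin/gluster", "peer status"):
    #       ...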
diff --git a/scripts/cloudfsd.py b/scripts/cloudfsd.py
index 82324db..f68d76e 100755
--- a/scripts/cloudfsd.py
+++ b/scripts/cloudfsd.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
-from bottle import route, post, run
+from bottle import route, post, run, request
import paths
import volstart
@@ -60,18 +60,16 @@ def www_cfgmain():
@route("/wwwprovision")
def www_provision():
- print "www provision"
return wwwprovision.www_provision()
+@post("/wwwconfirmprovision")
+def www_confirmprovision():
+ return wwwconfirmprovision.www_confirmprovision(request.body)
+
@post("/wwwdoprovision")
def www_doprovision():
print "www doprovision"
- return wwwdoprovision.www_doprovision()
-
-@post("/wwwconfirmprovision")
-def www_confirmprovision():
- print "www confirmprovision"
- return wwwconfirmprovision.www_confirmprovision()
+ return wwwdoprovision.www_doprovision(request.body)
@route("/wwwinitcluster")
def www_initcluster():
diff --git a/scripts/wwwcfgmain.py b/scripts/wwwcfgmain.py
index 23f019e..b577a4a 100644
--- a/scripts/wwwcfgmain.py
+++ b/scripts/wwwcfgmain.py
@@ -1,97 +1,19 @@
-import fileinput
+import wwwcss
-def www_cfgroot() :
- fileinput.close()
+def www_cfgmain() :
- print "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" \"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">"
- print "<html><head>"
- print "<title>_Red Hat CloudFS Configuration_</title>"
- print "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />"
- print "<meta http-equiv=\"Pragma\" content=\"no-cache\" />"
- print "<style type=\"text/css\">"
- print "/*<![CDATA[*/"
- print "body {"
- print "background-color: #fff;"
- print "color: #000;"
- print "font-size: 0.9em;"
- print "font-family: sans-serif,helvetica;"
- print "margin: 0;"
- print "padding: 0;"
- print "}"
- print ":link {"
- print "color: #c00;"
- print "}"
- print ":visited {"
- print "color: #c00;"
- print "}"
- print "a:hover {"
- print "color: #f50;"
- print "}"
- print "h1 {"
- print "text-align: center;"
- print "margin: 0;"
- print "padding: 0.6em 2em 0.4em;"
- print "background-color: #900;"
- print "color: #fff;"
- print "font-weight: normal;"
- print "font-size: 1.75em;"
- print "border-bottom: 2px solid #000;"
- print "}"
- print "h1 strong {"
- print "font-weight: bold;"
- print "}"
- print "h2 {"
- print "font-size: 1.1em;"
- print "font-weight: bold;"
- print "}"
- print "hr {"
- print "display: none;"
- print "}"
- print ".content {"
- print "padding: 1em 5em;"
- print "}"
- print ".content-columns {"
- print "/* Setting relative positioning allows for "
- print "absolute positioning for sub-classes */"
- print "position: relative;"
- print "padding-top: 1em;"
- print "}"
- print ".content-column-left {"
- print "/* Value for IE/Win; will be overwritten for other browsers */"
- print "width: 47%;"
- print "padding-right: 3%;"
- print "float: left;"
- print "padding-bottom: 2em;"
- print "}"
- print ".content-column-left hr {"
- print "display: none;"
- print "}"
- print ".content-column-right {"
- print "/* Values for IE/Win; will be overwritten for other browsers */"
- print "width: 47%;"
- print "padding-left: 3%;"
- print "float: left;"
- print "padding-bottom: 2em;"
- print "}"
- print ".content-columns>.content-column-left, .content-columns>.content-column-right {"
- print "/* Non-IE/Win */"
- print "}"
- print "img {"
- print "border: 2px solid #fff;"
- print "padding: 2px;"
- print "margin: 2px;"
- print "}"
- print "a:hover img {"
- print "border: 2px solid #f50;"
- print "}"
- print "/*]]>*/"
- print "</style>"
- print "<meta http-equiv=\"Refresh\" content=\"0; url=https:/cfg/main\" />"
- print "</head><body>"
- print "<h1>Red Hat CloudFS Configuration Main<h1/>"
- print "<p><a href=\"http:8080/wwwinitcluster\">Initialize Cluster</a></p>"
- print "<p><a href=\"http:8080/wwwlisttenants\">Tenant Management</a></p>"
- print "<p><a href=\"http:8080/wwwprovision\">Provision Storage</a></p>"
- print "</body></html>"
+ ret = "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" \"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">"
+ ret = ret + "<html><head>"
+ ret = ret + "<title>_Red Hat CloudFS Configuration_</title>"
+ ret = ret + "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />"
+ ret = ret + "<meta http-equiv=\"Pragma\" content=\"no-cache\" />"
+ ret = ret + wwwcss.css
+ ret = ret + "</head><body>"
+ ret = ret + "<h1>Red Hat CloudFS Configuration Main<h1/>"
+ ret = ret + "<p><a href=\"wwwinitcluster\">Initialize Cluster</a></p>"
+ ret = ret + "<p><a href=\"wwwlisttenants\">Tenant Management</a></p>"
+ ret = ret + "<p><a href=\"wwwprovision\">Provision Storage</a></p>"
+ ret = ret + "</body></html>"
+ return ret
diff --git a/scripts/wwwcfgroot.py b/scripts/wwwcfgroot.py
index 2f130cc..b9faede 100644
--- a/scripts/wwwcfgroot.py
+++ b/scripts/wwwcfgroot.py
@@ -1,95 +1,17 @@
-import fileinput
+import wwwcss
def www_cfgroot() :
- fileinput.close()
- print "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" \"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">"
- print "<html><head>"
- print "<title>_Red Hat CloudFS Configuration_</title>"
- print "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />"
- print "<meta http-equiv=\"Pragma\" content=\"no-cache\" />"
- print "<style type=\"text/css\">"
- print "/*<![CDATA[*/"
- print "body {"
- print "background-color: #fff;"
- print "color: #000;"
- print "font-size: 0.9em;"
- print "font-family: sans-serif,helvetica;"
- print "margin: 0;"
- print "padding: 0;"
- print "}"
- print ":link {"
- print "color: #c00;"
- print "}"
- print ":visited {"
- print "color: #c00;"
- print "}"
- print "a:hover {"
- print "color: #f50;"
- print "}"
- print "h1 {"
- print "text-align: center;"
- print "margin: 0;"
- print "padding: 0.6em 2em 0.4em;"
- print "background-color: #900;"
- print "color: #fff;"
- print "font-weight: normal;"
- print "font-size: 1.75em;"
- print "border-bottom: 2px solid #000;"
- print "}"
- print "h1 strong {"
- print "font-weight: bold;"
- print "}"
- print "h2 {"
- print "font-size: 1.1em;"
- print "font-weight: bold;"
- print "}"
- print "hr {"
- print "display: none;"
- print "}"
- print ".content {"
- print "padding: 1em 5em;"
- print "}"
- print ".content-columns {"
- print "/* Setting relative positioning allows for "
- print "absolute positioning for sub-classes */"
- print "position: relative;"
- print "padding-top: 1em;"
- print "}"
- print ".content-column-left {"
- print "/* Value for IE/Win; will be overwritten for other browsers */"
- print "width: 47%;"
- print "padding-right: 3%;"
- print "float: left;"
- print "padding-bottom: 2em;"
- print "}"
- print ".content-column-left hr {"
- print "display: none;"
- print "}"
- print ".content-column-right {"
- print "/* Values for IE/Win; will be overwritten for other browsers */"
- print "width: 47%;"
- print "padding-left: 3%;"
- print "float: left;"
- print "padding-bottom: 2em;"
- print "}"
- print ".content-columns>.content-column-left, .content-columns>.content-column-right {"
- print "/* Non-IE/Win */"
- print "}"
- print "img {"
- print "border: 2px solid #fff;"
- print "padding: 2px;"
- print "margin: 2px;"
- print "}"
- print "a:hover img {"
- print "border: 2px solid #f50;"
- print "}"
- print "/*]]>*/"
- print "</style>"
- print "<meta http-equiv=\"Refresh\" content=\"0; url=http:8080/cfgmain\" />"
- print "</head><body>"
- print "<h1>Red Hat CloudFS Configuration<h1/>"
+ ret = "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" \"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">"
+ ret = ret + "<html><head>"
+ ret = ret + "<title>_Red Hat CloudFS Configuration_</title>"
+ ret = ret + "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />"
+ ret = ret + "<meta http-equiv=\"Pragma\" content=\"no-cache\" />"
+ ret = ret + wwwcss.css
+ ret = ret + "<meta http-equiv=\"Refresh\" content=\"0; url=/cfgmain\" />"
+ ret = ret + "</head><body>"
+ ret = ret + "<h1>Red Hat CloudFS Configuration<h1/>"
# authentication disabled for now
# <h2>Sign On<h2/>
# <form method="post" action="/cgi-bin/authenticate">
@@ -105,6 +27,8 @@ def www_cfgroot() :
# <input type="submit" name="signon" value="Sign On">
# </p>
# </form>
- print "<p>Please follow <a href=\"http:8080/cfgmain\">link</a>!</p>"
- print "</body></html>"
+ ret = ret + "<p>Please follow <a href=\"/cfgmain\">link</a>!</p>"
+ ret = ret + "</body></html>"
+
+ return ret
diff --git a/scripts/wwwconfirmprovision.py b/scripts/wwwconfirmprovision.py
index 801a5c3..8b3ccba 100644
--- a/scripts/wwwconfirmprovision.py
+++ b/scripts/wwwconfirmprovision.py
@@ -3,13 +3,11 @@ import fileinput
import re
import os
-def www_confirmprovision():
- print "Content-type: text/html"
- print
+def www_confirmprovision(body):
- f = fileinput.input()
- line = f.readline()
- fileinput.close()
+ # f = fileinput.input()
+ line = body.readline()
+ # fileinput.close()
tokens = re.split('&', line)
@@ -39,11 +37,12 @@ def www_confirmprovision():
volumes.append(scratch[1])
continue
- print "<html><head>"
- print "<meta http-equiv=\"pragma\" content=\"no-cache\">"
- print "<title>Confirm Provision Volume</title>"
- print "</head><body>"
- print "<h2>Confirm Provision Volume</h2>"
+ ret = "Content-type: text/html\n"
+ ret = ret + "<html><head>"
+ ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\">"
+ ret = ret + "<title>Confirm Provision Volume</title>"
+ ret = ret + "</head><body>"
+ ret = ret + "<h2>Confirm Provision Volume</h2>"
cmd = "/usr/sbin/gluster volume create " + volume_id
if volume_type != "plain" :
cmd = cmd + " " + volume_type + " " + replica_or_stripe_count
@@ -51,9 +50,10 @@ def www_confirmprovision():
for volume in volumes :
cmd = cmd + " " + volume.replace("%3A", ":").replace("%2F", "/")
- print "<form method=\"post\" name=\"provision\" action=\"wwwdoprovision\">"
- print ("<input type=\"hidden\" name=\"COMMAND\" value=\"%s\" />") % (cmd)
- print ("%s <br><input type=\"submit\" name=\"PROVISION\" value=\"Confirm\" />") % (cmd)
- print "</form>"
- print "</body></html>"
+ ret = ret + "<form method=\"post\" name=\"provision\" action=\"wwwdoprovision\">"
+ ret = ret + "<input type=\"hidden\" name=\"COMMAND\" value=\"" + cmd + "\" />"
+ ret = ret + cmd + " <br><input type=\"submit\" name=\"PROVISION\" value=\"Confirm\" />"
+ ret = ret + "</form>"
+ ret = ret + "</body></html>"
+ return ret
diff --git a/scripts/wwwcss.py b/scripts/wwwcss.py
new file mode 100644
index 0000000..af378a8
--- /dev/null
+++ b/scripts/wwwcss.py
@@ -0,0 +1,79 @@
+
+css = """<style type=\"text/css\">
+/*<![CDATA[*/
+body {
+background-color: #fff;
+color: #000;
+font-size: 0.9em;
+font-family: sans-serif,helvetica;
+margin: 0;
+padding: 0;
+}
+:link {
+color: #fff;" + +
+}
+:visited {
+color: #fff;
+}
+a:hover {
+color: #f50;
+}
+h1 {
+text-align: center;
+margin: 0;
+padding: 0.6em 2em 0.4em;
+background-color: #900;
+color: #fff;
+font-weight: normal;
+font-size: 1.75em;
+border-bottom: 2px solid #000;
+}
+h1 strong {
+font-weight: bold;
+}
+h2 {
+font-size: 1.1em;
+font-weight: bold;
+}
+hr {
+display: none;
+}
+.content {
+padding: 1em 5em;
+}
+.content-columns {
+/* Setting relative positioning allows for
+absolute positioning for sub-classes */
+position: relative;
+padding-top: 1em;
+}
+.content-column-left {
+/* Value for IE/Win; will be overwritten for other browsers */
+width: 47%;
+padding-right: 3%;
+float: left;
+padding-bottom: 2em;
+}
+.content-column-left hr {
+display: none;
+}
+.content-column-right {
+/* Values for IE/Win; will be overwritten for other browsers */
+width: 47%;
+padding-left: 3%;
+float: left;
+padding-bottom: 2em;
+}
+.content-columns>.content-column-left, .content-columns>.content-column-right {
+/* Non-IE/Win */
+}
+img {
+border: 2px solid #fff;
+padding: 2px;
+margin: 2px;
+}
+a:hover img {
+border: 2px solid #f50;
+}
+/*]]>*/
+</style>"""
diff --git a/scripts/wwwdoinitcluster.py b/scripts/wwwdoinitcluster.py
index 615911b..29526a5 100644
--- a/scripts/wwwdoinitcluster.py
+++ b/scripts/wwwdoinitcluster.py
@@ -5,8 +5,6 @@ import os
import socket
def www_doinitcluster():
- print "Content-type: text/html"
- print
f = fileinput.input()
line = f.readline()
@@ -51,9 +49,11 @@ def www_doinitcluster():
clean_cmd = cmd.replace("|", "").replace("&", "").replace(">", "")
sts = os.system(clean_cmd + " > /dev/null 2>&1");
- print "<html><head>"
- print "<meta http-equiv=\"pragma\" content=\"no-cache\">"
- print "<meta http-equiv=\"Refresh\" content=\"0; url=http:8080/wwwinitcluster\" />"
- print "</head><body>"
- print "<p>Please follow <a href=\"http:8080/wwwinitcluster\">link</a>!</p>"
- print "</body></html>"
+ ret = "Content-type: text/html\n"
+ ret = ret + "<html><head>"
+ ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\">"
+ ret = ret + "<meta http-equiv=\"Refresh\" content=\"0; url=wwwinitcluster\" />"
+ ret = ret + "</head><body>"
+ ret = ret + "<p>Please follow <a href=\"wwwinitcluster\">link</a>!</p>"
+ ret = ret + "</body></html>"
+ return ret
diff --git a/scripts/wwwdoprovision.py b/scripts/wwwdoprovision.py
index d76e451..6535c50 100644
--- a/scripts/wwwdoprovision.py
+++ b/scripts/wwwdoprovision.py
@@ -3,13 +3,11 @@ import fileinput
import re
import os
-def www_doprovision():
- print "Content-type: text/html"
- print
+def www_doprovision(body):
- f = fileinput.input()
- line = f.readline()
- fileinput.close()
+ # f = fileinput.input()
+ line = body.readline()
+ # fileinput.close()
tokens = re.split('&', line)
@@ -27,11 +25,12 @@ def www_doprovision():
command = scratch[1]
continue
- print "<html><head>"
- print "<meta http-equiv=\"pragma\" content=\"no-cache\">"
- print "<title>Provisioned Volume</title>"
- print "</head><body>"
- print "<h2>Provisioned Volume</h2>"
+ ret = "Content-type: text/html\n"
+ ret = ret + "<html><head>"
+ ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\">"
+ ret = ret + "<title>Provisioned Volume</title>"
+ ret = ret + "</head><body>"
+ ret = ret + "<h2>Provisioned Volume</h2>"
## make the underlying volume with gluster
decode_cmd = command.replace("%3A", ":").replace("%2F", "/").replace("+", " ")
clean_cmd = decode_cmd.replace("|", "").replace("&", "").replace(">", "")
@@ -65,7 +64,7 @@ def www_doprovision():
clean_cmd = mkdir_cmd.replace("|", "").replace("&", "").replace(">", "")
mkdir_sts = os.system(clean_cmd + " > /dev/null 2>&1")
if mkdir_sts != 0 :
- print "<br> fail: " + clean_cmd
+ ret = ret + "<br> fail: " + clean_cmd
# copy the modified vol files to the peers
unique_nodes = set(nodes)
this_ip = ""
@@ -89,26 +88,27 @@ def www_doprovision():
scp_cmd = "/usr/bin/sudo /usr/bin/ssh " + node + " 'cd /var/lib/glusterd/vols/" + vol_name + " && /usr/bin/scp -q -r " + this_ip + ":/var/lib/glusterd/vols/" + vol_name + "/* .'"
scp_sts = os.system(scp_cmd)
if scp_sts != 0 :
- print "<br>fail: " + scp_cmd
+ ret = ret + "<br>fail: " + scp_cmd
# now start the volume
start_cmd = "/usr/bin/sudo /usr/sbin/gluster volume start " + vol_name
start_sts = os.system(start_cmd + " > /dev/null 2>&1")
if start_sts != 0 :
- print "<br> fail: " + start_cmd
+ ret = ret + "<br> fail: " + start_cmd
else :
- print("<br> %s started") % (vol_name)
+ ret = ret + "<br> started " + vol_name
# list FUSE volume specs for download
- mkdir_sts = os.system("/usr/bin/sudo /bin/mkdir -p /var/www/html/cfg/scratch")
- cp_sts = os.system("/usr/bin/sudo /bin/cp /var/lib/glusterd/vols/" + vol_name + "/" + vol_name + "-fuse.vol.* /var/www/html/cfg/scratch/")
- print "<hr><br> client/tenant volume files (right-click to save-as):"
+ mkdir_sts = os.system("/usr/bin/sudo /bin/mkdir -p scratch")
+ cp_sts = os.system("/usr/bin/sudo /bin/cp /var/lib/cloudfs/vols/" + vol_name + "/" + vol_name + "-fuse.vol.* /var/www/html/cfg/scratch/")
+ ret = ret + "<hr><br> client/tenant volume files (right-click to save-as):"
for tenant in tenants :
if tenant != "junk" :
- print("<br><a href=\"/cfg/scratch/%s-fuse.vol.%s\">%s</a>") % (vol_name, tenant, tenant)
- print "<hr>"
- print "<form method=\"post\" action=\"cfgmain\">"
- print "<input type=\"submit\" name=\"DONE\" value=\"Done\"></form>"
+ ret = ret + "<br><a href=\"/cfg/scratch/%s-fuse.vol.%s\">%s</a>" % (vol_name, tenant, tenant)
+ ret = ret + "<hr>"
+ ret = ret + "<form method=\"post\" action=\"cfgmain\">"
+ ret = ret + "<input type=\"submit\" name=\"DONE\" value=\"Done\"></form>"
- print "</body></html>"
+ ret = ret + "</body></html>"
+ return ret
diff --git a/scripts/wwwinitcluster.py b/scripts/wwwinitcluster.py
index 44f9f5f..2858d6a 100644
--- a/scripts/wwwinitcluster.py
+++ b/scripts/wwwinitcluster.py
@@ -1,6 +1,5 @@
import datetime
-import fileinput
import os
import re
import string
@@ -8,7 +7,6 @@ import socket
import sys
def www_initcluster():
- fileinput.close()
hostname = socket.gethostname()
@@ -21,23 +19,22 @@ def www_initcluster():
node_ips.append(socket.gethostbyname(string.strip(tokens[1])))
peer_ips.close()
- print "Content-type: text/html"
- print
- print "<html><head>"
- print "<meta http-equiv=\"pragma\" content=\"no-cache\"><title>Initialize CloudFS Cluster</title>"
- print "</head><body>"
- print "<h2>Initialize CloudFS Cluster</h2>"
- print "<hr>"
- print "<h2>Cluster Nodes</h2>"
+ ret = "Content-type: text/html\n"
+ ret = ret + "<html><head>"
+ ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\"><title>Initialize CloudFS Cluster</title>"
+ ret = ret + "</head><body>"
+ ret = ret + "<h2>Initialize CloudFS Cluster</h2>"
+ ret = ret + "<hr>"
+ ret = ret + "<h2>Cluster Nodes</h2>"
for node_ip in node_ips :
- print node_ip + "<br>"
- print "<hr><br>"
- print "Enter the hostname of a node to add to the cluster"
- print "<form method=\"post\" action=\"wwwdoinitcluster\">"
- print "Node Name: <input type=\"text\" name=\"NODENAME\">"
- print "<input type=\"submit\" name=\"ADDNODE\" value=\"Add Node\"></form>"
- print "<hr>"
- print "<form method=\"post\" action=\"/cfgmain.\">"
- print "<input type=\"submit\" name=\"DONE\" value=\"Done\"></form>"
-
- print "</body></html>"
+ ret = ret + node_ip + "<br>"
+ ret = ret + "<hr><br>"
+ ret = ret + "Enter the hostname of a node to add to the cluster"
+ ret = ret + "<form method=\"post\" action=\"wwwdoinitcluster\">"
+ ret = ret + "Node Name: <input type=\"text\" name=\"NODENAME\">"
+ ret = ret + "<input type=\"submit\" name=\"ADDNODE\" value=\"Add Node\"></form>"
+ ret = ret + "<hr>"
+ ret = ret + "<form method=\"post\" action=\"/cfgmain\">"
+ ret = ret + "<input type=\"submit\" name=\"DONE\" value=\"Done\"></form>"
+ ret = ret + "</body></html>"
+ return ret
diff --git a/scripts/wwwprovision.py b/scripts/wwwprovision.py
index c4e2910..72dada3 100644
--- a/scripts/wwwprovision.py
+++ b/scripts/wwwprovision.py
@@ -1,14 +1,13 @@
import datetime
-import fileinput
import os
import re
import string
import socket
import sys
-def brick_used(needle) :
- for vol in bricks_by_vol :
+def brick_used(needle, haystack) :
+ for vol in haystack :
for brick in vol :
if brick == needle :
return True
@@ -16,14 +15,8 @@ def brick_used(needle) :
def www_provision() :
- fileinput.close()
-
hostname = socket.gethostname()
- print "Content-type: text/html"
- print
- print "<html><head><meta http-equiv=\"pragma\" content=\"no-cache\"><title>Provision CloudFS Volume</title>"
-
existing_vols = []
bricks_by_vol = []
bbv_index = -1
@@ -63,7 +56,7 @@ def www_provision() :
volumes_on_nodes = []
for node_ip in node_ips :
- # print("<p>%s</p>") % (node_ip)
+ # ret = ret +("<p>%s</p>") % (node_ip)
cmd = "/usr/bin/sudo /usr/bin/ssh " + node_ip + " df -H"
volumes_on_node = os.popen(cmd)
scratch = []
@@ -74,53 +67,53 @@ def www_provision() :
volumes_on_node.close()
volumes_on_nodes.append(scratch)
- print "<style type=\"text/css\">"
- print ".colmask{ position: relative; overflow: hidden; margin: 0px auto; width: 100%; background-color: #b4d2f7 }"
- print ".header{ float: left; width: 100%; background-color: #f4f4f4}"
- print ".wrapper{ position: relative; float: left; left: 0.00%; width: 100.00%; background-color: #cccccc}"
- #for node_ip in node_ips :
- # print(".col%s{ position: relative; float: left; width: %d%%; left: 1%%; background-color: #b4d2f7}") % (node_ip.replace(".", "_"), 100/len(node_ips)-1)
- print "tr.d0 td { background-color: #CC9999; color: black; }"
- print "tr.d1 td { background-color: #9999CC; color: black; }"
- print ".footer{ float: left; width: 100%; background-color: #f4f4f4}"
- print "body { border-width: 0px; padding: 0px; margin: 0px; font-size: 90%; background-color: #e7e7de}"
- print "</style></head><body>"
- print "<h1>Provision CloudFS Volume</h1>"
- print "<h2>Existing Volumes:</h2>"
+ ret = "Content-type: text/html\n"
+ ret = ret + "<html><head><meta http-equiv=\"pragma\" content=\"no-cache\"><title>Provision CloudFS Volume</title>"
+ ret = ret + "<style type=\"text/css\">"
+ ret = ret + ".colmask{ position: relative; overflow: hidden; margin: 0px auto; width: 100%; background-color: #b4d2f7 }"
+ ret = ret + ".header{ float: left; width: 100%; background-color: #f4f4f4}"
+ ret = ret + ".wrapper{ position: relative; float: left; left: 0.00%; width: 100.00%; background-color: #cccccc}"
+ ret = ret + "tr.d0 td { background-color: #CC9999; color: black; }"
+ ret = ret + "tr.d1 td { background-color: #9999CC; color: black; }"
+ ret = ret + ".footer{ float: left; width: 100%; background-color: #f4f4f4}"
+ ret = ret + "body { border-width: 0px; padding: 0px; margin: 0px; font-size: 90%; background-color: #e7e7de}"
+ ret = ret + "</style></head><body>"
+ ret = ret + "<h1>Provision CloudFS Volume</h1>"
+ ret = ret + "<h2>Existing Volumes:</h2>"
bbv_index = 0
for existing_vol in existing_vols :
- print "<p>" + existing_vol + ":"
+ ret = ret + "<p>" + existing_vol + ":"
for brick in bricks_by_vol[bbv_index] :
- print "<br> " + brick
+ ret = ret + "<br> " + brick
bbv_index = bbv_index + 1
- print "</p>"
- print "<hr>"
- print "<h2>Provision a Volume From Available Bricks:</h2>"
- print "<form method=\"post\" name=\"provision\" action=\"wwwconfirmprovision\">"
- print "<div class=\"header\"><hr></div>"
- print "<div class=\"wrapper\">"
- print "<table>"
+ ret = ret + "</p>"
+ ret = ret + "<hr>"
+ ret = ret + "<h2>Provision a Volume From Available Bricks:</h2>"
+ ret = ret + "<form method=\"post\" name=\"provision\" action=\"wwwconfirmprovision\">"
+ ret = ret + "<div class=\"header\"><hr></div>"
+ ret = ret + "<div class=\"wrapper\">"
+ ret = ret + "<table>"
node_index = 0
for node_ip in node_ips :
- print("<tr class=\"d%d\">") % (node_index % 2)
- print("<td>%s</td>") % (node_ip)
+ ret = ret + "<tr class=\"d%d\">" % (node_index % 2)
+ ret = ret + "<td>%s</td>" % (node_ip)
for volumes_on_node in volumes_on_nodes[node_index] :
tokens = volumes_on_node.rpartition(" ")
- if brick_used(node_ip + ":" + tokens[2]) == True :
- print("<td><input type=\"checkbox\" name=\"VOLUME\" value=\"%s:%s\" disabled />%s</td>") % (node_ip, tokens[2], tokens[2])
+ if brick_used(node_ip + ":" + tokens[2], bricks_by_vol) == True :
+ ret = ret + "<td><input type=\"checkbox\" name=\"VOLUME\" value=\"%s:%s\" disabled />%s</td>" % (node_ip, tokens[2], tokens[2])
else :
- print("<td><input type=\"checkbox\" name=\"VOLUME\" value=\"%s:%s\" />%s</td>") % (node_ip, tokens[2], tokens[2])
- print "</tr>"
+ ret = ret + "<td><input type=\"checkbox\" name=\"VOLUME\" value=\"%s:%s\" />%s</td>" % (node_ip, tokens[2], tokens[2])
+ ret = ret + "</tr>"
node_index = node_index + 1
- print "</table>"
- print "</div>"
- print "<div class=\"footer\"><hr></div>"
- print "Volume Type: <input type=\"radio\" name=\"TYPE\" value=\"plain\" checked />Plain"
- print "<input type=\"radio\" name=\"TYPE\" value=\"replica\" />Replicated"
- print "<input type=\"radio\" name=\"TYPE\" value=\"stripe\" />Striped"
- print "<br>Replica or Stripe count: <input type=\"text\" name=\"COUNT\" size=\"2\" />"
- print "<br>Volume ID: <input type=\"text\" name=\"VOLUMEID\" />"
- print "<input type=\"submit\" name=\"PROVISION\" value=\"Provision\" />"
- print "</form>"
-
- print "</body></html>"
+ ret = ret + "</table>"
+ ret = ret + "</div>"
+ ret = ret + "<div class=\"footer\"><hr></div>"
+ ret = ret + "Volume Type: <input type=\"radio\" name=\"TYPE\" value=\"plain\" checked />Plain"
+ ret = ret + "<input type=\"radio\" name=\"TYPE\" value=\"replica\" />Replicated"
+ ret = ret + "<input type=\"radio\" name=\"TYPE\" value=\"stripe\" />Striped"
+ ret = ret + "<br>Replica or Stripe count: <input type=\"text\" name=\"COUNT\" size=\"2\" />"
+ ret = ret + "<br>Volume ID: <input type=\"text\" name=\"VOLUMEID\" />"
+ ret = ret + "<input type=\"submit\" name=\"PROVISION\" value=\"Provision\" />"
+ ret = ret + "</form>"
+ ret = ret + "</body></html>"
+ return ret
diff --git a/scripts/wwwroot.py b/scripts/wwwroot.py
index 5990022..6e9a910 100644
--- a/scripts/wwwroot.py
+++ b/scripts/wwwroot.py
@@ -1,15 +1,10 @@
-import fileinput
-
def www_root() :
- fileinput.close()
-
- print "Content-type: text/html"
- print
- print "<html><head>"
- print "<meta http-equiv=\"pragma\" content=\"no-cache\">"
-# print "<meta http-equiv=\"Refresh\" content=\"0; url=https:/cfg/\" />"
- print "<meta http-equiv=\"Refresh\" content=\"0; url=http:8080/cfg/\" />"
- print "</head><body>"
- print "<p>Please follow <a href=\"https:/cfg/\">link</a>!</p>"
- print "</body></html>"
+ ret = "Content-type: text/html\n"
+ ret = ret + "<html><head>"
+ ret = ret + "<meta http-equiv=\"pragma\" content=\"no-cache\">"
+ ret = ret + "<meta http-equiv=\"Refresh\" content=\"0; url=/cfg\" />"
+ ret = ret + "</head><body>"
+ ret = ret + "<p>Please follow <a href=\"/cfg\">link</a>!</p>"
+ ret = ret + "</body></html>"
+ return ret
commit 240069824f30a4f1fdde665a0c5bec5501302d96
Author: Kaleb S. KEITHLEY <kkeithle@cloudfs-node01.kkeithle.redhat.com>
Date: Mon May 2 14:30:28 2011 -0400
check-in wip, moving prototype web UI into cloudfsd.
much work to do, e.g. os.system() -> subprocess.Popen()
not sure if links in redirects and forms work with the :8080 port, etc.
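
On the :8080 question above: as long as redirects, links and form actions use site-relative paths, the browser resolves them against the host:port it loaded the page from, so the non-standard port carries through without being hard-coded. A minimal sketch, with made-up route bodies:

    from bottle import route, post, redirect, run

    @route("/")
    def root():
        # Redirect to a site-relative path; the client resolves it against
        # the host:port it is already talking to, so :8080 is preserved.
        redirect("/cfgmain")

    @route("/cfgmain")
    def cfgmain():
        # The form action is likewise relative, so it posts back to :8080.
        return ('<form method="post" action="/wwwdoaddtenant">'
                '<input type="submit" value="Done" /></form>')

    @post("/wwwdoaddtenant")
    def doaddtenant():
        return "ok"

    if __name__ == "__main__":
        run(host='', port=8080)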
diff --git a/scripts/cloudfsd.py b/scripts/cloudfsd.py
index 125ff29..82324db 100755
--- a/scripts/cloudfsd.py
+++ b/scripts/cloudfsd.py
@@ -1,16 +1,19 @@
#!/usr/bin/python
-import glob
-import json
-import os
-import re
-
-from bottle import route, run
+from bottle import route, post, run
+import paths
import volstart
import volstop
import volmap
-import paths
+import wwwroot
+import wwwcfgroot
+import wwwcfgmain
+import wwwinitcluster
+import wwwdoinitcluster
+import wwwprovision
+import wwwdoprovision
+import wwwconfirmprovision
CLOUDFSD_PORT = 8080
@@ -43,31 +46,48 @@ def delete_user(user_name):
def list_users():
print "list users"
+@route("/")
+def www_root():
+ return wwwroot.www_root()
+
+@route("/cfg")
+def www_cfgroot():
+ return wwwcfgroot.www_cfgroot()
+
+@route("/cfgmain")
+def www_cfgmain():
+ return wwwcfgmain.www_cfgmain()
+
@route("/wwwprovision")
def www_provision():
print "www provision"
+ return wwwprovision.www_provision()
-@route("/wwwdoprovision")
+@post("/wwwdoprovision")
def www_doprovision():
print "www doprovision"
+ return wwwdoprovision.www_doprovision()
-@route("/wwwconfirmprovision")
+@post("/wwwconfirmprovision")
def www_confirmprovision():
print "www confirmprovision"
+ return wwwconfirmprovision.www_confirmprovision()
@route("/wwwinitcluster")
def www_initcluster():
print "www initcluster"
+ return wwwinitcluster.www_initcluster()
-@route("/wwwdoinitcluster")
+@post("/wwwdoinitcluster")
def www_doinitcluster():
print "www doinitcluster"
+ return wwwdoinitcluster.www_doinitcluster()
@route("/wwwaddtenant")
def www_addtenant():
print "www addtenant"
-@route("/wwwdoaddtenant")
+@post("/wwwdoaddtenant")
def www_doaddtenant():
print "www doaddtenant"
diff --git a/scripts/wwwcfgmain.py b/scripts/wwwcfgmain.py
new file mode 100644
index 0000000..23f019e
--- /dev/null
+++ b/scripts/wwwcfgmain.py
@@ -0,0 +1,97 @@
+
+import fileinput
+
+def www_cfgroot() :
+ fileinput.close()
+
+ print "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" \"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">"
+ print "<html><head>"
+ print "<title>_Red Hat CloudFS Configuration_</title>"
+ print "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />"
+ print "<meta http-equiv=\"Pragma\" content=\"no-cache\" />"
+ print "<style type=\"text/css\">"
+ print "/*<![CDATA[*/"
+ print "body {"
+ print "background-color: #fff;"
+ print "color: #000;"
+ print "font-size: 0.9em;"
+ print "font-family: sans-serif,helvetica;"
+ print "margin: 0;"
+ print "padding: 0;"
+ print "}"
+ print ":link {"
+ print "color: #c00;"
+ print "}"
+ print ":visited {"
+ print "color: #c00;"
+ print "}"
+ print "a:hover {"
+ print "color: #f50;"
+ print "}"
+ print "h1 {"
+ print "text-align: center;"
+ print "margin: 0;"
+ print "padding: 0.6em 2em 0.4em;"
+ print "background-color: #900;"
+ print "color: #fff;"
+ print "font-weight: normal;"
+ print "font-size: 1.75em;"
+ print "border-bottom: 2px solid #000;"
+ print "}"
+ print "h1 strong {"
+ print "font-weight: bold;"
+ print "}"
+ print "h2 {"
+ print "font-size: 1.1em;"
+ print "font-weight: bold;"
+ print "}"
+ print "hr {"
+ print "display: none;"
+ print "}"
+ print ".content {"
+ print "padding: 1em 5em;"
+ print "}"
+ print ".content-columns {"
+ print "/* Setting relative positioning allows for "
+ print "absolute positioning for sub-classes */"
+ print "position: relative;"
+ print "padding-top: 1em;"
+ print "}"
+ print ".content-column-left {"
+ print "/* Value for IE/Win; will be overwritten for other browsers */"
+ print "width: 47%;"
+ print "padding-right: 3%;"
+ print "float: left;"
+ print "padding-bottom: 2em;"
+ print "}"
+ print ".content-column-left hr {"
+ print "display: none;"
+ print "}"
+ print ".content-column-right {"
+ print "/* Values for IE/Win; will be overwritten for other browsers */"
+ print "width: 47%;"
+ print "padding-left: 3%;"
+ print "float: left;"
+ print "padding-bottom: 2em;"
+ print "}"
+ print ".content-columns>.content-column-left, .content-columns>.content-column-right {"
+ print "/* Non-IE/Win */"
+ print "}"
+ print "img {"
+ print "border: 2px solid #fff;"
+ print "padding: 2px;"
+ print "margin: 2px;"
+ print "}"
+ print "a:hover img {"
+ print "border: 2px solid #f50;"
+ print "}"
+ print "/*]]>*/"
+ print "</style>"
+ print "<meta http-equiv=\"Refresh\" content=\"0; url=https:/cfg/main\" />"
+ print "</head><body>"
+ print "<h1>Red Hat CloudFS Configuration Main<h1/>"
+ print "<p><a href=\"http:8080/wwwinitcluster\">Initialize Cluster</a></p>"
+ print "<p><a href=\"http:8080/wwwlisttenants\">Tenant Management</a></p>"
+ print "<p><a href=\"http:8080/wwwprovision\">Provision Storage</a></p>"
+ print "</body></html>"
+
diff --git a/scripts/wwwcfgroot.py b/scripts/wwwcfgroot.py
new file mode 100644
index 0000000..2f130cc
--- /dev/null
+++ b/scripts/wwwcfgroot.py
@@ -0,0 +1,110 @@
+
+import fileinput
+
+def www_cfgroot() :
+ fileinput.close()
+
+ print "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" \"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">"
+ print "<html><head>"
+ print "<title>_Red Hat CloudFS Configuration_</title>"
+ print "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />"
+ print "<meta http-equiv=\"Pragma\" content=\"no-cache\" />"
+ print "<style type=\"text/css\">"
+ print "/*<![CDATA[*/"
+ print "body {"
+ print "background-color: #fff;"
+ print "color: #000;"
+ print "font-size: 0.9em;"
+ print "font-family: sans-serif,helvetica;"
+ print "margin: 0;"
+ print "padding: 0;"
+ print "}"
+ print ":link {"
+ print "color: #c00;"
+ print "}"
+ print ":visited {"
+ print "color: #c00;"
+ print "}"
+ print "a:hover {"
+ print "color: #f50;"
+ print "}"
+ print "h1 {"
+ print "text-align: center;"
+ print "margin: 0;"
+ print "padding: 0.6em 2em 0.4em;"
+ print "background-color: #900;"
+ print "color: #fff;"
+ print "font-weight: normal;"
+ print "font-size: 1.75em;"
+ print "border-bottom: 2px solid #000;"
+ print "}"
+ print "h1 strong {"
+ print "font-weight: bold;"
+ print "}"
+ print "h2 {"
+ print "font-size: 1.1em;"
+ print "font-weight: bold;"
+ print "}"
+ print "hr {"
+ print "display: none;"
+ print "}"
+ print ".content {"
+ print "padding: 1em 5em;"
+ print "}"
+ print ".content-columns {"
+ print "/* Setting relative positioning allows for "
+ print "absolute positioning for sub-classes */"
+ print "position: relative;"
+ print "padding-top: 1em;"
+ print "}"
+ print ".content-column-left {"
+ print "/* Value for IE/Win; will be overwritten for other browsers */"
+ print "width: 47%;"
+ print "padding-right: 3%;"
+ print "float: left;"
+ print "padding-bottom: 2em;"
+ print "}"
+ print ".content-column-left hr {"
+ print "display: none;"
+ print "}"
+ print ".content-column-right {"
+ print "/* Values for IE/Win; will be overwritten for other browsers */"
+ print "width: 47%;"
+ print "padding-left: 3%;"
+ print "float: left;"
+ print "padding-bottom: 2em;"
+ print "}"
+ print ".content-columns>.content-column-left, .content-columns>.content-column-right {"
+ print "/* Non-IE/Win */"
+ print "}"
+ print "img {"
+ print "border: 2px solid #fff;"
+ print "padding: 2px;"
+ print "margin: 2px;"
+ print "}"
+ print "a:hover img {"
+ print "border: 2px solid #f50;"
+ print "}"
+ print "/*]]>*/"
+ print "</style>"
+ print "<meta http-equiv=\"Refresh\" content=\"0; url=http:8080/cfgmain\" />"
+ print "</head><body>"
+ print "<h1>Red Hat CloudFS Configuration<h1/>"
+# authentication disabled for now
+# <h2>Sign On<h2/>
+# <form method="post" action="/cgi-bin/authenticate">
+# <p>
+# <strong>Please enter user ID and password:</strong>
+# <br>
+# <strong>User ID</strong>
+# <input type="text" size="20" name="USERNAME">
+# <strong>Password</strong>
+# <input type="password" size="20" name="PASSWORD">
+# </p>
+# <p>
+# <input type="submit" name="signon" value="Sign On">
+# </p>
+# </form>
+ print "<p>Please follow <a href=\"http:8080/cfgmain\">link</a>!</p>"
+ print "</body></html>"
+
diff --git a/scripts/wwwconfirmprovision.py b/scripts/wwwconfirmprovision.py
new file mode 100644
index 0000000..801a5c3
--- /dev/null
+++ b/scripts/wwwconfirmprovision.py
@@ -0,0 +1,59 @@
+
+import fileinput
+import re
+import os
+
+def www_confirmprovision():
+ print "Content-type: text/html"
+ print
+
+ f = fileinput.input()
+ line = f.readline()
+ fileinput.close()
+
+ tokens = re.split('&', line)
+
+ volume_id = ""
+ volume_type = ""
+ replica_or_stripe_count = ""
+ command = ""
+ volumes = []
+
+    # assume PROVISION= will be the last field, so all of the other options
+    # will have been extracted before we try to cons up a cmd
+ for index in range(len(tokens)) :
+ if tokens[index].startswith("TYPE=") :
+ scratch = re.split('=', tokens[index])
+ volume_type = scratch[1]
+ continue
+ if tokens[index].startswith("COUNT=") :
+ scratch = re.split('=', tokens[index])
+ replica_or_stripe_count = scratch[1]
+ continue
+ elif tokens[index].startswith("VOLUMEID=") :
+ scratch = re.split('=', tokens[index])
+ volume_id = scratch[1]
+ continue
+ elif tokens[index].startswith("VOLUME=") :
+ scratch = re.split('=', tokens[index])
+ volumes.append(scratch[1])
+ continue
+
+ print "<html><head>"
+ print "<meta http-equiv=\"pragma\" content=\"no-cache\">"
+ print "<title>Confirm Provision Volume</title>"
+ print "</head><body>"
+ print "<h2>Confirm Provision Volume</h2>"
+ cmd = "/usr/sbin/gluster volume create " + volume_id
+ if volume_type != "plain" :
+ cmd = cmd + " " + volume_type + " " + replica_or_stripe_count
+ cmd = cmd + " transport tcp"
+ for volume in volumes :
+ cmd = cmd + " " + volume.replace("%3A", ":").replace("%2F", "/")
+
+ print "<form method=\"post\" name=\"provision\" action=\"wwwdoprovision\">"
+ print ("<input type=\"hidden\" name=\"COMMAND\" value=\"%s\" />") % (cmd)
+ print ("%s <br><input type=\"submit\" name=\"PROVISION\" value=\"Confirm\" />") % (cmd)
+ print "</form>"
+ print "</body></html>"
+
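Roughly, the confirm step above just turns the posted form fields into a single
gluster volume create command line. A minimal standalone sketch of that parsing
and assembly (the helper name build_create_cmd and the sample body are made up
for illustration; the field names are the ones wwwprovision.py posts):

import re

def build_create_cmd(line):
    # line is the raw urlencoded POST body
    volume_id = volume_type = count = ""
    volumes = []
    for token in re.split('&', line):
        if token.startswith("TYPE="):
            volume_type = token.split('=', 1)[1]
        elif token.startswith("COUNT="):
            count = token.split('=', 1)[1]
        elif token.startswith("VOLUMEID="):
            volume_id = token.split('=', 1)[1]
        elif token.startswith("VOLUME="):
            # bricks arrive percent-encoded, e.g. node1%3A%2Fbricks%2Fb1
            volumes.append(token.split('=', 1)[1].replace("%3A", ":").replace("%2F", "/"))
    cmd = "/usr/sbin/gluster volume create " + volume_id
    if volume_type != "plain":
        cmd = cmd + " " + volume_type + " " + count
    cmd = cmd + " transport tcp"
    for vol in volumes:
        cmd = cmd + " " + vol
    return cmd

body = "TYPE=replica&COUNT=2&VOLUMEID=myvol&VOLUME=node1%3A%2Fbricks%2Fb1&VOLUME=node2%3A%2Fbricks%2Fb1&PROVISION=Provision"
print build_create_cmd(body)
# /usr/sbin/gluster volume create myvol replica 2 transport tcp node1:/bricks/b1 node2:/bricks/b1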
diff --git a/scripts/wwwdoinitcluster.py b/scripts/wwwdoinitcluster.py
new file mode 100644
index 0000000..615911b
--- /dev/null
+++ b/scripts/wwwdoinitcluster.py
@@ -0,0 +1,59 @@
+
+import fileinput
+import re
+import os
+import socket
+
+def www_doinitcluster():
+ print "Content-type: text/html"
+ print
+
+ f = fileinput.input()
+ line = f.readline()
+ fileinput.close()
+
+ host_name = socket.gethostname()
+
+ tokens = re.split('&', line)
+ node_name = ""
+
+ # all options will be extracted before we try to cons up a cmd
+ for index in range(len(tokens)) :
+ if tokens[index].startswith("NODENAME=") :
+ scratch = re.split('=', tokens[index])
+ node_name = scratch[1]
+
+ # derive the "real" node addr, e.g. if the user enters, e.g., 192.168.122.55
+ # and that is this IP for 'this' node then when we're done can_node_addr will
+ # either be 192.168.122.55 or 127.0.0.1.
+ # similarly if the user enters, e.g., <principalnode>.foo.bar.baz.com, the
+ # result will be the same
+ host_addr = socket.gethostbyname(host_name)
+ node_addr = socket.gethostbyname(node_name)
+ can_node_name = socket.gethostbyaddr(node_addr)
+ can_node_addr = socket.gethostbyname(can_node_name[0])
+
+
+ # now we can do the right thing if we're on the principal node or not
+ if node_addr == host_addr or "127.0.0.1" == can_node_addr :
+ sts = os.system("/usr/bin/sudo /sbin/chkconfig --add glusterd; /usr/bin/sudo /sbin/chkconfig glusterd on; /usr/bin/sudo /sbin/service glusterd start")
+ else :
+ cmd = "/usr/bin/sudo /usr/bin/ssh " + can_node_addr + " /sbin/chkconfig --add glusterd"
+ clean_cmd = cmd.replace("|", "").replace("&", "").replace(">", "")
+ sts = os.system(clean_cmd + " > /dev/null 2>&1");
+ cmd = "/usr/bin/sudo /usr/bin/ssh " + can_node_addr + " /sbin/chkconfig glusterd on"
+ clean_cmd = cmd.replace("|", "").replace("&", "").replace(">", "")
+ sts = os.system(clean_cmd + " > /dev/null 2>&1");
+ cmd = "/usr/bin/sudo /usr/bin/ssh " + can_node_addr + " /sbin/service glusterd start"
+ clean_cmd = cmd.replace("|", "").replace("&", "").replace(">", "")
+ sts = os.system(clean_cmd + " > /dev/null 2>&1");
+ cmd = "/usr/bin/sudo /usr/sbin/gluster peer probe " + node_name
+ clean_cmd = cmd.replace("|", "").replace("&", "").replace(">", "")
+ sts = os.system(clean_cmd + " > /dev/null 2>&1");
+
+ print "<html><head>"
+ print "<meta http-equiv=\"pragma\" content=\"no-cache\">"
+ print "<meta http-equiv=\"Refresh\" content=\"0; url=http:8080/wwwinitcluster\" />"
+ print "</head><body>"
+ print "<p>Please follow <a href=\"http:8080/wwwinitcluster\">link</a>!</p>"
+ print "</body></html>"
diff --git a/scripts/wwwdoprovision.py b/scripts/wwwdoprovision.py
new file mode 100644
index 0000000..d76e451
--- /dev/null
+++ b/scripts/wwwdoprovision.py
@@ -0,0 +1,114 @@
+
+import fileinput
+import re
+import os
+
+def www_doprovision():
+ print "Content-type: text/html"
+ print
+
+ f = fileinput.input()
+ line = f.readline()
+ fileinput.close()
+
+ tokens = re.split('&', line)
+
+ volume_id = ""
+ volume_type = ""
+ replica_or_stripe_count = ""
+ command = ""
+ volumes = []
+
+    # assume COMMAND= is the only field we need here, and that it will have
+    # been extracted before we try to run the cmd
+ for index in range(len(tokens)) :
+ if tokens[index].startswith("COMMAND=") :
+ scratch = re.split('=', tokens[index])
+ command = scratch[1]
+ continue
+
+ print "<html><head>"
+ print "<meta http-equiv=\"pragma\" content=\"no-cache\">"
+ print "<title>Provisioned Volume</title>"
+ print "</head><body>"
+ print "<h2>Provisioned Volume</h2>"
+ ## make the underlying volume with gluster
+ decode_cmd = command.replace("%3A", ":").replace("%2F", "/").replace("+", " ")
+ clean_cmd = decode_cmd.replace("|", "").replace("&", "").replace(">", "")
+ gluster_sts = os.system("/usr/bin/sudo " + clean_cmd + " > /dev/null 2>&1")
+ ## make the cloudfs vol files with cloudfs
+ cloudfs_sts = -1
+ cmd_tokens = re.split(' ', clean_cmd)
+ if gluster_sts != -1 and cmd_tokens[0] == "/usr/sbin/gluster" and cmd_tokens[1] == "volume" and cmd_tokens[2] == "create" :
+ cloudfs_cmd = "/usr/bin/sudo /usr/bin/cloudfs init " + cmd_tokens[3] + " /var/lib/glusterd/cloudfs.tenants > /dev/null 2>&1"
+ cloudfs_sts = os.system(cloudfs_cmd)
+
+ ## make the dirs on each node/volume
+ if cloudfs_sts != -1 :
+ ## first get all the tenants
+ tenants = []
+ tenants.append("junk")
+ for tenantline in fileinput.input("/var/lib/glusterd/cloudfs.tenants") :
+ scratch = re.split(' ', tenantline)
+ tenants.append(scratch[0])
+ fileinput.close()
+ ## now make the dirs on every volume
+ first_node = 6
+ if cmd_tokens[4] != "transport" :
+ first_node = first_node + 2
+ nodes = []
+ for ii in range(first_node, len(cmd_tokens)) :
+ scratch = re.split(':', cmd_tokens[ii])
+ nodes.append(scratch[0])
+ for tenant in tenants :
+ mkdir_cmd = "/usr/bin/sudo /usr/bin/ssh " + scratch[0] + " /bin/mkdir -p " + scratch[1] + "/" + tenant
+ clean_cmd = mkdir_cmd.replace("|", "").replace("&", "").replace(">", "")
+ mkdir_sts = os.system(clean_cmd + " > /dev/null 2>&1")
+ if mkdir_sts != 0 :
+ print "<br> fail: " + clean_cmd
+ # copy the modified vol files to the peers
+ unique_nodes = set(nodes)
+ this_ip = ""
+ ifconfig_pipe = os.popen("/sbin/ifconfig eth0")
+ for line in ifconfig_pipe :
+ line = line.lstrip()
+ tokens = re.split('[: ]', line)
+ if tokens[0] == "inet" and tokens[1] == "addr" :
+ this_ip = tokens[2]
+ scratch = []
+ scratch.append(this_ip)
+
+ unique_nodes = unique_nodes.difference(scratch)
+ ifconfig_pipe.close()
+            # ssh and scp (i.e. pull). We could just scp
+            # (i.e. push), but then we would have to add
+            # scp to sudoers, and we would like to minimize
+            # the number of things added to sudoers.
+ vol_name = cmd_tokens[3].replace("|", "").replace("&", "").replace(">", "")
+ for node in unique_nodes :
+ scp_cmd = "/usr/bin/sudo /usr/bin/ssh " + node + " 'cd /var/lib/glusterd/vols/" + vol_name + " && /usr/bin/scp -q -r " + this_ip + ":/var/lib/glusterd/vols/" + vol_name + "/* .'"
+ scp_sts = os.system(scp_cmd)
+ if scp_sts != 0 :
+ print "<br>fail: " + scp_cmd
+
+ # now start the volume
+ start_cmd = "/usr/bin/sudo /usr/sbin/gluster volume start " + vol_name
+ start_sts = os.system(start_cmd + " > /dev/null 2>&1")
+ if start_sts != 0 :
+ print "<br> fail: " + start_cmd
+ else :
+ print("<br> %s started") % (vol_name)
+
+ # list FUSE volume specs for download
+ mkdir_sts = os.system("/usr/bin/sudo /bin/mkdir -p /var/www/html/cfg/scratch")
+ cp_sts = os.system("/usr/bin/sudo /bin/cp /var/lib/glusterd/vols/" + vol_name + "/" + vol_name + "-fuse.vol.* /var/www/html/cfg/scratch/")
+ print "<hr><br> client/tenant volume files (right-click to save-as):"
+ for tenant in tenants :
+ if tenant != "junk" :
+ print("<br><a href=\"/cfg/scratch/%s-fuse.vol.%s\">%s</a>") % (vol_name, tenant, tenant)
+ print "<hr>"
+ print "<form method=\"post\" action=\"cfgmain\">"
+ print "<input type=\"submit\" name=\"DONE\" value=\"Done\"></form>"
+
+ print "</body></html>"
+
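The same sanitize-and-run idiom appears throughout this script: strip "|", "&"
and ">" from the command, run it with os.system(), and discard its output. A
tiny equivalent helper, purely illustrative (run_cleaned is not a name used in
the patch):

import os

def run_cleaned(cmd):
    # drop shell metacharacters the web layer must never pass through,
    # then run the command quietly; returns the os.system() exit status
    clean = cmd.replace("|", "").replace("&", "").replace(">", "")
    return os.system(clean + " > /dev/null 2>&1")

# e.g. run_cleaned("/usr/bin/sudo /usr/sbin/gluster volume start myvol")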
diff --git a/scripts/wwwinitcluster.py b/scripts/wwwinitcluster.py
new file mode 100644
index 0000000..44f9f5f
--- /dev/null
+++ b/scripts/wwwinitcluster.py
@@ -0,0 +1,43 @@
+
+import datetime
+import fileinput
+import os
+import re
+import string
+import socket
+import sys
+
+def www_initcluster():
+ fileinput.close()
+
+ hostname = socket.gethostname()
+
+ node_ips = []
+
+ sts = os.system("/usr/bin/sudo /sbin/chkconfig --add glusterd; /usr/bin/sudo /sbin/chkconfig glusterd on; /usr/bin/sudo /sbin/service glusterd start")
+ peer_ips = os.popen("/usr/bin/sudo /usr/sbin/gluster peer status | /bin/grep Hostname:")
+ for line in peer_ips :
+ tokens = re.split(':', line)
+ node_ips.append(socket.gethostbyname(string.strip(tokens[1])))
+ peer_ips.close()
+
+ print "Content-type: text/html"
+ print
+ print "<html><head>"
+ print "<meta http-equiv=\"pragma\" content=\"no-cache\"><title>Initialize CloudFS Cluster</title>"
+ print "</head><body>"
+ print "<h2>Initialize CloudFS Cluster</h2>"
+ print "<hr>"
+ print "<h2>Cluster Nodes</h2>"
+ for node_ip in node_ips :
+ print node_ip + "<br>"
+ print "<hr><br>"
+ print "Enter the hostname of a node to add to the cluster"
+ print "<form method=\"post\" action=\"wwwdoinitcluster\">"
+ print "Node Name: <input type=\"text\" name=\"NODENAME\">"
+ print "<input type=\"submit\" name=\"ADDNODE\" value=\"Add Node\"></form>"
+ print "<hr>"
+ print "<form method=\"post\" action=\"/cfgmain.\">"
+ print "<input type=\"submit\" name=\"DONE\" value=\"Done\"></form>"
+
+ print "</body></html>"
diff --git a/scripts/wwwprovision.py b/scripts/wwwprovision.py
new file mode 100644
index 0000000..c4e2910
--- /dev/null
+++ b/scripts/wwwprovision.py
@@ -0,0 +1,126 @@
+
+import datetime
+import fileinput
+import os
+import re
+import string
+import socket
+import sys
+
+def brick_used(bricks_by_vol, needle) :
+    # bricks_by_vol is built inside www_provision() and passed in explicitly
+    for vol in bricks_by_vol :
+        for brick in vol :
+            if brick == needle :
+                return True
+    return False
+
+def www_provision() :
+
+ fileinput.close()
+
+ hostname = socket.gethostname()
+
+ print "Content-type: text/html"
+ print
+ print "<html><head><meta http-equiv=\"pragma\" content=\"no-cache\"><title>Provision CloudFS Volume</title>"
+
+ existing_vols = []
+ bricks_by_vol = []
+ bbv_index = -1
+
+ volinfo_pipe = os.popen("/usr/bin/sudo /usr/sbin/gluster volume info all")
+ for line in volinfo_pipe :
+ line = line.lstrip()
+ if line.startswith("Volume") :
+ tokens = re.split(':', line)
+ if tokens[0].strip() == "Volume Name" :
+ existing_vols.append(tokens[1].strip())
+ elif line.startswith("Bricks:") :
+ bricks = []
+ bricks_by_vol.append(bricks)
+ bbv_index = bbv_index + 1
+ elif line.startswith("Brick") :
+ tokens = re.split(' ', line)
+ bricks_by_vol[bbv_index].append(tokens[1].strip())
+ volinfo_pipe.close()
+
+ node_ips = []
+
+ ifconfig_pipe = os.popen("/sbin/ifconfig eth0")
+ for line in ifconfig_pipe :
+ line = line.lstrip()
+ tokens = re.split('[: ]', line)
+ if tokens[0] == "inet" and tokens[1] == "addr" :
+ node_ips.append(tokens[2])
+ ifconfig_pipe.close()
+
+ peer_ips = os.popen("/usr/bin/sudo /usr/sbin/gluster peer status | /bin/grep Hostname:")
+ for line in peer_ips :
+ tokens = re.split(':', line)
+ node_ips.append(socket.gethostbyname(string.strip(tokens[1])))
+ peer_ips.close()
+
+ volumes_on_nodes = []
+
+ for node_ip in node_ips :
+ # print("<p>%s</p>") % (node_ip)
+ cmd = "/usr/bin/sudo /usr/bin/ssh " + node_ip + " df -H"
+ volumes_on_node = os.popen(cmd)
+ scratch = []
+ for line in volumes_on_node :
+ line = line.rstrip()
+ if False == line.startswith("Filesystem") and False == line.startswith("/dev/mapper") and False == line.startswith("tmpfs") and False == line.endswith(" /") and False == line.endswith("/boot") :
+ scratch.append(line)
+ volumes_on_node.close()
+ volumes_on_nodes.append(scratch)
+
+ print "<style type=\"text/css\">"
+ print ".colmask{ position: relative; overflow: hidden; margin: 0px auto; width: 100%; background-color: #b4d2f7 }"
+ print ".header{ float: left; width: 100%; background-color: #f4f4f4}"
+ print ".wrapper{ position: relative; float: left; left: 0.00%; width: 100.00%; background-color: #cccccc}"
+ #for node_ip in node_ips :
+ # print(".col%s{ position: relative; float: left; width: %d%%; left: 1%%; background-color: #b4d2f7}") % (node_ip.replace(".", "_"), 100/len(node_ips)-1)
+ print "tr.d0 td { background-color: #CC9999; color: black; }"
+ print "tr.d1 td { background-color: #9999CC; color: black; }"
+ print ".footer{ float: left; width: 100%; background-color: #f4f4f4}"
+ print "body { border-width: 0px; padding: 0px; margin: 0px; font-size: 90%; background-color: #e7e7de}"
+ print "</style></head><body>"
+ print "<h1>Provision CloudFS Volume</h1>"
+ print "<h2>Existing Volumes:</h2>"
+ bbv_index = 0
+ for existing_vol in existing_vols :
+ print "<p>" + existing_vol + ":"
+ for brick in bricks_by_vol[bbv_index] :
+ print "<br> " + brick
+ bbv_index = bbv_index + 1
+ print "</p>"
+ print "<hr>"
+ print "<h2>Provision a Volume From Available Bricks:</h2>"
+ print "<form method=\"post\" name=\"provision\" action=\"wwwconfirmprovision\">"
+ print "<div class=\"header\"><hr></div>"
+ print "<div class=\"wrapper\">"
+ print "<table>"
+ node_index = 0
+ for node_ip in node_ips :
+ print("<tr class=\"d%d\">") % (node_index % 2)
+ print("<td>%s</td>") % (node_ip)
+ for volumes_on_node in volumes_on_nodes[node_index] :
+ tokens = volumes_on_node.rpartition(" ")
+            if brick_used(bricks_by_vol, node_ip + ":" + tokens[2]) == True :
+ print("<td><input type=\"checkbox\" name=\"VOLUME\" value=\"%s:%s\" disabled />%s</td>") % (node_ip, tokens[2], tokens[2])
+ else :
+ print("<td><input type=\"checkbox\" name=\"VOLUME\" value=\"%s:%s\" />%s</td>") % (node_ip, tokens[2], tokens[2])
+ print "</tr>"
+ node_index = node_index + 1
+ print "</table>"
+ print "</div>"
+ print "<div class=\"footer\"><hr></div>"
+ print "Volume Type: <input type=\"radio\" name=\"TYPE\" value=\"plain\" checked />Plain"
+ print "<input type=\"radio\" name=\"TYPE\" value=\"replica\" />Replicated"
+ print "<input type=\"radio\" name=\"TYPE\" value=\"stripe\" />Striped"
+ print "<br>Replica or Stripe count: <input type=\"text\" name=\"COUNT\" size=\"2\" />"
+ print "<br>Volume ID: <input type=\"text\" name=\"VOLUMEID\" />"
+ print "<input type=\"submit\" name=\"PROVISION\" value=\"Provision\" />"
+ print "</form>"
+
+ print "</body></html>"
diff --git a/scripts/wwwroot.py b/scripts/wwwroot.py
new file mode 100644
index 0000000..5990022
--- /dev/null
+++ b/scripts/wwwroot.py
@@ -0,0 +1,15 @@
+
+import fileinput
+
+def www_root() :
+ fileinput.close()
+
+ print "Content-type: text/html"
+ print
+ print "<html><head>"
+ print "<meta http-equiv=\"pragma\" content=\"no-cache\">"
+# print "<meta http-equiv=\"Refresh\" content=\"0; url=https:/cfg/\" />"
+ print "<meta http-equiv=\"Refresh\" content=\"0; url=http:8080/cfg/\" />"
+ print "</head><body>"
+ print "<p>Please follow <a href=\"https:/cfg/\">link</a>!</p>"
+ print "</body></html>"
commit 897a788f3383ddcdae3d704985f315754afc99a3
Author: Kaleb S. KEITHLEY <kkeithle(a)cloudfs-node01.kkeithle.redhat.com>
Date: Fri Apr 29 11:29:20 2011 -0400
checkpoint wip
diff --git a/scripts/bottle.py b/scripts/bottle.py
new file mode 100644
index 0000000..8f2be9e
--- /dev/null
+++ b/scripts/bottle.py
@@ -0,0 +1,1934 @@
+# -*- coding: utf-8 -*-
+"""
+Bottle is a fast and simple micro-framework for small web applications. It
+offers request dispatching (Routes) with url parameter support, templates,
+a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
+template engines - all in a single file and with no dependencies other than the
+Python Standard Library.
+
+Homepage and documentation: http://bottle.paws.de/
+
+Licence (MIT)
+-------------
+
+ Copyright (c) 2009, Marcel Hellkamp.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+
+
+Example
+-------
+
+This is an example::
+
+ from bottle import route, run, request, response, static_file, abort
+
+ @route('/')
+ def hello_world():
+ return 'Hello World!'
+
+ @route('/hello/:name')
+ def hello_name(name):
+ return 'Hello %s!' % name
+
+ @route('/hello', method='POST')
+ def hello_post():
+ name = request.POST['name']
+ return 'Hello %s!' % name
+
+ @route('/static/:filename#.*#')
+ def static(filename):
+ return static_file(filename, root='/path/to/static/files/')
+
+ run(host='localhost', port=8080)
+"""
+
+from __future__ import with_statement
+
+__author__ = 'Marcel Hellkamp'
+__version__ = '0.8.5'
+__license__ = 'MIT'
+
+import base64
+import cgi
+import email.utils
+import functools
+import hmac
+import inspect
+import itertools
+import mimetypes
+import os
+import re
+import subprocess
+import sys
+import thread
+import threading
+import time
+import tokenize
+import tempfile
+
+from Cookie import SimpleCookie
+from tempfile import TemporaryFile
+from traceback import format_exc
+from urllib import quote as urlquote
+from urlparse import urlunsplit, urljoin
+
+try:
+ from collections import MutableMapping as DictMixin
+except ImportError: # pragma: no cover
+ from UserDict import DictMixin
+
+try:
+ from urlparse import parse_qs
+except ImportError: # pragma: no cover
+ from cgi import parse_qs
+
+try:
+ import cPickle as pickle
+except ImportError: # pragma: no cover
+ import pickle
+
+try:
+ try:
+ from json import dumps as json_dumps
+ except ImportError: # pragma: no cover
+ from simplejson import dumps as json_dumps
+except ImportError: # pragma: no cover
+ json_dumps = None
+
+if sys.version_info >= (3,0,0): # pragma: no cover
+ # See Request.POST
+ from io import BytesIO
+ from io import TextIOWrapper
+ class NCTextIOWrapper(TextIOWrapper):
+ ''' Garbage collecting an io.TextIOWrapper(buffer) instance closes the
+ wrapped buffer. This subclass keeps it open. '''
+ def close(self): pass
+ StringType = bytes
+ def touni(x, enc='utf8'): # Convert anything to unicode (py3)
+ return str(x, encoding=enc) if isinstance(x, bytes) else str(x)
+else:
+ from StringIO import StringIO as BytesIO
+ from types import StringType
+ NCTextIOWrapper = None
+ def touni(x, enc='utf8'): # Convert anything to unicode (py2)
+ return x if isinstance(x, unicode) else unicode(str(x), encoding=enc)
+
+def tob(data, enc='utf8'): # Convert strings to bytes (py2 and py3)
+ return data.encode(enc) if isinstance(data, unicode) else data
+
+# Backward compatibility
+import warnings
+def depr(message, critical=False):
+ if critical: raise DeprecationWarning(message)
+ warnings.warn(message, DeprecationWarning, stacklevel=3)
+
+
+
+
+
+
+# Exceptions and Events
+
+class BottleException(Exception):
+ """ A base class for exceptions used by bottle. """
+ pass
+
+
+class HTTPResponse(BottleException):
+ """ Used to break execution and immediately finish the response """
+ def __init__(self, output='', status=200, header=None):
+ super(BottleException, self).__init__("HTTP Response %d" % status)
+ self.status = int(status)
+ self.output = output
+ self.headers = HeaderDict(header) if header else None
+
+ def apply(self, response):
+ if self.headers:
+ for key, value in self.headers.iterallitems():
+ response.headers[key] = value
+ response.status = self.status
+
+
+class HTTPError(HTTPResponse):
+ """ Used to generate an error page """
+ def __init__(self, code=500, output='Unknown Error', exception=None, traceback=None, header=None):
+ super(HTTPError, self).__init__(output, code, header)
+ self.exception = exception
+ self.traceback = traceback
+
+ def __repr__(self):
+ return ''.join(ERROR_PAGE_TEMPLATE.render(e=self))
+
+
+
+
+
+
+# Routing
+
+class RouteError(BottleException):
+ """ This is a base class for all routing related exceptions """
+
+
+class RouteSyntaxError(RouteError):
+ """ The route parser found something not supported by this router """
+
+
+class RouteBuildError(RouteError):
+ """ The route could not been build """
+
+
+class Route(object):
+ ''' Represents a single route and can parse the dynamic route syntax '''
+ syntax = re.compile(r'(.*?)(?<!\\):([a-zA-Z_]+)?(?:#(.*?)#)?')
+ default = '[^/]+'
+
+ def __init__(self, route, target=None, name=None, static=False):
+ """ Create a Route. The route string may contain `:key`,
+ `:key#regexp#` or `:#regexp#` tokens for each dynamic part of the
+            route. These can be escaped with a backslash in front of the `:`
+            and are completely ignored if static is true. A name may be used
+ to refer to this route later (depends on Router)
+ """
+ self.route = route
+ self.target = target
+ self.name = name
+ if static:
+ self.route = self.route.replace(':','\\:')
+ self._tokens = None
+
+ def tokens(self):
+ """ Return a list of (type, value) tokens. """
+ if not self._tokens:
+ self._tokens = list(self.tokenise(self.route))
+ return self._tokens
+
+ @classmethod
+ def tokenise(cls, route):
+ ''' Split a string into an iterator of (type, value) tokens. '''
+ match = None
+ for match in cls.syntax.finditer(route):
+ pre, name, rex = match.groups()
+ if pre: yield ('TXT', pre.replace('\\:',':'))
+ if rex and name: yield ('VAR', (rex, name))
+ elif name: yield ('VAR', (cls.default, name))
+ elif rex: yield ('ANON', rex)
+ if not match:
+ yield ('TXT', route.replace('\\:',':'))
+ elif match.end() < len(route):
+ yield ('TXT', route[match.end():].replace('\\:',':'))
+
+ def group_re(self):
+ ''' Return a regexp pattern with named groups '''
+ out = ''
+ for token, data in self.tokens():
+ if token == 'TXT': out += re.escape(data)
+ elif token == 'VAR': out += '(?P<%s>%s)' % (data[1], data[0])
+ elif token == 'ANON': out += '(?:%s)' % data
+ return out
+
+ def flat_re(self):
+ ''' Return a regexp pattern with non-grouping parentheses '''
+ rf = lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:'
+ return re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', rf, self.group_re())
+
+ def format_str(self):
+ ''' Return a format string with named fields. '''
+ out, i = '', 0
+ for token, value in self.tokens():
+ if token == 'TXT': out += value.replace('%','%%')
+ elif token == 'ANON': out += '%%(anon%d)s' % i; i+=1
+ elif token == 'VAR': out += '%%(%s)s' % value[1]
+ return out
+
+ @property
+ def static(self):
+ return not self.is_dynamic()
+
+ def is_dynamic(self):
+ ''' Return true if the route contains dynamic parts '''
+ for token, value in self.tokens():
+ if token != 'TXT':
+ return True
+ return False
+
+ def __repr__(self):
+ return "<Route(%s) />" % repr(self.route)
+
+ def __eq__(self, other):
+ return self.route == other.route
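As a quick illustration of the dynamic route syntax the Route docstring
describes (this usage sketch is not part of bottle itself, just the helpers
above exercised by hand):

r = Route('/hello/:name/:id#[0-9]+#')
print r.tokens()      # [('TXT', '/hello/'), ('VAR', ('[^/]+', 'name')), ('TXT', '/'), ('VAR', ('[0-9]+', 'id'))]
print r.group_re()    # roughly \/hello\/(?P<name>[^/]+)\/(?P<id>[0-9]+)
print r.format_str()  # /hello/%(name)s/%(id)s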
+
+class Router(object):
+ ''' A route associates a string (e.g. URL) with an object (e.g. function)
+ Some dynamic routes may extract parts of the string and provide them as
+ a dictionary. This router matches a string against multiple routes and
+ returns the associated object along with the extracted data.
+ '''
+
+ def __init__(self):
+ self.routes = [] # List of all installed routes
+ self.named = {} # Cache for named routes and their format strings
+ self.static = {} # Cache for static routes
+ self.dynamic = [] # Search structure for dynamic routes
+
+ def add(self, route, target=None, **ka):
+ """ Add a route->target pair or a :class:`Route` object to the Router.
+ Return the Route object. See :class:`Route` for details.
+ """
+ if not isinstance(route, Route):
+ route = Route(route, target, **ka)
+ if self.get_route(route):
+            return RouteError('Route %s is not unique.' % route)
+ self.routes.append(route)
+ return route
+
+ def get_route(self, route, target=None, **ka):
+ ''' Get a route from the router by specifying either the same
+ parameters as in :meth:`add` or comparing to an instance of
+ :class:`Route`. Note that not all parameters are considered by the
+ compare function. '''
+ if not isinstance(route, Route):
+ route = Route(route, **ka)
+ for known in self.routes:
+ if route == known:
+ return known
+ return None
+
+ def match(self, uri):
+        ''' Match a URI and return a (target, urlargs) tuple '''
+ if uri in self.static:
+ return self.static[uri], {}
+ for combined, subroutes in self.dynamic:
+ match = combined.match(uri)
+ if not match: continue
+ target, args_re = subroutes[match.lastindex - 1]
+ args = args_re.match(uri).groupdict() if args_re else {}
+ return target, args
+ return None, {}
+
+ def build(self, _name, **args):
+        ''' Build a URI out of a named route and values for the wildcards. '''
+ try:
+ return self.named[_name] % args
+ except KeyError:
+ raise RouteBuildError("No route found with name '%s'." % _name)
+
+ def compile(self):
+ ''' Build the search structures. Call this before actually using the
+ router.'''
+ self.named = {}
+ self.static = {}
+ self.dynamic = []
+ for route in self.routes:
+ if route.name:
+ self.named[route.name] = route.format_str()
+ if route.static:
+ self.static[route.route] = route.target
+ continue
+ gpatt = route.group_re()
+ fpatt = route.flat_re()
+ try:
+ gregexp = re.compile('^(%s)$' % gpatt) if '(?P' in gpatt else None
+ combined = '%s|(^%s$)' % (self.dynamic[-1][0].pattern, fpatt)
+ self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1])
+ self.dynamic[-1][1].append((route.target, gregexp))
+ except (AssertionError, IndexError), e: # AssertionError: Too many groups
+ self.dynamic.append((re.compile('(^%s$)'%fpatt),[(route.target, gregexp)]))
+ except re.error, e:
+ raise RouteSyntaxError("Could not add Route: %s (%s)" % (route, e))
+
+ def __eq__(self, other):
+ return self.routes == other.routes
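A matching illustration of how the Router above is driven, again not part of
bottle itself:

r = Router()
r.add('/static', target='static-handler')
r.add('/wiki/:page', target='wiki-handler')
r.compile()                       # build the static dict and the combined regexps
print r.match('/static')          # ('static-handler', {})
print r.match('/wiki/Sandbox')    # ('wiki-handler', {'page': 'Sandbox'})
print r.match('/nowhere')         # (None, {})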
+
+
+
+
+
+# WSGI abstraction: Application, Request and Response objects
+
+class Bottle(object):
+ """ WSGI application """
+
+ def __init__(self, catchall=True, autojson=True, config=None):
+ """ Create a new bottle instance.
+ You usually don't do that. Use `bottle.app.push()` instead.
+ """
+ self.routes = Router()
+ self.mounts = {}
+ self.error_handler = {}
+ self.catchall = catchall
+ self.config = config or {}
+ self.serve = True
+ self.castfilter = []
+ if autojson and json_dumps:
+ self.add_filter(dict, dict2json)
+
+ def optimize(self, *a, **ka):
+ depr("Bottle.optimize() is obsolete.")
+
+ def mount(self, app, script_path):
+ ''' Mount a Bottle application to a specific URL prefix '''
+ if not isinstance(app, Bottle):
+ raise TypeError('Only Bottle instances are supported for now.')
+ script_path = '/'.join(filter(None, script_path.split('/')))
+ path_depth = script_path.count('/') + 1
+ if not script_path:
+ raise TypeError('Empty script_path. Perhaps you want a merge()?')
+ for other in self.mounts:
+ if other.startswith(script_path):
+ raise TypeError('Conflict with existing mount: %s' % other)
+ @self.route('/%s/:#.*#' % script_path, method="ANY")
+ def mountpoint():
+ request.path_shift(path_depth)
+ return app.handle(request.path, request.method)
+ self.mounts[script_path] = app
+
+ def add_filter(self, ftype, func):
+ ''' Register a new output filter. Whenever bottle hits a handler output
+ matching `ftype`, `func` is applied to it. '''
+ if not isinstance(ftype, type):
+ raise TypeError("Expected type object, got %s" % type(ftype))
+ self.castfilter = [(t, f) for (t, f) in self.castfilter if t != ftype]
+ self.castfilter.append((ftype, func))
+ self.castfilter.sort()
+
+ def match_url(self, path, method='GET'):
+ """ Find a callback bound to a path and a specific HTTP method.
+ Return (callback, param) tuple or raise HTTPError.
+ method: HEAD falls back to GET. All methods fall back to ANY.
+ """
+ path, method = path.strip().lstrip('/'), method.upper()
+ callbacks, args = self.routes.match(path)
+ if not callbacks:
+ raise HTTPError(404, "Not found: " + path)
+ if method in callbacks:
+ return callbacks[method], args
+ if method == 'HEAD' and 'GET' in callbacks:
+ return callbacks['GET'], args
+ if 'ANY' in callbacks:
+ return callbacks['ANY'], args
+ allow = [m for m in callbacks if m != 'ANY']
+ if 'GET' in allow and 'HEAD' not in allow:
+ allow.append('HEAD')
+ raise HTTPError(405, "Method not allowed.",
+ header=[('Allow',",".join(allow))])
+
+ def get_url(self, routename, **kargs):
+ """ Return a string that matches a named route """
+ scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
+ location = self.routes.build(routename, **kargs).lstrip('/')
+ return urljoin(urljoin('/', scriptname), location)
+
+ def route(self, path=None, method='GET', **kargs):
+ """ Decorator: bind a function to a GET request path.
+
+ If the path parameter is None, the signature of the decorated
+ function is used to generate the paths. See yieldroutes()
+ for details.
+
+ The method parameter (default: GET) specifies the HTTP request
+ method to listen to. You can specify a list of methods too.
+ """
+ def wrapper(callback):
+ routes = [path] if path else yieldroutes(callback)
+ methods = method.split(';') if isinstance(method, str) else method
+ for r in routes:
+ for m in methods:
+ r, m = r.strip().lstrip('/'), m.strip().upper()
+ old = self.routes.get_route(r, **kargs)
+ if old:
+ old.target[m] = callback
+ else:
+ self.routes.add(r, {m: callback}, **kargs)
+ self.routes.compile()
+ return callback
+ return wrapper
+
+ def get(self, path=None, method='GET', **kargs):
+ """ Decorator: Bind a function to a GET request path.
+ See :meth:'route' for details. """
+ return self.route(path, method, **kargs)
+
+ def post(self, path=None, method='POST', **kargs):
+ """ Decorator: Bind a function to a POST request path.
+ See :meth:'route' for details. """
+ return self.route(path, method, **kargs)
+
+ def put(self, path=None, method='PUT', **kargs):
+ """ Decorator: Bind a function to a PUT request path.
+ See :meth:'route' for details. """
+ return self.route(path, method, **kargs)
+
+ def delete(self, path=None, method='DELETE', **kargs):
+ """ Decorator: Bind a function to a DELETE request path.
+ See :meth:'route' for details. """
+ return self.route(path, method, **kargs)
+
+ def error(self, code=500):
+ """ Decorator: Registrer an output handler for a HTTP error code"""
+ def wrapper(handler):
+ self.error_handler[int(code)] = handler
+ return handler
+ return wrapper
+
+ def handle(self, url, method):
+ """ Execute the handler bound to the specified url and method and return
+        its output. If catchall is true, exceptions are caught and returned as
+ HTTPError(500) objects. """
+ if not self.serve:
+ return HTTPError(503, "Server stopped")
+ try:
+ handler, args = self.match_url(url, method)
+ return handler(**args)
+ except HTTPResponse, e:
+ return e
+ except Exception, e:
+ if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\
+ or not self.catchall:
+ raise
+ return HTTPError(500, 'Unhandled exception', e, format_exc(10))
+
+ def _cast(self, out, request, response, peek=None):
+ """ Try to convert the parameter into something WSGI compatible and set
+ correct HTTP headers when possible.
+ Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
+ iterable of strings and iterable of unicodes
+ """
+ # Filtered types (recursive, because they may return anything)
+ for testtype, filterfunc in self.castfilter:
+ if isinstance(out, testtype):
+ return self._cast(filterfunc(out), request, response)
+
+ # Empty output is done here
+ if not out:
+ response.headers['Content-Length'] = 0
+ return []
+ # Join lists of byte or unicode strings. Mixed lists are NOT supported
+ if isinstance(out, (tuple, list))\
+ and isinstance(out[0], (StringType, unicode)):
+ out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
+ # Encode unicode strings
+ if isinstance(out, unicode):
+ out = out.encode(response.charset)
+ # Byte Strings are just returned
+ if isinstance(out, StringType):
+ response.headers['Content-Length'] = str(len(out))
+ return [out]
+ # HTTPError or HTTPException (recursive, because they may wrap anything)
+ if isinstance(out, HTTPError):
+ out.apply(response)
+ return self._cast(self.error_handler.get(out.status, repr)(out), request, response)
+ if isinstance(out, HTTPResponse):
+ out.apply(response)
+ return self._cast(out.output, request, response)
+
+ # File-like objects.
+ if hasattr(out, 'read'):
+ if 'wsgi.file_wrapper' in request.environ:
+ return request.environ['wsgi.file_wrapper'](out)
+ elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
+ return WSGIFileWrapper(out)
+
+ # Handle Iterables. We peek into them to detect their inner type.
+ try:
+ out = iter(out)
+ first = out.next()
+ while not first:
+ first = out.next()
+ except StopIteration:
+ return self._cast('', request, response)
+ except HTTPResponse, e:
+ first = e
+ except Exception, e:
+ first = HTTPError(500, 'Unhandled exception', e, format_exc(10))
+ if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\
+ or not self.catchall:
+ raise
+ # These are the inner types allowed in iterator or generator objects.
+ if isinstance(first, HTTPResponse):
+ return self._cast(first, request, response)
+ if isinstance(first, StringType):
+ return itertools.chain([first], out)
+ if isinstance(first, unicode):
+ return itertools.imap(lambda x: x.encode(response.charset),
+ itertools.chain([first], out))
+ return self._cast(HTTPError(500, 'Unsupported response type: %s'\
+ % type(first)), request, response)
+
+ def __call__(self, environ, start_response):
+ """ The bottle WSGI-interface. """
+ try:
+ environ['bottle.app'] = self
+ request.bind(environ)
+ response.bind(self)
+ out = self.handle(request.path, request.method)
+ out = self._cast(out, request, response)
+ # rfc2616 section 4.3
+ if response.status in (100, 101, 204, 304) or request.method == 'HEAD':
+ out = []
+ status = '%d %s' % (response.status, HTTP_CODES[response.status])
+ start_response(status, response.headerlist)
+ return out
+ except (KeyboardInterrupt, SystemExit, MemoryError):
+ raise
+ except Exception, e:
+ if not self.catchall:
+ raise
+ err = '<h1>Critical error while processing request: %s</h1>' \
+ % environ.get('PATH_INFO', '/')
+ if DEBUG:
+ err += '<h2>Error:</h2>\n<pre>%s</pre>\n' % repr(e)
+ err += '<h2>Traceback:</h2>\n<pre>%s</pre>\n' % format_exc(10)
+ environ['wsgi.errors'].write(err) #TODO: wsgi.error should not get html
+ start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/html')])
+ return [tob(err)]
+
+
+class Request(threading.local, DictMixin):
+ """ Represents a single HTTP request using thread-local attributes.
+ The Request object wraps a WSGI environment and can be used as such.
+ """
+ def __init__(self, environ=None, config=None):
+ """ Create a new Request instance.
+
+ You usually don't do this but use the global `bottle.request`
+ instance instead.
+ """
+ self.bind(environ or {}, config)
+
+ def bind(self, environ, config=None):
+ """ Bind a new WSGI enviroment.
+
+ This is done automatically for the global `bottle.request`
+ instance on every request.
+ """
+ self.environ = environ
+ self.config = config or {}
+ # These attributes are used anyway, so it is ok to compute them here
+ self.path = '/' + environ.get('PATH_INFO', '/').lstrip('/')
+ self.method = environ.get('REQUEST_METHOD', 'GET').upper()
+
+ @property
+ def _environ(self):
+ depr("Request._environ renamed to Request.environ")
+ return self.environ
+
+ def copy(self):
+ ''' Returns a copy of self '''
+ return Request(self.environ.copy(), self.config)
+
+ def path_shift(self, shift=1):
+ ''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
+
+ :param shift: The number of path fragments to shift. May be negative to
+ change the shift direction. (default: 1)
+ '''
+ script_name = self.environ.get('SCRIPT_NAME','/')
+ self['SCRIPT_NAME'], self.path = path_shift(script_name, self.path, shift)
+ self['PATH_INFO'] = self.path
+
+ def __getitem__(self, key): return self.environ[key]
+ def __delitem__(self, key): self[key] = ""; del(self.environ[key])
+ def __iter__(self): return iter(self.environ)
+ def __len__(self): return len(self.environ)
+ def keys(self): return self.environ.keys()
+ def __setitem__(self, key, value):
+ """ Shortcut for Request.environ.__setitem__ """
+ self.environ[key] = value
+ todelete = []
+ if key in ('PATH_INFO','REQUEST_METHOD'):
+ self.bind(self.environ, self.config)
+ elif key == 'wsgi.input': todelete = ('body','forms','files','params')
+ elif key == 'QUERY_STRING': todelete = ('get','params')
+ elif key.startswith('HTTP_'): todelete = ('headers', 'cookies')
+ for key in todelete:
+ if 'bottle.' + key in self.environ:
+ del self.environ['bottle.' + key]
+
+ @property
+ def query_string(self):
+ """ The content of the QUERY_STRING environment variable. """
+ return self.environ.get('QUERY_STRING', '')
+
+ @property
+ def fullpath(self):
+ """ Request path including SCRIPT_NAME (if present) """
+ return self.environ.get('SCRIPT_NAME', '').rstrip('/') + self.path
+
+ @property
+ def url(self):
+ """ Full URL as requested by the client (computed).
+
+ This value is constructed out of different environment variables
+ and includes scheme, host, port, scriptname, path and query string.
+ """
+ scheme = self.environ.get('wsgi.url_scheme', 'http')
+ host = self.environ.get('HTTP_X_FORWARDED_HOST', self.environ.get('HTTP_HOST', None))
+ if not host:
+ host = self.environ.get('SERVER_NAME')
+ port = self.environ.get('SERVER_PORT', '80')
+ if scheme + port not in ('https443', 'http80'):
+ host += ':' + port
+ parts = (scheme, host, urlquote(self.fullpath), self.query_string, '')
+ return urlunsplit(parts)
+
+ @property
+ def content_length(self):
+ """ Content-Length header as an integer, -1 if not specified """
+ return int(self.environ.get('CONTENT_LENGTH','') or -1)
+
+ @property
+ def header(self):
+ ''' :class:`HeaderDict` filled with request headers.
+
+ HeaderDict keys are case insensitive str.title()d
+ '''
+ if 'bottle.headers' not in self.environ:
+ header = self.environ['bottle.headers'] = HeaderDict()
+ for key, value in self.environ.iteritems():
+ if key.startswith('HTTP_'):
+ key = key[5:].replace('_','-').title()
+ header[key] = value
+ return self.environ['bottle.headers']
+
+ @property
+ def GET(self):
+ """ The QUERY_STRING parsed into a MultiDict.
+
+ Keys and values are strings. Multiple values per key are possible.
+ See MultiDict for details.
+ """
+ if 'bottle.get' not in self.environ:
+ data = parse_qs(self.query_string, keep_blank_values=True)
+ get = self.environ['bottle.get'] = MultiDict()
+ for key, values in data.iteritems():
+ for value in values:
+ get[key] = value
+ return self.environ['bottle.get']
+
+ @property
+ def POST(self):
+ """ Property: The HTTP POST body parsed into a MultiDict.
+
+ This supports urlencoded and multipart POST requests. Multipart
+ is commonly used for file uploads and may result in some of the
+ values being cgi.FieldStorage objects instead of strings.
+
+ Multiple values per key are possible. See MultiDict for details.
+ """
+ if 'bottle.post' not in self.environ:
+ self.environ['bottle.post'] = MultiDict()
+ self.environ['bottle.forms'] = MultiDict()
+ self.environ['bottle.files'] = MultiDict()
+ safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
+ for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
+ if key in self.environ: safe_env[key] = self.environ[key]
+ if NCTextIOWrapper:
+ fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n')
+ # TODO: Content-Length may be wrong now. Does cgi.FieldStorage
+ # use it at all? I think not, because all tests pass.
+ else:
+ fb = self.body
+ data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True)
+ for item in data.list or []:
+ if item.filename:
+ self.environ['bottle.post'][item.name] = item
+ self.environ['bottle.files'][item.name] = item
+ else:
+ self.environ['bottle.post'][item.name] = item.value
+ self.environ['bottle.forms'][item.name] = item.value
+ return self.environ['bottle.post']
+
+ @property
+ def forms(self):
+ """ Property: HTTP POST form data parsed into a MultiDict. """
+ if 'bottle.forms' not in self.environ: self.POST
+ return self.environ['bottle.forms']
+
+ @property
+ def files(self):
+ """ Property: HTTP POST file uploads parsed into a MultiDict. """
+ if 'bottle.files' not in self.environ: self.POST
+ return self.environ['bottle.files']
+
+ @property
+ def params(self):
+ """ A combined MultiDict with POST and GET parameters. """
+ if 'bottle.params' not in self.environ:
+ self.environ['bottle.params'] = MultiDict(self.GET)
+ self.environ['bottle.params'].update(dict(self.forms))
+ return self.environ['bottle.params']
+
+ @property
+ def body(self):
+ """ The HTTP request body as a seekable buffer object.
+
+ This property returns a copy of the `wsgi.input` stream and should
+ be used instead of `environ['wsgi.input']`.
+ """
+ if 'bottle.body' not in self.environ:
+ maxread = max(0, self.content_length)
+ stream = self.environ['wsgi.input']
+ body = BytesIO() if maxread < MEMFILE_MAX else TemporaryFile(mode='w+b')
+ while maxread > 0:
+ part = stream.read(min(maxread, MEMFILE_MAX))
+ if not part: #TODO: Wrong content_length. Error? Do nothing?
+ break
+ body.write(part)
+ maxread -= len(part)
+ self.environ['wsgi.input'] = body
+ self.environ['bottle.body'] = body
+ self.environ['bottle.body'].seek(0)
+ return self.environ['bottle.body']
+
+ @property
+ def auth(self): #TODO: Tests and docs. Add support for digest. namedtuple?
+ """ HTTP authorisation data as a (user, passwd) tuple. (experimental)
+
+ This implementation currently only supports basic auth and returns
+ None on errors.
+ """
+ return parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
+
+ @property
+ def COOKIES(self):
+ """ Cookie information parsed into a dictionary.
+
+ Secure cookies are NOT decoded automatically. See
+ Request.get_cookie() for details.
+ """
+ if 'bottle.cookies' not in self.environ:
+ raw_dict = SimpleCookie(self.environ.get('HTTP_COOKIE',''))
+ self.environ['bottle.cookies'] = {}
+ for cookie in raw_dict.itervalues():
+ self.environ['bottle.cookies'][cookie.key] = cookie.value
+ return self.environ['bottle.cookies']
+
+ def get_cookie(self, name, secret=None):
+ """ Return the (decoded) value of a cookie. """
+ value = self.COOKIES.get(name)
+ dec = cookie_decode(value, secret) if secret else None
+ return dec or value
+
+ @property
+ def is_ajax(self):
+ ''' True if the request was generated using XMLHttpRequest '''
+ #TODO: write tests
+ return self.header.get('X-Requested-With') == 'XMLHttpRequest'
+
+
+
+class Response(threading.local):
+ """ Represents a single HTTP response using thread-local attributes.
+ """
+
+ def __init__(self, config=None):
+ self.bind(config)
+
+ def bind(self, config=None):
+ """ Resets the Response object to its factory defaults. """
+ self._COOKIES = None
+ self.status = 200
+ self.headers = HeaderDict()
+ self.content_type = 'text/html; charset=UTF-8'
+ self.config = config or {}
+
+ @property
+ def header(self):
+ depr("Response.header renamed to Response.headers")
+ return self.headers
+
+ def copy(self):
+ ''' Returns a copy of self '''
+ copy = Response(self.config)
+ copy.status = self.status
+ copy.headers = self.headers.copy()
+ copy.content_type = self.content_type
+ return copy
+
+ def wsgiheader(self):
+ ''' Returns a wsgi conform list of header/value pairs. '''
+ for c in self.COOKIES.values():
+ if c.OutputString() not in self.headers.getall('Set-Cookie'):
+ self.headers.append('Set-Cookie', c.OutputString())
+ # rfc2616 section 10.2.3, 10.3.5
+ if self.status in (204, 304) and 'content-type' in self.headers:
+ del self.headers['content-type']
+ if self.status == 304:
+ for h in ('allow', 'content-encoding', 'content-language',
+ 'content-length', 'content-md5', 'content-range',
+ 'content-type', 'last-modified'): # + c-location, expires?
+ if h in self.headers:
+ del self.headers[h]
+ return list(self.headers.iterallitems())
+ headerlist = property(wsgiheader)
+
+ @property
+ def charset(self):
+ """ Return the charset specified in the content-type header.
+
+ This defaults to `UTF-8`.
+ """
+ if 'charset=' in self.content_type:
+ return self.content_type.split('charset=')[-1].split(';')[0].strip()
+ return 'UTF-8'
+
+ @property
+ def COOKIES(self):
+ """ A dict-like SimpleCookie instance. Use Response.set_cookie() instead. """
+ if not self._COOKIES:
+ self._COOKIES = SimpleCookie()
+ return self._COOKIES
+
+ def set_cookie(self, key, value, secret=None, **kargs):
+ """ Add a new cookie with various options.
+
+ If the cookie value is not a string, a secure cookie is created.
+
+ Possible options are:
+ expires, path, comment, domain, max_age, secure, version, httponly
+ See http://de.wikipedia.org/wiki/HTTP-Cookie#Aufbau for details
+ """
+ if not isinstance(value, basestring):
+ if not secret:
+ raise TypeError('Cookies must be strings when secret is not set')
+ value = cookie_encode(value, secret).decode('ascii') #2to3 hack
+ self.COOKIES[key] = value
+ for k, v in kargs.iteritems():
+ self.COOKIES[key][k.replace('_', '-')] = v
+
+ def get_content_type(self):
+ """ Current 'Content-Type' header. """
+ return self.headers['Content-Type']
+
+ def set_content_type(self, value):
+ self.headers['Content-Type'] = value
+
+ content_type = property(get_content_type, set_content_type, None,
+ get_content_type.__doc__)
+
+
+
+
+
+
+# Data Structures
+
+class MultiDict(DictMixin):
+ """ A dict that remembers old values for each key """
+ # collections.MutableMapping would be better for Python >= 2.6
+ def __init__(self, *a, **k):
+ self.dict = dict()
+ for k, v in dict(*a, **k).iteritems():
+ self[k] = v
+
+ def __len__(self): return len(self.dict)
+ def __iter__(self): return iter(self.dict)
+ def __contains__(self, key): return key in self.dict
+ def __delitem__(self, key): del self.dict[key]
+ def keys(self): return self.dict.keys()
+ def __getitem__(self, key): return self.get(key, KeyError, -1)
+ def __setitem__(self, key, value): self.append(key, value)
+
+ def append(self, key, value): self.dict.setdefault(key, []).append(value)
+ def replace(self, key, value): self.dict[key] = [value]
+ def getall(self, key): return self.dict.get(key) or []
+
+ def get(self, key, default=None, index=-1):
+ if key not in self.dict and default != KeyError:
+ return [default][index]
+ return self.dict[key][index]
+
+ def iterallitems(self):
+ for key, values in self.dict.iteritems():
+ for value in values:
+ yield key, value
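A short illustration of the MultiDict behaviour described above (not part of
bottle itself): plain lookup returns the most recent value, getall() returns
the history.

m = MultiDict()
m['tenant'] = 'alice'
m['tenant'] = 'bob'
print m['tenant']                  # bob
print m.getall('tenant')           # ['alice', 'bob']
print m.get('missing', 'default')  # default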
+
+
+class HeaderDict(MultiDict):
+ """ Same as :class:`MultiDict`, but title()s the keys and overwrites by default. """
+ def __contains__(self, key): return MultiDict.__contains__(self, self.httpkey(key))
+ def __getitem__(self, key): return MultiDict.__getitem__(self, self.httpkey(key))
+ def __delitem__(self, key): return MultiDict.__delitem__(self, self.httpkey(key))
+ def __setitem__(self, key, value): self.replace(key, value)
+ def get(self, key, default=None, index=-1): return MultiDict.get(self, self.httpkey(key), default, index)
+ def append(self, key, value): return MultiDict.append(self, self.httpkey(key), str(value))
+ def replace(self, key, value): return MultiDict.replace(self, self.httpkey(key), str(value))
+ def getall(self, key): return MultiDict.getall(self, self.httpkey(key))
+ def httpkey(self, key): return str(key).replace('_','-').title()
+
+
+class AppStack(list):
+ """ A stack implementation. """
+
+ def __call__(self):
+ """ Return the current default app. """
+ return self[-1]
+
+ def push(self, value=None):
+ """ Add a new Bottle instance to the stack """
+ if not isinstance(value, Bottle):
+ value = Bottle()
+ self.append(value)
+ return value
+
+class WSGIFileWrapper(object):
+
+ def __init__(self, fp, buffer_size=1024*64):
+ self.fp, self.buffer_size = fp, buffer_size
+ for attr in ('fileno', 'close', 'read', 'readlines'):
+ if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
+
+ def __iter__(self):
+ read, buff = self.fp.read, self.buffer_size
+ while True:
+ part = read(buff)
+ if not part: break
+ yield part
+
+
+
+# Module level functions
+
+# Output filter
+
+def dict2json(d):
+ response.content_type = 'application/json'
+ return json_dumps(d)
+
+
+def abort(code=500, text='Unknown Error: Application stopped.'):
+    """ Aborts execution and causes an HTTP error. """
+ raise HTTPError(code, text)
+
+
+def redirect(url, code=303):
+ """ Aborts execution and causes a 303 redirect """
+ scriptname = request.environ.get('SCRIPT_NAME', '').rstrip('/') + '/'
+ location = urljoin(request.url, urljoin(scriptname, url))
+ raise HTTPResponse("", status=code, header=dict(Location=location))
+
+
+def send_file(*a, **k): #BC 0.6.4
+ """ Raises the output of static_file(). (deprecated) """
+ raise static_file(*a, **k)
+
+
+def static_file(filename, root, guessmime=True, mimetype=None, download=False):
+ """ Opens a file in a safe way and returns a HTTPError object with status
+ code 200, 305, 401 or 404. Sets Content-Type, Content-Length and
+ Last-Modified header. Obeys If-Modified-Since header and HEAD requests.
+ """
+ root = os.path.abspath(root) + os.sep
+ filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
+ header = dict()
+
+ if not filename.startswith(root):
+ return HTTPError(403, "Access denied.")
+ if not os.path.exists(filename) or not os.path.isfile(filename):
+ return HTTPError(404, "File does not exist.")
+ if not os.access(filename, os.R_OK):
+ return HTTPError(403, "You do not have permission to access this file.")
+
+ if not mimetype and guessmime:
+ header['Content-Type'] = mimetypes.guess_type(filename)[0]
+ else:
+ header['Content-Type'] = mimetype if mimetype else 'text/plain'
+
+ if download == True:
+ download = os.path.basename(filename)
+ if download:
+ header['Content-Disposition'] = 'attachment; filename="%s"' % download
+
+ stats = os.stat(filename)
+ lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
+ header['Last-Modified'] = lm
+ ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
+ if ims:
+ ims = ims.split(";")[0].strip() # IE sends "<date>; length=146"
+ ims = parse_date(ims)
+ if ims is not None and ims >= int(stats.st_mtime):
+ header['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
+ return HTTPResponse(status=304, header=header)
+ header['Content-Length'] = stats.st_size
+ if request.method == 'HEAD':
+ return HTTPResponse('', header=header)
+ else:
+ return HTTPResponse(open(filename, 'rb'), header=header)
+
+
+
+
+
+
+# Utilities
+
+def debug(mode=True):
+ """ Change the debug level.
+ There is only one debug level supported at the moment."""
+ global DEBUG
+ DEBUG = bool(mode)
+
+
+def parse_date(ims):
+ """ Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
+ try:
+ ts = email.utils.parsedate_tz(ims)
+ return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
+ except (TypeError, ValueError, IndexError):
+ return None
+
+
+def parse_auth(header):
+ """ Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
+ try:
+ method, data = header.split(None, 1)
+ if method.lower() == 'basic':
+ name, pwd = base64.b64decode(data).split(':', 1)
+ return name, pwd
+ except (KeyError, ValueError, TypeError):
+ return None
+
+
+def _lscmp(a, b):
+    ''' Compares two strings in a cryptographically safe way:
+ Runtime is not affected by a common prefix. '''
+ return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
+
+
+def cookie_encode(data, key):
+ ''' Encode and sign a pickle-able object. Return a string '''
+ msg = base64.b64encode(pickle.dumps(data, -1))
+ sig = base64.b64encode(hmac.new(key, msg).digest())
+ return tob('!') + sig + tob('?') + msg
+
+
+def cookie_decode(data, key):
+ ''' Verify and decode an encoded string. Return an object or None'''
+ data = tob(data)
+ if cookie_is_encoded(data):
+ sig, msg = data.split(tob('?'), 1)
+ if _lscmp(sig[1:], base64.b64encode(hmac.new(key, msg).digest())):
+ return pickle.loads(base64.b64decode(msg))
+ return None
+
+
+def cookie_is_encoded(data):
+    ''' Return True if the argument looks like an encoded cookie.'''
+ return bool(data.startswith(tob('!')) and tob('?') in data)
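A small illustration of the signed-cookie round trip built from the three
helpers above (not part of bottle itself; the secret and payload are made up):

secret = 'per-deployment secret'
blob = cookie_encode({'user': 'kaleb', 'admin': False}, secret)
print cookie_is_encoded(blob)           # True
print cookie_decode(blob, secret)       # the original dict comes back
print cookie_decode(blob, 'wrong key')  # None, the signature check fails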
+
+
+def tonativefunc(enc='utf-8'):
+ ''' Returns a function that turns everything into 'native' strings using enc '''
+ if sys.version_info >= (3,0,0):
+ return lambda x: x.decode(enc) if isinstance(x, bytes) else str(x)
+ return lambda x: x.encode(enc) if isinstance(x, unicode) else str(x)
+
+
+def yieldroutes(func):
+ """ Return a generator for routes that match the signature (name, args)
+ of the func parameter. This may yield more than one route if the function
+ takes optional keyword arguments. The output is best described by example:
+ a() -> '/a'
+ b(x, y) -> '/b/:x/:y'
+ c(x, y=5) -> '/c/:x' and '/c/:x/:y'
+ d(x=5, y=6) -> '/d' and '/d/:x' and '/d/:x/:y'
+ """
+ path = func.__name__.replace('__','/').lstrip('/')
+ spec = inspect.getargspec(func)
+ argc = len(spec[0]) - len(spec[3] or [])
+ path += ('/:%s' * argc) % tuple(spec[0][:argc])
+ yield path
+ for arg in spec[0][argc:]:
+ path += '/:%s' % arg
+ yield path
+
+def path_shift(script_name, path_info, shift=1):
+ ''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
+
+ :return: The modified paths.
+ :param script_name: The SCRIPT_NAME path.
+    :param path_info: The PATH_INFO path.
+    :param shift: The number of path fragments to shift. May be negative to
+      change the shift direction. (default: 1)
+ '''
+ if shift == 0: return script_name, path_info
+ pathlist = path_info.strip('/').split('/')
+ scriptlist = script_name.strip('/').split('/')
+ if pathlist and pathlist[0] == '': pathlist = []
+ if scriptlist and scriptlist[0] == '': scriptlist = []
+ if shift > 0 and shift <= len(pathlist):
+ moved = pathlist[:shift]
+ scriptlist = scriptlist + moved
+ pathlist = pathlist[shift:]
+ elif shift < 0 and shift >= -len(scriptlist):
+ moved = scriptlist[shift:]
+ pathlist = moved + pathlist
+ scriptlist = scriptlist[:shift]
+ else:
+ empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
+ raise AssertionError("Cannot shift. Nothing left from %s" % empty)
+ new_script_name = '/' + '/'.join(scriptlist)
+ new_path_info = '/' + '/'.join(pathlist)
+ if path_info.endswith('/') and pathlist: new_path_info += '/'
+ return new_script_name, new_path_info
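# A quick sketch of how path_shift() moves fragments between SCRIPT_NAME and
# PATH_INFO; the paths are illustrative only:
assert path_shift('', '/a/b/c') == ('/a', '/b/c')             # one fragment forward
assert path_shift('/a', '/b/c', shift=-1) == ('/', '/a/b/c')  # and back again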
+
+
+
+
+# Decorators
+#TODO: Replace default_app() with app()
+
+def validate(**vkargs):
+ """
+ Validates and manipulates keyword arguments by user defined callables.
+ Handles ValueError and missing arguments by raising HTTPError(403).
+ """
+ def decorator(func):
+ def wrapper(**kargs):
+ for key, value in vkargs.iteritems():
+ if key not in kargs:
+ abort(403, 'Missing parameter: %s' % key)
+ try:
+ kargs[key] = value(kargs[key])
+ except ValueError:
+ abort(403, 'Wrong parameter format for: %s' % key)
+ return func(**kargs)
+ return wrapper
+ return decorator
+
+
+route = functools.wraps(Bottle.route)(lambda *a, **ka: app().route(*a, **ka))
+get = functools.wraps(Bottle.get)(lambda *a, **ka: app().get(*a, **ka))
+post = functools.wraps(Bottle.post)(lambda *a, **ka: app().post(*a, **ka))
+put = functools.wraps(Bottle.put)(lambda *a, **ka: app().put(*a, **ka))
+delete = functools.wraps(Bottle.delete)(lambda *a, **ka: app().delete(*a, **ka))
+error = functools.wraps(Bottle.error)(lambda *a, **ka: app().error(*a, **ka))
+url = functools.wraps(Bottle.get_url)(lambda *a, **ka: app().get_url(*a, **ka))
+mount = functools.wraps(Bottle.mount)(lambda *a, **ka: app().mount(*a, **ka))
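# A minimal sketch of the module-level route() shortcut combined with the
# validate() decorator above; the route path and converters are examples only:
@route('/add/:a/:b')
@validate(a=int, b=int)
def add(a, b):
    return str(a + b)    # URL fragments arrive as strings; validate() casts them to int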
+
+def default():
+ depr("The default() decorator is deprecated. Use @error(404) instead.")
+ return error(404)
+
+
+
+
+
+
+# Server adapter
+
+class ServerAdapter(object):
+ quiet = False
+
+ def __init__(self, host='127.0.0.1', port=8080, **kargs):
+ self.options = kargs
+ self.host = host
+ self.port = int(port)
+
+ def run(self, handler): # pragma: no cover
+ pass
+
+ def __repr__(self):
+ args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()])
+ return "%s(%s)" % (self.__class__.__name__, args)
+
+
+class CGIServer(ServerAdapter):
+ quiet = True
+ def run(self, handler): # pragma: no cover
+ from wsgiref.handlers import CGIHandler
+ CGIHandler().run(handler) # Just ignore host and port here
+
+
+class FlupFCGIServer(ServerAdapter):
+ def run(self, handler): # pragma: no cover
+ import flup.server.fcgi
+ flup.server.fcgi.WSGIServer(handler, bindAddress=(self.host, self.port)).run()
+
+
+class WSGIRefServer(ServerAdapter):
+ def run(self, handler): # pragma: no cover
+ from wsgiref.simple_server import make_server, WSGIRequestHandler
+ if self.quiet:
+ class QuietHandler(WSGIRequestHandler):
+ def log_request(*args, **kw): pass
+ self.options['handler_class'] = QuietHandler
+ srv = make_server(self.host, self.port, handler, **self.options)
+ srv.serve_forever()
+
+
+class CherryPyServer(ServerAdapter):
+ def run(self, handler): # pragma: no cover
+ from cherrypy import wsgiserver
+ server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
+ server.start()
+
+
+class PasteServer(ServerAdapter):
+ def run(self, handler): # pragma: no cover
+ from paste import httpserver
+ from paste.translogger import TransLogger
+ app = TransLogger(handler)
+ httpserver.serve(app, host=self.host, port=str(self.port), **self.options)
+
+
+class FapwsServer(ServerAdapter):
+ """
+    Extremely fast web server using libev.
+ See http://william-os4y.livejournal.com/
+ """
+ def run(self, handler): # pragma: no cover
+ import fapws._evwsgi as evwsgi
+ from fapws import base
+ evwsgi.start(self.host, self.port)
+ evwsgi.set_base_module(base)
+ def app(environ, start_response):
+ environ['wsgi.multiprocess'] = False
+ return handler(environ, start_response)
+ evwsgi.wsgi_cb(('',app))
+ evwsgi.run()
+
+
+class TornadoServer(ServerAdapter):
+ """ Untested. As described here:
+ http://github.com/facebook/tornado/blob/master/tornado/wsgi.py#L187 """
+ def run(self, handler): # pragma: no cover
+ import tornado.wsgi
+ import tornado.httpserver
+ import tornado.ioloop
+ container = tornado.wsgi.WSGIContainer(handler)
+ server = tornado.httpserver.HTTPServer(container)
+ server.listen(port=self.port)
+ tornado.ioloop.IOLoop.instance().start()
+
+
+class AppEngineServer(ServerAdapter):
+ """ Untested. """
+ quiet = True
+ def run(self, handler):
+ from google.appengine.ext.webapp import util
+ util.run_wsgi_app(handler)
+
+
+class TwistedServer(ServerAdapter):
+ """ Untested. """
+ def run(self, handler):
+ from twisted.web import server, wsgi
+ from twisted.python.threadpool import ThreadPool
+ from twisted.internet import reactor
+ thread_pool = ThreadPool()
+ thread_pool.start()
+ reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
+ factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
+ reactor.listenTCP(self.port, factory, interface=self.host)
+ reactor.run()
+
+
+class DieselServer(ServerAdapter):
+ """ Untested. """
+ def run(self, handler):
+ from diesel.protocols.wsgi import WSGIApplication
+ app = WSGIApplication(handler, port=self.port)
+ app.run()
+
+
+class GunicornServer(ServerAdapter):
+ """ Untested. """
+ def run(self, handler):
+ import gunicorn.arbiter
+ gunicorn.arbiter.Arbiter((self.host, self.port), 4, handler).run()
+
+
+class EventletServer(ServerAdapter):
+ """ Untested """
+ def run(self, handler):
+ from eventlet import wsgi, listen
+ wsgi.server(listen((self.host, self.port)), handler)
+
+
+class RocketServer(ServerAdapter):
+ """ Untested. As requested in issue 63
+ http://github.com/defnull/bottle/issues/#issue/63 """
+ def run(self, handler):
+ from rocket import Rocket
+ server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
+ server.start()
+
+
+class AutoServer(ServerAdapter):
+ """ Untested. """
+ adapters = [CherryPyServer, PasteServer, TwistedServer, WSGIRefServer]
+ def run(self, handler):
+ for sa in self.adapters:
+ try:
+ return sa(self.host, self.port, **self.options).run(handler)
+ except ImportError:
+ pass
+
+
+def run(app=None, server=WSGIRefServer, host='127.0.0.1', port=8080,
+ interval=1, reloader=False, quiet=False, **kargs):
+ """ Runs bottle as a web server. """
+ app = app if app else default_app()
+ # Instantiate server, if it is a class instead of an instance
+ if isinstance(server, type):
+ server = server(host=host, port=port, **kargs)
+ if not isinstance(server, ServerAdapter):
+ raise RuntimeError("Server must be a subclass of WSGIAdapter")
+ server.quiet = server.quiet or quiet
+ if not server.quiet and not os.environ.get('BOTTLE_CHILD'):
+ print "Bottle server starting up (using %s)..." % repr(server)
+ print "Listening on http://%s:%d/" % (server.host, server.port)
+ print "Use Ctrl-C to quit."
+ print
+ try:
+ if reloader:
+ interval = min(interval, 1)
+ if os.environ.get('BOTTLE_CHILD'):
+ _reloader_child(server, app, interval)
+ else:
+ _reloader_observer(server, app, interval)
+ else:
+ server.run(app)
+ except KeyboardInterrupt: pass
+ if not server.quiet and not os.environ.get('BOTTLE_CHILD'):
+ print "Shutting down..."
+
+
+class FileCheckerThread(threading.Thread):
+ ''' Thread that periodically checks for changed module files. '''
+
+ def __init__(self, lockfile, interval):
+ threading.Thread.__init__(self)
+ self.lockfile, self.interval = lockfile, interval
+        #1: lockfile too old; 2: lockfile missing
+ #3: module file changed; 5: external exit
+ self.status = 0
+
+ def run(self):
+ exists = os.path.exists
+ mtime = lambda path: os.stat(path).st_mtime
+ files = dict()
+ for module in sys.modules.values():
+ try:
+ path = inspect.getsourcefile(module)
+ if path and exists(path): files[path] = mtime(path)
+ except TypeError: pass
+ while not self.status:
+ for path, lmtime in files.iteritems():
+ if not exists(path) or mtime(path) > lmtime:
+ self.status = 3
+ if not exists(self.lockfile):
+ self.status = 2
+ elif mtime(self.lockfile) < time.time() - self.interval - 5:
+ self.status = 1
+ if not self.status:
+ time.sleep(self.interval)
+ if self.status != 5:
+ thread.interrupt_main()
+
+
+def _reloader_child(server, app, interval):
+ ''' Start the server and check for modified files in a background thread.
+ As soon as an update is detected, KeyboardInterrupt is thrown in
+        the main thread to exit the server loop. The process exits with status
+        code 3 to request a reload by the observer process. If the lockfile
+        is not modified within 2*interval seconds or is missing, we assume that
+        the observer process died and exit with status code 1 or 2.
+ '''
+ lockfile = os.environ.get('BOTTLE_LOCKFILE')
+ bgcheck = FileCheckerThread(lockfile, interval)
+ try:
+ bgcheck.start()
+ server.run(app)
+ except KeyboardInterrupt, e: pass
+ bgcheck.status, status = 5, bgcheck.status
+ bgcheck.join() # bgcheck.status == 5 --> silent exit
+ if status: sys.exit(status)
+
+
+def _reloader_observer(server, app, interval):
+ ''' Start a child process with identical commandline arguments and restart
+        it as long as it exits with status code 3. Also create a lockfile and
+ touch it (update mtime) every interval seconds.
+ '''
+ fd, lockfile = tempfile.mkstemp(prefix='bottle-reloader.', suffix='.lock')
+ os.close(fd) # We only need this file to exist. We never write to it
+ try:
+ while os.path.exists(lockfile):
+ args = [sys.executable] + sys.argv
+ environ = os.environ.copy()
+ environ['BOTTLE_CHILD'] = 'true'
+ environ['BOTTLE_LOCKFILE'] = lockfile
+ p = subprocess.Popen(args, env=environ)
+ while p.poll() is None: # Busy wait...
+ os.utime(lockfile, None) # I am alive!
+ time.sleep(interval)
+ if p.poll() != 3:
+ if os.path.exists(lockfile): os.unlink(lockfile)
+ sys.exit(p.poll())
+ elif not server.quiet:
+ print "Reloading server..."
+ except KeyboardInterrupt: pass
+ if os.path.exists(lockfile): os.unlink(lockfile)
+
+
+
+# Templates
+
+class TemplateError(HTTPError):
+ def __init__(self, message):
+ HTTPError.__init__(self, 500, message)
+
+
+class BaseTemplate(object):
+ """ Base class and minimal API for template adapters """
+ extentions = ['tpl','html','thtml','stpl']
+ settings = {} #used in prepare()
+ defaults = {} #used in render()
+
+ def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
+ """ Create a new template.
+ If the source parameter (str or buffer) is missing, the name argument
+ is used to guess a template filename. Subclasses can assume that
+ self.source and/or self.filename are set. Both are strings.
+ The lookup, encoding and settings parameters are stored as instance
+ variables.
+ The lookup parameter stores a list containing directory paths.
+ The encoding parameter should be used to decode byte strings or files.
+ The settings parameter contains a dict for engine-specific settings.
+ """
+ self.name = name
+ self.source = source.read() if hasattr(source, 'read') else source
+ self.filename = source.filename if hasattr(source, 'filename') else None
+ self.lookup = map(os.path.abspath, lookup)
+ self.encoding = encoding
+ self.settings = self.settings.copy() # Copy from class variable
+ self.settings.update(settings) # Apply
+ if not self.source and self.name:
+ self.filename = self.search(self.name, self.lookup)
+ if not self.filename:
+ raise TemplateError('Template %s not found.' % repr(name))
+ if not self.source and not self.filename:
+ raise TemplateError('No template specified.')
+ self.prepare(**self.settings)
+
+ @classmethod
+ def search(cls, name, lookup=[]):
+ """ Search name in all directories specified in lookup.
+ First without, then with common extensions. Return first hit. """
+ if os.path.isfile(name): return name
+ for spath in lookup:
+ fname = os.path.join(spath, name)
+ if os.path.isfile(fname):
+ return fname
+ for ext in cls.extentions:
+ if os.path.isfile('%s.%s' % (fname, ext)):
+ return '%s.%s' % (fname, ext)
+
+ @classmethod
+ def global_config(cls, key, *args):
+ ''' This reads or sets the global settings stored in class.settings. '''
+ if args:
+ cls.settings[key] = args[0]
+ else:
+ return cls.settings[key]
+
+ def prepare(self, **options):
+ """ Run preparations (parsing, caching, ...).
+ It should be possible to call this again to refresh a template or to
+ update settings.
+ """
+ raise NotImplementedError
+
+ def render(self, **args):
+ """ Render the template with the specified local variables and return
+ a single byte or unicode string. If it is a byte string, the encoding
+ must match self.encoding. This method must be thread-safe!
+ """
+ raise NotImplementedError
+
+
+class MakoTemplate(BaseTemplate):
+ def prepare(self, **options):
+ from mako.template import Template
+ from mako.lookup import TemplateLookup
+ options.update({'input_encoding':self.encoding})
+ #TODO: This is a hack... http://github.com/defnull/bottle/issues#issue/8
+ mylookup = TemplateLookup(directories=['.']+self.lookup, **options)
+ if self.source:
+ self.tpl = Template(self.source, lookup=mylookup)
+        else: #mako cannot guess extensions. We can, but only at top level...
+ name = self.name
+ if not os.path.splitext(name)[1]:
+ name += os.path.splitext(self.filename)[1]
+ self.tpl = mylookup.get_template(name)
+
+ def render(self, **args):
+ _defaults = self.defaults.copy()
+ _defaults.update(args)
+ return self.tpl.render(**_defaults)
+
+
+class CheetahTemplate(BaseTemplate):
+ def prepare(self, **options):
+ from Cheetah.Template import Template
+ self.context = threading.local()
+ self.context.vars = {}
+ options['searchList'] = [self.context.vars]
+ if self.source:
+ self.tpl = Template(source=self.source, **options)
+ else:
+ self.tpl = Template(file=self.filename, **options)
+
+ def render(self, **args):
+ self.context.vars.update(self.defaults)
+ self.context.vars.update(args)
+ out = str(self.tpl)
+ self.context.vars.clear()
+ return [out]
+
+
+class Jinja2Template(BaseTemplate):
+ def prepare(self, filters=None, tests=None, **kwargs):
+ from jinja2 import Environment, FunctionLoader
+ if 'prefix' in kwargs: # TODO: to be removed after a while
+ raise RuntimeError('The keyword argument `prefix` has been removed. '
+ 'Use the full jinja2 environment name line_statement_prefix instead.')
+ self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
+ if filters: self.env.filters.update(filters)
+ if tests: self.env.tests.update(tests)
+ if self.source:
+ self.tpl = self.env.from_string(self.source)
+ else:
+ self.tpl = self.env.get_template(self.filename)
+
+ def render(self, **args):
+ _defaults = self.defaults.copy()
+ _defaults.update(args)
+ return self.tpl.render(**_defaults).encode("utf-8")
+
+ def loader(self, name):
+ fname = self.search(name, self.lookup)
+ if fname:
+ with open(fname, "rb") as f:
+ return f.read().decode(self.encoding)
+
+
+class SimpleTemplate(BaseTemplate):
+ blocks = ('if','elif','else','try','except','finally','for','while','with','def','class')
+ dedent_blocks = ('elif', 'else', 'except', 'finally')
+
+ def prepare(self, escape_func=cgi.escape, noescape=False):
+ self.cache = {}
+ if self.source:
+ self.code = self.translate(self.source)
+ self.co = compile(self.code, '<string>', 'exec')
+ else:
+ self.code = self.translate(open(self.filename).read())
+ self.co = compile(self.code, self.filename, 'exec')
+ enc = self.encoding
+ self._str = lambda x: touni(x, enc)
+ self._escape = lambda x: escape_func(touni(x, enc))
+ if noescape:
+ self._str, self._escape = self._escape, self._str
+
+ def translate(self, template):
+ stack = [] # Current Code indentation
+ lineno = 0 # Current line of code
+ ptrbuffer = [] # Buffer for printable strings and token tuple instances
+ codebuffer = [] # Buffer for generated python code
+ touni = functools.partial(unicode, encoding=self.encoding)
+ multiline = dedent = False
+
+ def yield_tokens(line):
+ for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)):
+ if i % 2:
+ if part.startswith('!'): yield 'RAW', part[1:]
+ else: yield 'CMD', part
+ else: yield 'TXT', part
+
+ def split_comment(codeline):
+ """ Removes comments from a line of code. """
+ line = codeline.splitlines()[0]
+ try:
+ tokens = list(tokenize.generate_tokens(iter(line).next))
+ except tokenize.TokenError:
+ return line.rsplit('#',1) if '#' in line else (line, '')
+ for token in tokens:
+ if token[0] == tokenize.COMMENT:
+ start, end = token[2][1], token[3][1]
+ return codeline[:start] + codeline[end:], codeline[start:end]
+ return line, ''
+
+ def flush(): # Flush the ptrbuffer
+ if not ptrbuffer: return
+ cline = ''
+ for line in ptrbuffer:
+ for token, value in line:
+ if token == 'TXT': cline += repr(value)
+ elif token == 'RAW': cline += '_str(%s)' % value
+ elif token == 'CMD': cline += '_escape(%s)' % value
+ cline += ', '
+ cline = cline[:-2] + '\\\n'
+ cline = cline[:-2]
+ if cline[:-1].endswith('\\\\\\\\\\n'):
+ cline = cline[:-7] + cline[-1] # 'nobr\\\\\n' --> 'nobr'
+ cline = '_printlist([' + cline + '])'
+ del ptrbuffer[:] # Do this before calling code() again
+ code(cline)
+
+ def code(stmt):
+ for line in stmt.splitlines():
+ codebuffer.append(' ' * len(stack) + line.strip())
+
+ for line in template.splitlines(True):
+ lineno += 1
+ line = line if isinstance(line, unicode)\
+ else unicode(line, encoding=self.encoding)
+ if lineno <= 2:
+ m = re.search(r"%.*coding[:=]\s*([-\w\.]+)", line)
+ if m: self.encoding = m.group(1)
+ if m: line = line.replace('coding','coding (removed)')
+ if line.strip()[:2].count('%') == 1:
+ line = line.split('%',1)[1].lstrip() # Full line following the %
+ cline = split_comment(line)[0].strip()
+ cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
+ flush() ##encodig (TODO: why?)
+ if cmd in self.blocks or multiline:
+ cmd = multiline or cmd
+ dedent = cmd in self.dedent_blocks # "else:"
+ if dedent and not oneline and not multiline:
+ cmd = stack.pop()
+ code(line)
+ oneline = not cline.endswith(':') # "if 1: pass"
+ multiline = cmd if cline.endswith('\\') else False
+ if not oneline and not multiline:
+ stack.append(cmd)
+ elif cmd == 'end' and stack:
+ code('#end(%s) %s' % (stack.pop(), line.strip()[3:]))
+ elif cmd == 'include':
+ p = cline.split(None, 2)[1:]
+ if len(p) == 2:
+ code("_=_include(%s, _stdout, %s)" % (repr(p[0]), p[1]))
+ elif p:
+ code("_=_include(%s, _stdout)" % repr(p[0]))
+ else: # Empty %include -> reverse of %rebase
+ code("_printlist(_base)")
+ elif cmd == 'rebase':
+ p = cline.split(None, 2)[1:]
+ if len(p) == 2:
+ code("globals()['_rebase']=(%s, dict(%s))" % (repr(p[0]), p[1]))
+ elif p:
+ code("globals()['_rebase']=(%s, {})" % repr(p[0]))
+ else:
+ code(line)
+ else: # Line starting with text (not '%') or '%%' (escaped)
+ if line.strip().startswith('%%'):
+ line = line.replace('%%', '%', 1)
+ ptrbuffer.append(yield_tokens(line))
+ flush()
+ return '\n'.join(codebuffer) + '\n'
+
+ def subtemplate(self, _name, _stdout, **args):
+ if _name not in self.cache:
+ self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
+ return self.cache[_name].execute(_stdout, **args)
+
+ def execute(self, _stdout, **args):
+ env = self.defaults.copy()
+ env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
+ '_include': self.subtemplate, '_str': self._str,
+ '_escape': self._escape})
+ env.update(args)
+ eval(self.co, env)
+ if '_rebase' in env:
+ subtpl, rargs = env['_rebase']
+ subtpl = self.__class__(name=subtpl, lookup=self.lookup)
+ rargs['_base'] = _stdout[:] #copy stdout
+ del _stdout[:] # clear stdout
+ return subtpl.execute(_stdout, **rargs)
+ return env
+
+ def render(self, **args):
+ """ Render the template using keyword arguments as local variables. """
+ stdout = []
+ self.execute(stdout, **args)
+ return ''.join(stdout)
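# A minimal usage sketch for SimpleTemplate; the template text and variables
# are examples only:
tpl = SimpleTemplate('Hello {{name}}!\n%for i in items:\nitem {{i}}\n%end\n')
print tpl.render(name='World', items=[1, 2])   # -> Hello World! / item 1 / item 2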
+
+
+def template(tpl, template_adapter=SimpleTemplate, **kwargs):
+ '''
+ Get a rendered template as a string iterator.
+ You can use a name, a filename or a template string as first parameter.
+ '''
+ if tpl not in TEMPLATES or DEBUG:
+ settings = kwargs.get('template_settings',{})
+ lookup = kwargs.get('template_lookup', TEMPLATE_PATH)
+ if isinstance(tpl, template_adapter):
+ TEMPLATES[tpl] = tpl
+ if settings: TEMPLATES[tpl].prepare(**settings)
+ elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
+ TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings)
+ else:
+ TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings)
+ if not TEMPLATES[tpl]:
+ abort(500, 'Template (%s) not found' % tpl)
+ return TEMPLATES[tpl].render(**kwargs)
+
+mako_template = functools.partial(template, template_adapter=MakoTemplate)
+cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
+jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
+
+def view(tpl_name, **defaults):
+ ''' Decorator: renders a template for a handler.
+        The handler can control its behavior like this:
+
+ - return a dict of template vars to fill out the template
+ - return something other than a dict and the view decorator will not
+ process the template, but return the handler result as is.
+          This includes returning an HTTPResponse(dict) to get,
+          for instance, JSON with autojson or other castfilters.
+ '''
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ result = func(*args, **kwargs)
+ if isinstance(result, (dict, DictMixin)):
+ tplvars = defaults.copy()
+ tplvars.update(result)
+ return template(tpl_name, **tplvars)
+ return result
+ return wrapper
+ return decorator
+
+mako_view = functools.partial(view, template_adapter=MakoTemplate)
+cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
+jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
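# A small sketch of view() in use, assuming an 'index.tpl' template can be
# found on TEMPLATE_PATH; the route, template and variable names are examples:
@route('/hello/:name')
@view('index')
def hello(name):
    return dict(name=name)    # the returned dict supplies the template variables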
+
+
+
+
+
+
+# Module initialization and configuration
+
+TEMPLATE_PATH = ['./', './views/']
+TEMPLATES = {}
+DEBUG = False
+MEMFILE_MAX = 1024*100
+HTTP_CODES = {
+ 100: 'CONTINUE',
+ 101: 'SWITCHING PROTOCOLS',
+ 200: 'OK',
+ 201: 'CREATED',
+ 202: 'ACCEPTED',
+ 203: 'NON-AUTHORITATIVE INFORMATION',
+ 204: 'NO CONTENT',
+ 205: 'RESET CONTENT',
+ 206: 'PARTIAL CONTENT',
+ 300: 'MULTIPLE CHOICES',
+ 301: 'MOVED PERMANENTLY',
+ 302: 'FOUND',
+ 303: 'SEE OTHER',
+ 304: 'NOT MODIFIED',
+ 305: 'USE PROXY',
+ 306: 'RESERVED',
+ 307: 'TEMPORARY REDIRECT',
+ 400: 'BAD REQUEST',
+ 401: 'UNAUTHORIZED',
+ 402: 'PAYMENT REQUIRED',
+ 403: 'FORBIDDEN',
+ 404: 'NOT FOUND',
+ 405: 'METHOD NOT ALLOWED',
+ 406: 'NOT ACCEPTABLE',
+ 407: 'PROXY AUTHENTICATION REQUIRED',
+ 408: 'REQUEST TIMEOUT',
+ 409: 'CONFLICT',
+ 410: 'GONE',
+ 411: 'LENGTH REQUIRED',
+ 412: 'PRECONDITION FAILED',
+ 413: 'REQUEST ENTITY TOO LARGE',
+ 414: 'REQUEST-URI TOO LONG',
+ 415: 'UNSUPPORTED MEDIA TYPE',
+ 416: 'REQUESTED RANGE NOT SATISFIABLE',
+ 417: 'EXPECTATION FAILED',
+ 500: 'INTERNAL SERVER ERROR',
+ 501: 'NOT IMPLEMENTED',
+ 502: 'BAD GATEWAY',
+ 503: 'SERVICE UNAVAILABLE',
+ 504: 'GATEWAY TIMEOUT',
+ 505: 'HTTP VERSION NOT SUPPORTED',
+}
+""" A dict of known HTTP error and status codes """
+
+
+
+ERROR_PAGE_TEMPLATE = SimpleTemplate("""
+%try:
+ %from bottle import DEBUG, HTTP_CODES, request
+ %status_name = HTTP_CODES.get(e.status, 'Unknown').title()
+ <!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
+ <html>
+ <head>
+ <title>Error {{e.status}}: {{status_name}}</title>
+ <style type="text/css">
+ html {background-color: #eee; font-family: sans;}
+ body {background-color: #fff; border: 1px solid #ddd; padding: 15px; margin: 15px;}
+ pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
+ </style>
+ </head>
+ <body>
+ <h1>Error {{e.status}}: {{status_name}}</h1>
+ <p>Sorry, the requested URL <tt>{{request.url}}</tt> caused an error:</p>
+ <pre>{{str(e.output)}}</pre>
+ %if DEBUG and e.exception:
+ <h2>Exception:</h2>
+ <pre>{{repr(e.exception)}}</pre>
+ %end
+ %if DEBUG and e.traceback:
+ <h2>Traceback:</h2>
+ <pre>{{e.traceback}}</pre>
+ %end
+ </body>
+ </html>
+%except ImportError:
+ <b>ImportError:</b> Could not generate the error page. Please add bottle to sys.path
+%end
+""")
+""" The HTML template used for error messages """
+
+request = Request()
+""" Whenever a page is requested, the :class:`Bottle` WSGI handler stores
+metadata about the current request into this instance of :class:`Request`.
+It is thread-safe and can be accessed from within handler functions. """
+
+response = Response()
+""" The :class:`Bottle` WSGI handler uses metadata assigned to this instance
+of :class:`Response` to generate the WSGI response. """
+
+local = threading.local()
+""" Thread-local namespace. Not used by Bottle, but could get handy """
+
+# Initialize app stack (create first empty Bottle app)
+# BC: 0.6.4 and needed for run()
+app = default_app = AppStack()
+app.push()
diff --git a/scripts/cloudfsd.py b/scripts/cloudfsd.py
index 0b3433e..125ff29 100755
--- a/scripts/cloudfsd.py
+++ b/scripts/cloudfsd.py
@@ -1,164 +1,79 @@
+#!/usr/bin/python
+
import glob
import json
import os
import re
-import socket
-import sys
from bottle import route, run
-import volfilter
-gfs_dir = "/var/lib/glusterd"
-info_dir = "%s/%s" % (os.path.dirname(gfs_dir), "cloudfs")
-idle_subdir = "%s/.idle_ports" % info_dir
-used_subdir = "%s/.used_ports" % info_dir
-volfile_re = re.compile("[^.]+\.(.*)\.bricks-")
-CLOUDFSD_PORT = 8080
+import volstart
+import volstop
+import volmap
+import paths
-# Make sure the volume directory exists and has the right stuff in it.
-def check_volume_directory (vol_name):
- if not os.path.exists(info_dir):
- os.mkdir(info_dir)
- user_file = open("%s/%s"%(info_dir,"default_users"),"w")
- # TBD: big gaping security hole until other code can deal
- # with having zero users defined.
- user_file.write("alice password1\nbob password2\n")
- user_file.flush()
- user_file.close()
- os.mkdir(idle_subdir)
- i = 24010
- while i < 24030:
- fp = open("%s/%d"%(idle_subdir,i),"w")
- i += 1
- os.mkdir(used_subdir)
-
- vol_dir = "%s/%s" % (info_dir, vol_name)
- if not os.path.exists(vol_dir):
- os.mkdir(vol_dir)
- return vol_dir
-
-# Figure out which of the GlusterFS volfiles belong to us. Volfiles might have
-# names based on partial host names, fully qualified names, or addresses, or
-# even a mix thanks to "gluster peer probe" silliness. To deal with all of
-# these possibilities, we resolve everything to addresses and compare those.
-def scan_gfs_volfiles (vol_name):
- my_name = os.uname()[1]
- # Getaddrinfo returns a list of tuples, each:
- # family, socktype, proto, canonname, sockaddr
- # We extract the sockaddr of the first item, and the IP addr from that
- # TBD: handle IPv6, multi-homed hosts, etc.
- my_addr = socket.getaddrinfo(my_name,0)[0][4][0]
- my_glob = "%s/vols/%s/%s.*.vol" % (gfs_dir, vol_name, vol_name)
- for vf in glob.iglob(my_glob):
- m = volfile_re.match(os.path.basename(vf))
- if m:
- this_host = m.groups(1)[0]
- this_addr = socket.getaddrinfo(this_host,0)[0][4][0]
- if this_addr == my_addr:
- yield vf
-
-# Allocate a port for a server to run on. Right now we do this in a very
-# "clever" way, by creating files to match ports and then grabbing a file here.
-# When we have a real volume database such games will be unnecessary.
-def allocate_port (vol_file):
- for pf in glob.iglob("%s/*"%idle_subdir):
- base = os.path.basename(pf)
- new_name = "%s/%s" % (used_subdir, base)
- os.symlink(vol_file,new_name)
- os.remove(pf)
- return base
- else:
- raise RuntimeError, "no ports available"
-
-# Parse the user file into a list of [name,password] sub-lists. Since
-# everything that uses this is in Python we could just make it a pickle/shelf
-# or whatever, but it would all go away with a real volume database so it's not
-# worth the trouble to re-do it now.
-def parse_user_file (vol_name):
- try:
- user_file = open("%s/%s/users"%(info_dir,vol_name),"r")
- except IOError:
- user_file = open("%s/default_users"%info_dir,"r")
-
- users = []
- for line in user_file.readlines():
- space = line.find(" ")
- if space == -1:
- print >> sys.stderr, "Bad line in userfile: %s" % line
- users.append([line[:space],line[space+1:-1]])
-
- return users
-
-# Convert a single GlusterFS server volfile to its CloudFS form, with one
-# translator stack per tenant and "evil" translators stripped out. Some day
-# this will also involve adding translators (e.g. UID mapping) at the top of
-# each stack.
-def cloudify_volfile (input, output, users, port):
- print "# Cloudifying server %s" % input
- graph, last = volfilter.load(input)
- last = volfilter.cleanup(last,graph)
-
- if last.type != "protocol/server":
- print >> sys.stderr, "Top translator must be protocol/server"
- sys.exit(1)
- old_stack = last.subvols[0]
-
- bad_opts = []
- for opt in last.opts.iterkeys():
- if opt[:9] == "auth.addr":
- bad_opts.append(opt)
- elif opt[:10] == "auth.login":
- bad_opts.append(opt)
- for opt in bad_opts:
- print "# stripping auth option %s = %s" % (opt, last.opts[opt])
- del last.opts[opt]
-
- last.subvols = []
- for user, pw in users:
- new_stack = volfilter.copy_stack(old_stack,user)
- last.subvols.append(new_stack)
- last.opts["auth.login.%s.allow"%new_stack.name] = user
- last.opts["auth.login.%s.password"%new_stack.name] = pw
-
- last.opts["transport.socket.listen-port"] = port
- volfilter.generate(graph,last,output)
+CLOUDFSD_PORT = 8080
@route("/:vol_name/start")
-def start_server (vol_name):
- vol_base = check_volume_directory(vol_name)
- users = parse_user_file(vol_name)
-
- for vf in scan_gfs_volfiles(vol_name):
- new_vf = "%s/%s" % (vol_base, os.path.basename(vf))
- outfile = open(new_vf,"w")
- port = allocate_port(new_vf)
- cloudify_volfile(vf,outfile,users,port)
- # TBD: actually start the server
- print "glusterfsd -f %s" % new_vf
+def start_server(vol_name):
+ volstart.vol_start(vol_name)
+
+@route("/:vol_name/stop")
+def stop_server(vol_name):
+ volstop.vol_stop(vol_name)
@route("/:vol_name/fetch")
-def fetch_client_vf (vol_name):
- vf_path = "%s/vols/%s/%s-fuse.vol" % (gfs_dir, vol_name, vol_name)
+def fetch_client_vf(vol_name):
+ vf_path = "%s/vols/%s/%s-fuse.vol" % (paths.gfs_dir, vol_name, vol_name)
return open(vf_path,"r")
@route("/:vol_name/map")
-def map_paths (vol_name):
- vol_dir = "%s/%s" % (info_dir, vol_name)
- vol_re = re.compile(vol_dir+"/")
- map = {}
- for link in glob.iglob("%s/*"%used_subdir):
- real_file = os.readlink(link)
- m = vol_re.match(real_file)
- if not m:
- continue
- graph, last = volfilter.load(real_file)
- for xl in graph.itervalues():
- if xl.type == "storage/posix":
- path = os.path.dirname(xl.opts["directory"])
- port = os.path.basename(link)
- map[path] = port
- break
- return json.dumps(map)
+def map_paths(vol_name):
+ return volmap.vol_map(vol_name)
+
+@route("/:user_name/adduser")
+def add_user(user_name):
+ print "add user: " + user_name
+
+@route("/:user_name/deleteuser")
+def delete_user(user_name):
+ print "delete user: " + user_name
+
+@route("/listusers")
+def list_users():
+ print "list users"
+
+@route("/wwwprovision")
+def www_provision():
+ print "www provision"
+
+@route("/wwwdoprovision")
+def www_doprovision():
+ print "www doprovision"
+
+@route("/wwwconfirmprovision")
+def www_confirmprovision():
+ print "www confirmprovision"
+
+@route("/wwwinitcluster")
+def www_initcluster():
+ print "www initcluster"
+
+@route("/wwwdoinitcluster")
+def www_doinitcluster():
+ print "www doinitcluster"
+
+@route("/wwwaddtenant")
+def www_addtenant():
+ print "www addtenant"
+
+@route("/wwwdoaddtenant")
+def www_doaddtenant():
+ print "www doaddtenant"
+
+@route("/wwwlisttenants")
+def www_listtenants():
+ print "www listtenants"
if __name__ == "__main__":
run(host='',port=CLOUDFSD_PORT)
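# A rough client-side sketch of driving the routes above over HTTP, assuming
# cloudfsd is running on a host named "server1" and manages a volume "myvol"
# (host, volume and port values are examples only):
import json
import urllib2

base = "http://server1:8080/myvol"
urllib2.urlopen(base + "/start")                        # cloudify and start the brick servers
brick_map = json.load(urllib2.urlopen(base + "/map"))   # brick path -> allocated port
client_volfile = urllib2.urlopen(base + "/fetch").read()
urllib2.urlopen(base + "/stop")                         # kill the brick servers, recycle ports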
diff --git a/scripts/paths.py b/scripts/paths.py
new file mode 100644
index 0000000..51bdafd
--- /dev/null
+++ b/scripts/paths.py
@@ -0,0 +1,10 @@
+
+import re
+import os
+
+gfs_dir = "/var/lib/glusterd"
+info_dir = "/var/lib/cloudfs"
+idle_subdir = "/var/lib/cloudfs/.idle_ports"
+used_subdir = "/var/lib/cloudfs/.used_ports"
+volfile_re = re.compile("[^.]+\.(.*)\.bricks-")
+
diff --git a/scripts/volfilter.py b/scripts/volfilter.py
old mode 100755
new mode 100644
diff --git a/scripts/volmap.py b/scripts/volmap.py
new file mode 100644
index 0000000..92b77e9
--- /dev/null
+++ b/scripts/volmap.py
@@ -0,0 +1,26 @@
+import glob
+import json
+import os
+import re
+
+import volfilter
+import paths
+
+def vol_map (vol_name):
+ vol_dir = "%s/%s" % (paths.info_dir, vol_name)
+ vol_re = re.compile(vol_dir+"/")
+ map = {}
+ for link in glob.iglob("%s/*" % paths.used_subdir):
+ real_file = os.readlink(link)
+ m = vol_re.match(real_file)
+ if not m:
+ continue
+ graph, last = volfilter.load(real_file)
+ for xl in graph.itervalues():
+ if xl.type == "storage/posix":
+ path = os.path.dirname(xl.opts["directory"])
+ port = os.path.basename(link)
+ map[path] = port
+ break
+ return json.dumps(map)
+
diff --git a/scripts/volstart.py b/scripts/volstart.py
new file mode 100644
index 0000000..a8f0655
--- /dev/null
+++ b/scripts/volstart.py
@@ -0,0 +1,171 @@
+
+import glob
+import os
+import re
+import socket
+import string
+import subprocess
+import sys
+
+import volfilter
+import paths
+
+# Make sure the volume directory exists and has the right stuff in it.
+def check_volume_directory(vol_name):
+ if not os.path.exists(paths.info_dir):
+ os.mkdir(paths.info_dir)
+ user_file = open("%s/%s" % (paths.info_dir, "default_users"), "w")
+ # TBD: big gaping security hole until other code can deal
+ # with having zero users defined.
+ user_file.write("alice password1\nbob password2\n")
+ user_file.flush()
+ user_file.close()
+ os.mkdir(paths.idle_subdir)
+ for i in range(24010, 24030):
+ fp = open("%s/%d" % (paths.idle_subdir, i), "w")
+ fp.close()
+ os.mkdir(paths.used_subdir)
+
+ vol_dir = "%s/%s" % (paths.info_dir, vol_name)
+ if not os.path.exists(vol_dir):
+ os.mkdir(vol_dir)
+ return vol_dir
+
+# Figure out which of the GlusterFS volfiles belong to us. Volfiles might have
+# names based on partial host names, fully qualified names, or addresses, or
+# even a mix thanks to "gluster peer probe" silliness. To deal with all of
+# these possibilities, we resolve everything to addresses and compare those.
+# ### bear in mind that depending on how a machine is set up, the IP addrs
+# ### for a node might include 127.0.0.1 and ::1 first
+def scan_gfs_volfiles(vol_name):
+ ret = ""
+ my_name = os.uname()[1]
+ # Getaddrinfo returns a list of tuples, each:
+ # family, socktype, proto, canonname, sockaddr
+ # We extract the sockaddr of the first item, and the IP addr from that
+ # TBD: handle IPv6, multi-homed hosts, etc.
+ my_addrs = socket.getaddrinfo(my_name, 0)
+ my_glob = "%s/vols/%s/%s.*.vol" % (paths.gfs_dir, vol_name, vol_name)
+ for vf in glob.iglob(my_glob):
+ m = paths.volfile_re.match(os.path.basename(vf))
+ if m:
+ this_host = m.groups(1)[0]
+ this_addr = socket.getaddrinfo(this_host, 0)[0][4][0]
+ for addr in my_addrs:
+ if this_addr == addr[4][0]:
+ ret = vf
+ break
+ return ret
+
+# Allocate a port for a server to run on. Right now we do this in a very
+# "clever" way, by creating files to match ports and then grabbing a file here.
+# When we have a real volume database such games will be unnecessary.
+def allocate_port(vol_file):
+ for pf in glob.iglob("%s/*" % paths.idle_subdir):
+ base = os.path.basename(pf)
+ new_name = "%s/%s" % (paths.used_subdir, base)
+ os.symlink(vol_file, new_name)
+ os.remove(pf)
+ return base
+ else:
+ raise RuntimeError, "no ports available"
+
+# Parse the user file into a list of [name,password] sub-lists. Since
+# everything that uses this is in Python we could just make it a pickle/shelf
+# or whatever, but it would all go away with a real volume database so it's not
+# worth the trouble to re-do it now.
+def parse_user_file(vol_name):
+ try:
+ user_file = open("%s/%s/users" % (paths.info_dir, vol_name), "r")
+ except IOError:
+ user_file = open("%s/default_users" % paths.info_dir, "r")
+
+ users = []
+ for line in user_file.readlines():
+ space = line.find(" ")
+ if space == -1:
+ print >> sys.stderr, "Bad line in userfile: %s" % line
+ users.append([line[:space],line[space+1:-1]])
+ return users
+
+# Convert a single GlusterFS server volfile to its CloudFS form, with one
+# translator stack per tenant and "evil" translators stripped out. Some day
+# this will also involve adding translators (e.g. UID mapping) at the top of
+# each stack.
+def cloudify_volfile(input, output, users, port):
+ graph, last = volfilter.load(input)
+ last = volfilter.cleanup(last, graph)
+
+ if last.type != "protocol/server":
+ print >> sys.stderr, "Top translator must be protocol/server"
+ sys.exit(1)
+ old_stack = last.subvols[0]
+
+ bad_opts = []
+ for opt in last.opts.iterkeys():
+ if opt[:9] == "auth.addr":
+ bad_opts.append(opt)
+ elif opt[:10] == "auth.login":
+ bad_opts.append(opt)
+ for opt in bad_opts:
+ print "# stripping auth option %s = %s" % (opt, last.opts[opt])
+ del last.opts[opt]
+
+ last.subvols = []
+ for user, pw in users:
+ new_stack = volfilter.copy_stack(old_stack, user)
+ last.subvols.append(new_stack)
+ last.opts["auth.login.%s.allow" % new_stack.name] = user
+ last.opts["auth.login.%s.password" % new_stack.name] = pw
+
+ last.opts["transport.socket.listen-port"] = port
+ volfilter.generate(graph, last, output)
+
+def create_tenant_dirs(vol_file):
+ cmd = "/bin/grep \"option directory\" %s" % vol_file
+ path = ""
+ opt_dir_lines = os.popen(cmd)
+ for opt_dir_line in opt_dir_lines:
+ tokens = re.split(' ', string.lstrip(opt_dir_line))
+ path = string.rstrip(tokens[2])
+ if not os.path.exists(path):
+ os.mkdir(path)
+ opt_dir_lines.close()
+ junkdir = os.path.dirname(path) + "/junk"
+ if not os.path.exists(junkdir):
+ os.mkdir(junkdir)
+
+def vol_start(vol_name):
+ vol_base = check_volume_directory(vol_name)
+ users = parse_user_file(vol_name)
+ vf = scan_gfs_volfiles(vol_name)
+ new_vf = "%s/%s" % (vol_base, os.path.basename(vf))
+ outfile = open(new_vf, "w")
+ port = allocate_port(new_vf)
+ cloudify_volfile(vf, outfile, users, port)
+ outfile.flush()
+ outfile.close()
+ v_key = string.replace(os.path.basename(new_vf), ".vol", "")
+ # print "v_key: %s" % v_key
+ # make dirs for each of the users
+ create_tenant_dirs(new_vf)
+ # actually start the server
+ # note: pid file in /var/lib/glusterd/vols/<vol_name>/... so that
+ # gluster can find it
+ cmd = "/usr/sbin/glusterfsd --volfile=%s --xlator-option %s-server.listen-port=%s --pid-file=/var/lib/glusterd/vols/%s/run/%s.pid --socket-file=/tmp/%s.socket --log-file=/var/log/glusterfs/bricks/%s.log" % (new_vf, vol_name, port, vol_name, v_key, v_key, vol_name)
+ # before 3.1.4 there were --brick-name and --brick-port for use by
+ # the gluster port mapper. These were secret/hidden cmdline options.
+ try:
+ p = subprocess.Popen(cmd, close_fds=True, shell=True)
+ retcode = os.waitpid(p.pid, 0)[1]
+ if retcode < 0:
+ print >>sys.stderr, "killed ", -retcode
+ except ValueError, v:
+ print >>sys.stderr, "value error: ", v
+ except OSError, o:
+ print >>sys.stderr, "os error: ", o
+ except NameError, n:
+ print >>sys.stderr, "name error: ", n
+ except:
+ print >>sys.stderr, "unknown error: ", sys.exc_info()[0]
+
diff --git a/scripts/volstop.py b/scripts/volstop.py
new file mode 100644
index 0000000..068baeb
--- /dev/null
+++ b/scripts/volstop.py
@@ -0,0 +1,45 @@
+
+import fileinput
+import glob
+import os
+import subprocess
+import sys
+
+import paths
+
+def kill_daemon(vol_name, vol_id):
+ cooked = vol_id.rsplit(".", 1)
+ pid_file_name = paths.gfs_dir + "/vols/" + vol_name + "/run/" + cooked[0] + ".pid"
+ for pid in fileinput.input(pid_file_name):
+ cmd = "/bin/kill " + pid
+ try:
+ p = subprocess.Popen(cmd, close_fds=True, shell=True);
+ retcode = os.waitpid(p.pid, 0)[1]
+ if retcode < 0:
+ print >>sys.stderr, "killed ", -retcode
+ except ValueError, v:
+ print >>sys.stderr, "value error: ", v
+ except OSError, o:
+ print >>sys.stderr, "os error: ", o
+ except NameError, n:
+ print >>sys.stderr, "name error: ", n
+ except:
+ print >>sys.stderr, "unknown error: ", sys.exc_info()[0]
+ fileinput.close()
+
+def recycle_port(path):
+ port_num = os.path.basename(path)
+ os.unlink(path)
+ fp = open("%s/%s" % (paths.idle_subdir, port_num), "w")
+ fp.close()
+
+def vol_stop(vol_name):
+ for symlink in glob.glob(paths.used_subdir + "/*"):
+ vol_link = os.readlink(symlink)
+ vol = os.path.basename(vol_link)
+ tokens = vol.split(".")
+ if vol_name == tokens[0]:
+ kill_daemon(vol_name, vol)
+ recycle_port(symlink)
+ break
+
commit 928766a775474459a6bac9108506db3430264ea7
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Apr 19 11:46:27 2011 -0400
Changes got left out of previous cloudfsd-branch commit.
diff --git a/scripts/volfilter.py b/scripts/volfilter.py
index 98ab20e..1b61595 100755
--- a/scripts/volfilter.py
+++ b/scripts/volfilter.py
@@ -17,8 +17,52 @@
# You should have received a copy of the GNU Affero General Public License *
# along with CloudFS. If not, see <http://www.gnu.org/licenses/>.
+import copy
import string
import sys
+import types
+
+good_xlators = [
+ "cluster/afr",
+ "cluster/dht",
+ "cluster/distribute",
+ "cluster/replicate",
+ "cluster/stripe",
+ "debug/io-stats",
+ "features/access-control",
+ "features/locks",
+ "features/marker",
+ "performance/io-threads",
+ "protocol/client",
+ "protocol/server",
+ "storage/posix",
+]
+
+def copy_stack (old_xl,suffix,recursive=False):
+ if recursive:
+ new_name = old_xl.name + "-" + suffix
+ else:
+ new_name = suffix
+ new_xl = Translator(new_name)
+ new_xl.type = old_xl.type
+ # The results with normal assignment here are . . . amusing.
+ new_xl.opts = copy.deepcopy(old_xl.opts)
+ for sv in old_xl.subvols:
+ new_xl.subvols.append(copy_stack(sv,suffix,True))
+ # Patch up the path at the bottom.
+ if new_xl.type == "storage/posix":
+ new_xl.opts["directory"] += ("/" + suffix)
+ return new_xl
+
+def cleanup (parent, graph):
+ if parent.type in good_xlators:
+ sv = []
+ for child in parent.subvols:
+ sv.append(cleanup(child,graph))
+ parent.subvols = sv
+ else:
+ parent = cleanup(parent.subvols[0],graph)
+ return parent
class Translator:
def __init__ (self, name):
@@ -31,7 +75,12 @@ class Translator:
return "<Translator %s>" % self.name
def load (path):
- fp = file(path,"r")
+ # If it's a string, open it; otherwise, assume it's already a
+ # file-like object (most notably from urllib*).
+ if type(path) in types.StringTypes:
+ fp = file(path,"r")
+ else:
+ fp = path
all_xlators = {}
xlator = None
last_xlator = None
commit 3be3fd214b18da3d3fa3e485fbfc13df1d74cecc
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri Apr 15 10:55:32 2011 -0400
New code for daemon/port management.
diff --git a/scripts/cfs_mount.py b/scripts/cfs_mount.py
new file mode 100755
index 0000000..af0715d
--- /dev/null
+++ b/scripts/cfs_mount.py
@@ -0,0 +1,65 @@
+import json
+import sys
+import urllib2
+
+import volfilter
+
+CLOUDFSD_PORT = 8080
+
+# Simple cache of brick-to-port mappings, so we don't have to keep re-fetching
+# the maps from the same host if it has multiple bricks.
+class mapper:
+ def __init__ (self):
+ self.cache = {}
+ def lookup (self, host, volume, subv):
+ if self.cache.has_key(host):
+ mydict = self.cache[host]
+ else:
+ url = "http://%s:%d/%s/map" % \
+ (host, CLOUDFSD_PORT, volume)
+ mydict = json.load(urllib2.urlopen(url))
+ self.cache[host] = mydict
+ if mydict.has_key(subv):
+ return mydict[subv]
+
+if __name__ == "__main__":
+
+ cache = mapper()
+
+ if len(sys.argv) != 6:
+ print >> sys.stderr, \
+ "Usage: %s server volume username password mountpoint" \
+ % sys.argv[0]
+ sys.exit(0)
+ (host, volume, username, password, mount) = sys.argv[1:6]
+
+ # Fetch the GlusterFS client-side volfile.
+ url = "http://%s:%d/%s/fetch" % (host, CLOUDFSD_PORT, volume)
+ vol_file = urllib2.urlopen(url)
+
+ # Load the volfile and clean out some of the crud.
+ graph, last = volfilter.load(vol_file)
+ last = volfilter.cleanup(last,graph)
+
+ # For each client translator, map to the port that cloudfsd allocated
+ # for that brick and add our username/password to select the tenant.
+ for xl in graph.itervalues():
+ if xl.type != "protocol/client":
+ continue
+ host = xl.opts["remote-host"]
+ subv = xl.opts["remote-subvolume"]
+ port = cache.lookup(host,volume,subv)
+ if not port:
+ errstr = "Could not find port for %s:%s" % (host, subv)
+ print >> sys.stderr, errstr
+ sys.exit(1)
+ xl.opts["remote-port"] = port
+ xl.opts["remote-subvolume"] = username
+ xl.opts["username"] = username
+ xl.opts["password"] = password
+
+ volfilter.generate(graph,last,sys.stdout)
+
+
+
+
diff --git a/scripts/cloudfsd.py b/scripts/cloudfsd.py
new file mode 100755
index 0000000..0b3433e
--- /dev/null
+++ b/scripts/cloudfsd.py
@@ -0,0 +1,164 @@
+import glob
+import json
+import os
+import re
+import socket
+import sys
+
+from bottle import route, run
+import volfilter
+
+gfs_dir = "/var/lib/glusterd"
+info_dir = "%s/%s" % (os.path.dirname(gfs_dir), "cloudfs")
+idle_subdir = "%s/.idle_ports" % info_dir
+used_subdir = "%s/.used_ports" % info_dir
+volfile_re = re.compile("[^.]+\.(.*)\.bricks-")
+CLOUDFSD_PORT = 8080
+
+# Make sure the volume directory exists and has the right stuff in it.
+def check_volume_directory (vol_name):
+ if not os.path.exists(info_dir):
+ os.mkdir(info_dir)
+ user_file = open("%s/%s"%(info_dir,"default_users"),"w")
+ # TBD: big gaping security hole until other code can deal
+ # with having zero users defined.
+ user_file.write("alice password1\nbob password2\n")
+ user_file.flush()
+ user_file.close()
+ os.mkdir(idle_subdir)
+ i = 24010
+ while i < 24030:
+ fp = open("%s/%d"%(idle_subdir,i),"w")
+ i += 1
+ os.mkdir(used_subdir)
+
+ vol_dir = "%s/%s" % (info_dir, vol_name)
+ if not os.path.exists(vol_dir):
+ os.mkdir(vol_dir)
+ return vol_dir
+
+# Figure out which of the GlusterFS volfiles belong to us. Volfiles might have
+# names based on partial host names, fully qualified names, or addresses, or
+# even a mix thanks to "gluster peer probe" silliness. To deal with all of
+# these possibilities, we resolve everything to addresses and compare those.
+def scan_gfs_volfiles (vol_name):
+ my_name = os.uname()[1]
+ # Getaddrinfo returns a list of tuples, each:
+ # family, socktype, proto, canonname, sockaddr
+ # We extract the sockaddr of the first item, and the IP addr from that
+ # TBD: handle IPv6, multi-homed hosts, etc.
+ my_addr = socket.getaddrinfo(my_name,0)[0][4][0]
+ my_glob = "%s/vols/%s/%s.*.vol" % (gfs_dir, vol_name, vol_name)
+ for vf in glob.iglob(my_glob):
+ m = volfile_re.match(os.path.basename(vf))
+ if m:
+ this_host = m.groups(1)[0]
+ this_addr = socket.getaddrinfo(this_host,0)[0][4][0]
+ if this_addr == my_addr:
+ yield vf
+
+# Allocate a port for a server to run on. Right now we do this in a very
+# "clever" way, by creating files to match ports and then grabbing a file here.
+# When we have a real volume database such games will be unnecessary.
+def allocate_port (vol_file):
+ for pf in glob.iglob("%s/*"%idle_subdir):
+ base = os.path.basename(pf)
+ new_name = "%s/%s" % (used_subdir, base)
+ os.symlink(vol_file,new_name)
+ os.remove(pf)
+ return base
+ else:
+ raise RuntimeError, "no ports available"
+
+# Parse the user file into a list of [name,password] sub-lists. Since
+# everything that uses this is in Python we could just make it a pickle/shelf
+# or whatever, but it would all go away with a real volume database so it's not
+# worth the trouble to re-do it now.
+def parse_user_file (vol_name):
+ try:
+ user_file = open("%s/%s/users"%(info_dir,vol_name),"r")
+ except IOError:
+ user_file = open("%s/default_users"%info_dir,"r")
+
+ users = []
+ for line in user_file.readlines():
+ space = line.find(" ")
+ if space == -1:
+ print >> sys.stderr, "Bad line in userfile: %s" % line
+ users.append([line[:space],line[space+1:-1]])
+
+ return users
+
+# Convert a single GlusterFS server volfile to its CloudFS form, with one
+# translator stack per tenant and "evil" translators stripped out. Some day
+# this will also involve adding translators (e.g. UID mapping) at the top of
+# each stack.
+def cloudify_volfile (input, output, users, port):
+ print "# Cloudifying server %s" % input
+ graph, last = volfilter.load(input)
+ last = volfilter.cleanup(last,graph)
+
+ if last.type != "protocol/server":
+ print >> sys.stderr, "Top translator must be protocol/server"
+ sys.exit(1)
+ old_stack = last.subvols[0]
+
+ bad_opts = []
+ for opt in last.opts.iterkeys():
+ if opt[:9] == "auth.addr":
+ bad_opts.append(opt)
+ elif opt[:10] == "auth.login":
+ bad_opts.append(opt)
+ for opt in bad_opts:
+ print "# stripping auth option %s = %s" % (opt, last.opts[opt])
+ del last.opts[opt]
+
+ last.subvols = []
+ for user, pw in users:
+ new_stack = volfilter.copy_stack(old_stack,user)
+ last.subvols.append(new_stack)
+ last.opts["auth.login.%s.allow"%new_stack.name] = user
+ last.opts["auth.login.%s.password"%new_stack.name] = pw
+
+ last.opts["transport.socket.listen-port"] = port
+ volfilter.generate(graph,last,output)
+
+@route("/:vol_name/start")
+def start_server (vol_name):
+ vol_base = check_volume_directory(vol_name)
+ users = parse_user_file(vol_name)
+
+ for vf in scan_gfs_volfiles(vol_name):
+ new_vf = "%s/%s" % (vol_base, os.path.basename(vf))
+ outfile = open(new_vf,"w")
+ port = allocate_port(new_vf)
+ cloudify_volfile(vf,outfile,users,port)
+ # TBD: actually start the server
+ print "glusterfsd -f %s" % new_vf
+
+@route("/:vol_name/fetch")
+def fetch_client_vf (vol_name):
+ vf_path = "%s/vols/%s/%s-fuse.vol" % (gfs_dir, vol_name, vol_name)
+ return open(vf_path,"r")
+
+@route("/:vol_name/map")
+def map_paths (vol_name):
+ vol_dir = "%s/%s" % (info_dir, vol_name)
+ vol_re = re.compile(vol_dir+"/")
+ map = {}
+ for link in glob.iglob("%s/*"%used_subdir):
+ real_file = os.readlink(link)
+ m = vol_re.match(real_file)
+ if not m:
+ continue
+ graph, last = volfilter.load(real_file)
+ for xl in graph.itervalues():
+ if xl.type == "storage/posix":
+ path = os.path.dirname(xl.opts["directory"])
+ port = os.path.basename(link)
+ map[path] = port
+ break
+ return json.dumps(map)
+
+if __name__ == "__main__":
+ run(host='',port=CLOUDFSD_PORT)
commit 77cbe28984a7fad097575f6d7e95bb62cacf3115
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Mar 17 15:41:46 2011 -0400
Add support for hex keys (inline or file) up to 256 bits.
diff --git a/xlators/encryption/crypt/src/crypt.c b/xlators/encryption/crypt/src/crypt.c
index 1e4a364..8748749 100644
--- a/xlators/encryption/crypt/src/crypt.c
+++ b/xlators/encryption/crypt/src/crypt.c
@@ -103,17 +103,6 @@ encrypt_chunk (AES_KEY *key, unsigned char *input, unsigned char *output,
* change either the key or the GFID to regain confidentiality.
* This is simple if you just do copy/rename, but involves
* a lot of data movement and calculation.
- *
- * The main protection this provides, vs. using just a "naked"
- * GFID and block number as the IV, is to protect against
- * recycling of a GFID. Whenever a GFID is recycled, a new
- * key can be generated and stored separately from the file,
- * with a key ID/fingerprint attached to the file as an xattr.
- * Thus, the new file will have a completely different IV
- * sequence and thus a different keystream than any previous
- * file that had the same GFID. This makes the copy/rename
- * mentioned above more effective as a way to regain
- * confidentiality for a compromised file.
*/
memset(iv_input,0,sizeof(iv_input));
memcpy(iv_input,gfid,GFID_SIZE);
@@ -126,7 +115,7 @@ encrypt_chunk (AES_KEY *key, unsigned char *input, unsigned char *output,
* and to use iv_increment for consistency with the above.
*/
AES_encrypt(actual_iv,keystream,key);
- DPRINTF("keystream for %u:%u starts with %02x %02x %02x\n",
+ DPRINTF("keystream for %lu:%lu starts with %02x %02x %02x\n",
file_offset / BLOCK_SIZE,
(file_offset % BLOCK_SIZE) / AES_BLOCK_SIZE,
keystream[0], keystream[1], keystream[2]);
@@ -253,13 +242,82 @@ crypt_writev (call_frame_t *frame,
return 0;
}
+int32_t
+crypt_set_key (data_t *data, AES_KEY *key)
+{
+ int rc = -1;
+ unsigned char hex_buf[32] = {0}; /* binary AES-256 */
+ unsigned char i = 0;
+ int hex_byte = 0;
+ int fd = -1;
+ unsigned char file_buf[64] = {0}; /* hex AES-256 */
+
+ if (!data) {
+ gf_log(__func__,GF_LOG_ERROR,"missing key option");
+ return EINVAL;
+ }
+ gf_log(__func__,GF_LOG_DEBUG,"data length is %d",data->len);
+
+ /*
+ * Mostly we let AES_set_encrypt_key do key-size checking, since
+ * it'll do it anyway even if we already did. The exceptions are (1)
+ * a null key which is practically free to check in the switch, and
+ * (2) the hex-key length which could cause an overflow in this code
+ * before we ever call AES_set_encrypt_key.
+ */
+ switch (data->data[0]) {
+ case '\0':
+ gf_log(__func__,GF_LOG_DEBUG,"null key");
+ return EINVAL;
+ case '%': /* hex key */
+ gf_log(__func__,GF_LOG_DEBUG,"handling hex key");
+ switch (data->len) {
+ case 34: /* AES-128 = "%<32x>\0" */
+ case 50: /* AES-192 = "%<48x>\0" */
+ case 66: /* AES-256 = "%<64x>\0" */
+ break;
+ default:
+ gf_log(__func__,GF_LOG_DEBUG,"bad hex-key length");
+ return EINVAL;
+ }
+ for (i = 0; i < (data->len / 2 - 1); ++i) {
+ if (sscanf(data->data+i*2+1,"%2x",&hex_byte) != 1) {
+ break;
+ }
+ hex_buf[i] = hex_byte & 0xff;
+ }
+ rc = AES_set_encrypt_key(hex_buf,i*8,key);
+ break;
+ case '/': /* key in file */
+ gf_log(__func__,GF_LOG_DEBUG,"handling file key");
+ fd = open(data->data,O_RDONLY);
+ if (fd < 0) {
+ gf_log(__func__,GF_LOG_ERROR,"could not open key file");
+ return EINVAL;
+ }
+ rc = read(fd,file_buf,sizeof(file_buf));
+ close(fd);
+ for (i = 0; i < (rc / 2); ++i) {
+ if (sscanf(file_buf+i*2,"%2x",&hex_byte) != 1) {
+ break;
+ }
+ hex_buf[i] = hex_byte & 0xff;
+ }
+ rc = AES_set_encrypt_key(hex_buf,i*8,key);
+ break;
+ default: /* text key */
+ gf_log(__func__,GF_LOG_DEBUG,"handling text key");
+ rc = AES_set_encrypt_key(data->data,(data->len-1)*8,key);
+ }
+
+ return rc ? EINVAL : 0;
+}
int32_t
init (xlator_t *this)
{
crypt_private_t *priv = NULL;
- data_t *data = NULL;
- unsigned char ckey[16];
+ int32_t status = 0;
if (!this->children || this->children->next) {
gf_log ("crypt", GF_LOG_ERROR,
@@ -278,23 +336,15 @@ init (xlator_t *this)
}
this->private = priv;
- priv->block_size = 4096; /* TBD: make this an option */
-
/*
* TBD: the option should really specify a file containing a longer key
* for a better encryption algorithm.
*/
- data = dict_get(this->options,"key");
- if (data) {
- memset(ckey,0,sizeof(ckey));
- strncpy((char *)ckey,data->data,sizeof(ckey));
- AES_set_encrypt_key(ckey,sizeof(ckey)*8,&priv->key);
- }
- else {
+ status = crypt_set_key(dict_get(this->options,"key"),&priv->key);
+ if (status != 0) {
gf_log(this->name,GF_LOG_ERROR,"key missing");
- return EINVAL;
+ return status;
}
-
gf_log ("crypt", GF_LOG_INFO, "crypt xlator loaded");
return 0;
}
diff --git a/xlators/encryption/crypt/src/crypt.h b/xlators/encryption/crypt/src/crypt.h
index 0c7e5ea..0aa228b 100644
--- a/xlators/encryption/crypt/src/crypt.h
+++ b/xlators/encryption/crypt/src/crypt.h
@@ -37,7 +37,6 @@
} while (0);
typedef struct {
- uint32_t block_size;
AES_KEY key;
} crypt_private_t;
@@ -46,8 +45,4 @@ typedef struct {
uuid_t gfid;
} crypt_rlocal_t;
-typedef struct {
- int not_needed;
-} crypt_wlocal_t;
-
#endif /* __CRYPT_H__ */
commit 2bd0dff0e743f7ed01c3c1bd797b59f894d234ae
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Mar 16 16:29:35 2011 -0400
New code using AES, non-constant IV, no read/modify/write.
diff --git a/xlators/encryption/crypt/src/crypt.c b/xlators/encryption/crypt/src/crypt.c
index d2d3be1..1e4a364 100644
--- a/xlators/encryption/crypt/src/crypt.c
+++ b/xlators/encryption/crypt/src/crypt.c
@@ -31,148 +31,122 @@
#include "crypt.h"
-/* Forward decls so crypt_launch can retry. */
-int32_t
-crypt_rmw_done (call_frame_t *frame, xlator_t *this);
-int32_t
-crypt_lock_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno);
/*
- * The "lame" stuff is just for testing (makes it easier to verify correctness
- * of contents) and must be enabled by hand.
+ * TBD: make this endian-neutral. Right now it works on homogeneous machines
+ * because they'll all do the addition the same way, but it should work on
+ * heterogeneous machines as well.
*/
-
-inline unsigned char
-lrot (unsigned char c)
-{
- return ((c << 1) & 0xfe) | ((c >> 7) & 0x01);
-}
-
-inline unsigned char
-rrot (unsigned char c)
-{
- return ((c >> 1) & 0x7f) | ((c << 7) & 0x80);
-}
-
void
-lame_encrypt (crypt_private_t *priv, char *buf, int len)
+increment_iv (unsigned char *iv, unsigned int n)
{
- int i;
-
- for (i = 0; i < len; ++i) {
- *((unsigned char *)(buf+i)) = lrot((unsigned char)buf[i]);
- }
-}
+ unsigned int *iv_ints = (unsigned int *)iv;
+ unsigned char i;
-void
-lame_decrypt (crypt_private_t *priv, char *buf, int len)
-{ int i;
-
- for (i = 0; i < len; ++i) {
- *((unsigned char *)(buf+i)) = rrot((unsigned char)buf[i]);
+ /* Check for safety from 32-bit overflow. */
+ if (n <= (UINT_MAX - iv_ints[3])) {
+ iv_ints[3] += n;
+ return;
}
-}
-
-void
-lame_decrypt_iovec (crypt_private_t *priv, struct iovec *vector, int count)
-{
- int i;
- for (i = 0; i < count; i++) {
- lame_decrypt (priv, vector[i].iov_base, vector[i].iov_len);
- }
-}
-
-/*
- * True block ciphers are a real problem at EOF because there's no
- * reasonable place to put the "residue" if the file size is not a multiple
- * of the cipher block size. Therefore, we use cfb64 as a stream cipher,
- * but reset it at every FS-block boundary so that we can do reads and writes
- * in the middle of the file.
- */
-
-void
-good_crypt_buf (crypt_private_t *priv, char * buf, int len, int dir)
-{
- DES_cblock ivec;
- int num;
-
- while (len >= priv->block_size) {
- memset(&ivec,0,sizeof(ivec));
- num = 0;
- DES_cfb64_encrypt((const unsigned char *)buf,
- (unsigned char *)buf, priv->block_size,
- &priv->sched,&ivec,&num,dir);
- buf += priv->block_size;
- len -= priv->block_size;
- }
-
- if (len > 0) {
- memset(&ivec,0,sizeof(ivec));
- num = 0;
- DES_cfb64_encrypt((const unsigned char *)buf,
- (unsigned char *)buf,len,&priv->sched,&ivec,&num,dir);
+ /*
+ * Do this carefully to avoid actually hitting the overflow. We know
+ * that iv_ints[3] cannot be zero because then n could not have been
+ * greater than the remainder and we wouldn't be here.
+ */
+ iv_ints[3] = n - (UINT_MAX - iv_ints[3] + 1);
+
+ /* Propagate the carry bit. */
+ i = 2;
+ for (;;) {
+ if (iv_ints[i] != UINT_MAX) {
+ /* Carry bit stops here. */
+ ++iv_ints[i];
+ break;
+ }
+ iv_ints[i] = 0;
+ if (i == 0) {
+ /* Total overflow; wraparound is OK. */
+ break;
+ }
+ /* Propagate (at least) one further. */
+ --i;
}
}
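The counter arithmetic above is easier to follow in isolation. Below is a self-contained equivalent (illustrative, not part of the patch) that, like increment_iv(), views the 16-byte IV as four native-endian 32-bit words with index 3 least significant; that word layout is also why the TBD note above says the result only matches across machines of the same endianness.

#include <limits.h>

/* Illustrative stand-alone form of the 128-bit counter addition. */
void
iv_add (unsigned int iv_ints[4], unsigned int n)
{
        int i;

        if (n <= UINT_MAX - iv_ints[3]) {
                /* No carry out of the least-significant word. */
                iv_ints[3] += n;
                return;
        }
        /* Carry: compute the wrapped low word, then ripple the carry up. */
        iv_ints[3] = n - (UINT_MAX - iv_ints[3] + 1);
        for (i = 2; i >= 0; --i) {
                if (iv_ints[i] != UINT_MAX) {
                        ++iv_ints[i];
                        return;
                }
                iv_ints[i] = 0;    /* this word wraps; keep carrying */
        }
        /* Falling out of the loop means total 128-bit wraparound, which the
         * original code also treats as acceptable. */
}

For example, with iv_ints = {0, 0, 0, 0xfffffffe} and n = 3, the low word wraps to 1 and the carry bumps iv_ints[2] from 0 to 1.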
void
-good_encrypt (crypt_private_t *priv, char * buf, int len)
-{
- good_crypt_buf(priv,buf,len,1);
-}
-
-void
-good_decrypt (crypt_private_t *priv, char * buf, int len)
+encrypt_chunk (AES_KEY *key, unsigned char *input, unsigned char *output,
+ unsigned char *gfid, off_t file_offset, size_t length)
{
- good_crypt_buf(priv,buf,len,0);
-}
-
-void
-good_crypt_iov (crypt_private_t *priv, struct iovec *vector, int count, int dir)
-{
- DES_cblock ivec;
- int num;
- int v;
- char * buf;
- int b;
- int bytes;
- int b_resid = priv->block_size;
-
- for (v = 0; v < count; ++v) {
- buf = (char *)(vector[v].iov_base);
- for (b = 0; b < vector[v].iov_len; b += bytes) {
- bytes = vector[v].iov_len - b;
- if (bytes > b_resid) {
- bytes = b_resid;
- }
- if (b_resid == priv->block_size) {
- memset(&ivec,0,sizeof(ivec));
- num = 0;
+ size_t length_now; /* length within data block */
+ unsigned char iv_input[AES_BLOCK_SIZE];
+ unsigned char actual_iv[AES_BLOCK_SIZE];
+ unsigned char keystream[AES_BLOCK_SIZE];
+ unsigned char ib_offset; /* intra-block offset */
+
+ while (length > 0) {
+ length_now = BLOCK_SIZE - (file_offset % BLOCK_SIZE);
+ if (length_now > length) {
+ length_now = length;
+ }
+ /* Generate a hard-to-predict IV for the block, using a method
+ * similar to ESSIV but without the redundant hash step. Note
+ * that the IV will still be constant for the same key, GFID,
+ * and block number. This does make us susceptible to a
+ * known-plaintext attack; anyone who can see both plaintext
+ * and ciphertext for a block can derive that section of the
+ * keystream even without having the key, and can then use that
+ * to read any plaintext in that same block thereafter. So
+ * don't let anyone see both. Really. If you send plaintext
+ * to the same people who receive the ciphertext (i.e. the
+ * people who run your storage servers) then you'll need to
+ * change either the key or the GFID to regain confidentiality.
+ * This is simple if you just do copy/rename, but involves
+ * a lot of data movement and calculation.
+ *
+ * The main protection this provides, vs. using just a "naked"
+ * GFID and block number as the IV, is to protect against
+ * recycling of a GFID. Whenever a GFID is recycled, a new
+ * key can be generated and stored separately from the file,
+ * with a key ID/fingerprint attached to the file as an xattr.
+ * Thus, the new file will have a completely different IV
+ * sequence and thus a different keystream than any previous
+ * file that had the same GFID. This makes the copy/rename
+ * mentioned above more effective as a way to regain
+ * confidentiality for a compromised file.
+ */
+ memset(iv_input,0,sizeof(iv_input));
+ memcpy(iv_input,gfid,GFID_SIZE);
+ *((off_t *)iv_input) = file_offset / BLOCK_SIZE;
+ AES_encrypt(iv_input,actual_iv,key);
+ increment_iv(actual_iv,(file_offset%BLOCK_SIZE)/AES_BLOCK_SIZE);
+ /*
+ * Now for the actual encryption. This is very similar to
+ * AES_ctr128_encrypt, but adjusted to avoid API inefficiency
+ * and to use iv_increment for consistency with the above.
+ */
+ AES_encrypt(actual_iv,keystream,key);
+ DPRINTF("keystream for %u:%u starts with %02x %02x %02x\n",
+ file_offset / BLOCK_SIZE,
+ (file_offset % BLOCK_SIZE) / AES_BLOCK_SIZE,
+ keystream[0], keystream[1], keystream[2]);
+ ib_offset = file_offset % AES_BLOCK_SIZE;
+ for (;;) {
+ *(output++) = *(input++) ^ keystream[ib_offset];
+ ++file_offset;
+ --length;
+ if (--length_now == 0) {
+ break;
}
- DES_cfb64_encrypt((const unsigned char *)buf,
- (unsigned char *)buf,
- bytes,&priv->sched,&ivec,&num,dir);
- buf += bytes;
- b_resid = (b_resid + bytes) % priv->block_size;
- if (!b_resid) {
- b_resid = priv->block_size;
+ if (++ib_offset == AES_BLOCK_SIZE) {
+ increment_iv(actual_iv,1);
+ AES_encrypt(actual_iv,keystream,key);
+ ib_offset = 0;
}
}
}
}
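The addressing above reduces to a small formula: a byte at absolute file offset off lives in filesystem block off / BLOCK_SIZE, in AES block (off % BLOCK_SIZE) / AES_BLOCK_SIZE within that filesystem block, at position off % AES_BLOCK_SIZE within the keystream block. For instance, offset 5000 with the 1024-byte BLOCK_SIZE is filesystem block 4, AES block 56, byte 8. A hedged standalone sketch of the keystream derivation for the AES block containing a given offset follows; it is less efficient than encrypt_chunk(), which derives the base IV once per filesystem block, and it reuses the iv_add() helper sketched above.

#include <string.h>
#include <sys/types.h>
#include <openssl/aes.h>

#define BLOCK_SIZE 1024   /* filesystem block size, as in crypt.h */
#define GFID_SIZE  16

/* iv_add() is the 128-bit counter increment sketched after increment_iv(). */
void iv_add (unsigned int iv_ints[4], unsigned int n);

/* Illustrative sketch, not part of the patch: derive the 16-byte keystream
 * block that encrypt_chunk() XORs against data at absolute file offset off.
 * It mirrors the patch's layout tricks, including overwriting the first
 * sizeof(off_t) bytes of the GFID copy with the filesystem block number. */
void
keystream_for_offset (const AES_KEY *key, const unsigned char *gfid,
                      off_t off, unsigned char out[AES_BLOCK_SIZE])
{
        unsigned char iv_input[AES_BLOCK_SIZE];
        unsigned char iv[AES_BLOCK_SIZE];

        memset(iv_input, 0, sizeof(iv_input));
        memcpy(iv_input, gfid, GFID_SIZE);
        *((off_t *)iv_input) = off / BLOCK_SIZE;         /* block number */

        AES_encrypt(iv_input, iv, key);                  /* ESSIV-like base IV */
        iv_add((unsigned int *)iv,
               (unsigned int)((off % BLOCK_SIZE) / AES_BLOCK_SIZE));
        AES_encrypt(iv, out, key);                       /* keystream block */
}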
-void
-good_encrypt_iovec (crypt_private_t *priv, struct iovec *vector, int count)
-{
- good_crypt_iov(priv,vector,count,1);
-}
-
-void
-good_decrypt_iovec (crypt_private_t *priv, struct iovec *vector, int count)
-{
- good_crypt_iov(priv,vector,count,0);
-}
int32_t
crypt_readv_cbk (call_frame_t *frame,
@@ -187,132 +161,17 @@ crypt_readv_cbk (call_frame_t *frame,
{
crypt_rlocal_t *local = frame->local;
crypt_private_t *priv = this->private;
- struct iovec *tmp_vec = NULL;
- int32_t tmp_count = 0;
- int32_t in_vec = 0;
- size_t in_byte = 0;
- int32_t out_block = 0;
- size_t out_byte = 0;
- char **new_blocks = NULL;
- uint32_t nb_count = 0;
- char *the_block = NULL;
- size_t resid = 0;
- int unwound = 0;
-
- if (op_ret < 0) {
- goto err;
- }
-
- if ((count == 0) || (vector[0].iov_len == 0)) {
- /* Not really an error, but we can save time at EOF. */
- goto err;
- }
-
- tmp_count = local->my_size / priv->block_size;
- tmp_vec = CALLOC(tmp_count,sizeof(*tmp_vec));
- if (!tmp_vec) {
- op_ret = -1;
- op_errno = ENOMEM;
- goto err;
- }
-
- /*
- * For now, assume that we won't get a single contiguous region split
- * across two (or more) iovs. If it turns out that such things do
- * happen, add a step here to coalesce.
- */
-
- for (out_block = 0; out_block < tmp_count; ++out_block) {
- if (in_vec >= count) {
- break;
- }
- if ((in_byte + priv->block_size) <= vector[in_vec].iov_len) {
- /* We can just use this block in place. */
- tmp_vec[out_block].iov_base
- = ((char *)(vector[in_vec].iov_base)) + in_byte;
- tmp_vec[out_block].iov_len = priv->block_size;
- in_byte += priv->block_size;
- if (in_byte == vector[in_vec].iov_len) {
- ++in_vec;
- in_byte = 0;
- }
- out_byte = priv->block_size;
- }
- else {
- /* We have to allocate a new block and copy. */
- if (!new_blocks) {
- new_blocks = CALLOC(tmp_count,sizeof(char *));
- if (!new_blocks) {
- op_ret = -1;
- op_errno = ENOMEM;
- goto err;
- }
- }
- the_block = malloc(priv->block_size);
- if (!the_block) {
- op_ret = -1;
- op_errno = ENOMEM;
- goto err_free;
- }
- new_blocks[nb_count++] = the_block;
- out_byte = vector[in_vec].iov_len - in_byte;
- memcpy(the_block,
- ((char *)(vector[in_vec].iov_base)) + in_byte,
- out_byte);
- ++in_vec;
- while (in_vec < count) {
- in_byte = 0;
- resid = priv->block_size - out_byte;
- if (resid > vector[in_vec].iov_len) {
- resid = vector[in_vec].iov_len;
- }
- memcpy(the_block+out_byte,
- vector[in_vec].iov_base, resid);
- in_byte += resid;
- if (in_byte == vector[in_vec].iov_len) {
- ++in_vec;
- in_byte = 0;
- }
- out_byte += resid;
- if (out_byte == priv->block_size) {
- break;
- }
- }
- tmp_vec[out_block].iov_base = the_block;
- tmp_vec[out_block].iov_len = priv->block_size;
- }
- }
-
- good_decrypt_iovec(priv,tmp_vec,out_block);
- tmp_vec[out_block-1].iov_len = out_byte;
-
- resid = local->orig_offset - local->my_offset;
- if (resid) {
- tmp_vec[0].iov_base = ((char *)(tmp_vec[0].iov_base)) + resid;
- if (resid < tmp_vec[0].iov_len) {
- tmp_vec[0].iov_len -= resid;
- }
- else {
- /* TBD: resid might actually span more than one iov */
- tmp_vec[0].iov_len = 0;
- }
- }
-
- STACK_UNWIND_STRICT (readv, frame, op_ret, op_errno, tmp_vec, out_block,
- stbuf, iobref);
- unwound = 1;
+ int32_t i = 0;
+ off_t cur_off = local->offset;
-err_free:
- free(tmp_vec);
- while (nb_count) {
- free(new_blocks[--nb_count]);
+ for (i = 0; i < count; ++i) {
+ encrypt_chunk(&priv->key,vector[i].iov_base,vector[i].iov_base,
+ local->gfid,cur_off,vector[i].iov_len);
+ cur_off += vector[i].iov_len;
}
-err:
- if (!unwound) {
- STACK_UNWIND_STRICT (readv, frame, op_ret, op_errno, vector,
- count, stbuf, iobref);
- }
+ STACK_UNWIND_STRICT(readv,frame,op_ret,op_errno,
+ vector,count,stbuf,iobref);
return 0;
}
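crypt_readv_cbk() can decrypt by calling encrypt_chunk() because XORing the same keystream in twice is the identity operation. A deliberately simple round-trip helper built on keystream_for_offset() from the sketch above (again illustrative, not part of the patch) makes that property explicit; it regenerates the keystream for every AES block rather than once per filesystem block.

#include <sys/types.h>
#include <openssl/aes.h>

/* From the sketch following encrypt_chunk(). */
void keystream_for_offset (const AES_KEY *key, const unsigned char *gfid,
                           off_t off, unsigned char out[AES_BLOCK_SIZE]);

/* Illustrative only: XOR buf[0..len) with the keystream for absolute file
 * offset off.  Applying it twice with the same key/gfid/offset restores the
 * original bytes, which is the property the read and write paths rely on. */
void
xor_with_keystream (const AES_KEY *key, const unsigned char *gfid,
                    off_t off, unsigned char *buf, size_t len)
{
        unsigned char ks[AES_BLOCK_SIZE];
        size_t i;

        for (i = 0; i < len; ++i, ++off) {
                if (i == 0 || (off % AES_BLOCK_SIZE) == 0)
                        keystream_for_offset(key, gfid, off, ks);
                buf[i] ^= ks[off % AES_BLOCK_SIZE];
        }
}

The write path below uses the same symmetry in the other direction: crypt_writev() encrypts each iovec in place with encrypt_chunk() before winding the request to the next translator.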
@@ -329,8 +188,6 @@ crypt_readv (call_frame_t *frame,
off_t offset)
{
crypt_rlocal_t *local = NULL;
- size_t resid;
- crypt_private_t *priv = this->private;
uint32_t op_errno = EIO;
local = CALLOC(1,sizeof(*local));
@@ -340,24 +197,9 @@ crypt_readv (call_frame_t *frame,
}
frame->local = local;
- local->orig_size = size;
- local->orig_offset = offset;
+ local->offset = offset;
+ memcpy(local->gfid,fd->inode->gfid,sizeof(local->gfid));
- resid = offset % priv->block_size;
- if (resid) {
- offset -= resid;
- size += resid;
- }
-
- resid = (offset + size) % priv->block_size;
- if (resid) {
- size += (priv->block_size - resid);
- }
-
- local->my_size = size;
- local->my_offset = offset;
-
- gf_log(this->name,GF_LOG_DEBUG,"reading %lu at %ld",size,offset);
STACK_WIND (frame,
crypt_readv_cbk,
FIRST_CHILD (this),
@@ -370,47 +212,6 @@ err:
return 0;
}
-int32_t
-crypt_launch (call_frame_t *frame, xlator_t *this)
-{
- crypt_wlocal_t *local = frame->local;
- crypt_private_t *priv = this->private;
- int32_t op_errno = ENOMEM;
-
- local->call_count = (local->head_resid != 0) + (local->tail_resid != 0);
-
- /* Check for the only case which doesn't require a lock. */
- if (!local->call_count) {
- /* Head and tail were encrypted on the previous pass. */
- good_decrypt(priv,local->head_data,priv->block_size);
- good_encrypt(priv,local->head_data,priv->block_size);
- return crypt_rmw_done(frame,this);
- }
-
- local->xattr = get_new_dict();
- if (!local->xattr) {
- op_errno = ENOMEM;
- goto err;
- }
-
- if (dict_set_str(local->xattr,"trusted.glusterfs.lock","fubar") != 0) {
- op_errno = EIO;
- dict_unref(local->xattr);
- goto err;
- }
-
- local->op_ret = 0;
-
- STACK_WIND (frame, crypt_lock_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->fsetxattr,
- local->fd, local->xattr, 0);
-
- return 0;
-
-err:
- STACK_UNWIND_STRICT(writev,frame,-1,op_errno,NULL,NULL);
- return 0;
-}
int32_t
crypt_writev_cbk (call_frame_t *frame,
@@ -421,275 +222,11 @@ crypt_writev_cbk (call_frame_t *frame,
struct iatt *prebuf,
struct iatt *postbuf)
{
- crypt_wlocal_t *local = frame->local;
-
- /*
- * This is where we might get an error indicating that somebody else
- * wrote to the file between our read and write. In that case, we
- * simply re-start at the lock call.
- */
-
- if ((op_ret < 0) && (op_errno == EBUSY)) {
- gf_log(this->name,GF_LOG_WARNING,"retrying conflicted write");
- local->is_retry = _gf_true;
- return crypt_launch(frame,this);
- }
-
- if (op_ret > local->orig_size) {
- op_ret = local->orig_size;
- }
- iobref_unref(local->iobref);
STACK_UNWIND_STRICT (writev, frame, op_ret, op_errno, prebuf, postbuf);
return 0;
}
-/*
- * This is where the real encryption work gets done. If there's a partial
- * block at the head, we should have read the whole block into head_data
- * (unless it's the same partial block as at the tail) so we copy our piece
- * on top of that and encrypt the whole. Then we encrypt any more whole
- * blocks. Lastly, if the end is unaligned (including if the head is too and
- * is in the same block) we should have read the whole into tail_data so we
- * copy our piece into that and encrypt.
- *
- * TBD: the encryption should be done with an IV based on the file ID and
- * block number, not a constant.
- */
-int32_t
-crypt_rmw_done (call_frame_t *frame, xlator_t *this)
-{
- crypt_wlocal_t *local = frame->local;
- crypt_private_t *priv = this->private;
- int32_t v_index = 0;
- size_t v_offset = 0;
- size_t b_offset = 0;
- int32_t niov = 0;
- size_t to_go = 0;
-
- if (local->op_ret < 0) {
- goto err;
- }
-
- /* Handle partial block at head. */
- b_offset = local->head_resid;
- while (b_offset && (v_index < local->count)) {
- to_go = local->vector[v_index].iov_len;
- if (to_go > (priv->block_size - b_offset)) {
- to_go = priv->block_size - b_offset;
- memcpy(&local->head_data[b_offset],
- local->vector[v_index].iov_base, to_go);
- v_offset = to_go;
- }
- else {
- memcpy(&local->head_data[b_offset],
- local->vector[v_index].iov_base, to_go);
- ++v_index;
- v_offset = 0;
- }
- b_offset = (b_offset + to_go) % priv->block_size;
- }
- if (local->head_resid) {
- good_encrypt(priv,local->head_data,priv->block_size);
- local->my_iov[niov].iov_base = local->head_data;
- local->my_iov[niov].iov_len = priv->block_size;
- ++niov;
- }
- /* Handle whole blocks in the middle. */
- while (v_index < local->count) {
- b_offset = 0;
- to_go = local->vector[v_index].iov_len - v_offset;
- while (to_go >= priv->block_size) {
- if (!local->is_retry) {
- good_encrypt(priv,
- (char *)(local->vector[v_index].iov_base)
- + v_offset + b_offset,
- priv->block_size);
- }
- b_offset += priv->block_size;
- to_go -= priv->block_size;
- }
- if (!b_offset) {
- break;
- }
- local->my_iov[niov].iov_base
- = (char *)(local->vector[v_index].iov_base) + v_offset;
- local->my_iov[niov].iov_len = b_offset;
- ++niov;
- if (to_go) {
- v_offset += b_offset;
- break;
- }
- ++v_index;
- v_offset = 0;
- }
- /*
- * Handle partial block at tail. This might be a partial block
- * following other whole or partial blocks that were handled above, or
- * it might be a partial block that's unaligned at both ends (i.e. all
- * within a single block). We detect this last case in crypt_writev
- * and clear local->head_resid, so if the current and "obvious" values
- * for that don't match then we know we're in that case and need to
- * fill at the unaligned offset. In all other cases, fill at zero.
- */
- if (v_index < local->count) {
- b_offset = local->orig_offset % priv->block_size;
- if (b_offset && local->head_resid) {
- b_offset = 0;
- }
- to_go = local->vector[v_index].iov_len - v_offset;
- memcpy(&local->tail_data[b_offset],
- (char *)(local->vector[v_index].iov_base) + v_offset,
- to_go);
- if (local->hit_eof) {
- b_offset += to_go;
- }
- else {
- b_offset = priv->block_size;
- }
- good_encrypt(priv,local->tail_data,b_offset);
- local->my_iov[niov].iov_base = local->tail_data;
- local->my_iov[niov].iov_len = b_offset;
- ++niov;
- if (++v_index != local->count) {
- gf_log(this->name,GF_LOG_ERROR,"used %d/%d iovs",
- v_index, local->count);
- goto err;
- }
- }
-
- /*
- * We always start our writes at block boundaries, so we subtract the
- * "residue" before passing the offset to the next translator. Since
- * local->head_resid might have been pounded in the middle-of-block
- * case, we just recalculate its "obvious" value here.
- */
- b_offset = local->orig_offset % priv->block_size;
- STACK_WIND (frame,
- crypt_writev_cbk,
- FIRST_CHILD (this),
- FIRST_CHILD (this)->fops->writev,
- local->fd, local->my_iov, niov,
- local->orig_offset - b_offset, local->iobref);
-
- return 0;
-
-err:
- iobref_unref(local->iobref);
- STACK_UNWIND_STRICT(writev,frame,local->op_ret,local->op_errno,
- NULL,NULL);
- return 0;
-}
-
-int32_t
-crypt_head_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iovec *vector,
- int32_t count, struct iatt *stbuf, struct iobref *iobref)
-{
- crypt_wlocal_t *local = frame->local;
- uint32_t call_count = 0;
- int32_t v = 0;
- size_t offset = 0;
-
- LOCK(&frame->lock);
- {
- call_count = --(local->call_count);
- if (op_ret >= 0) {
- offset = 0;
- for (v = 0; v < count; ++v) {
- memcpy(local->head_data+offset,
- vector[v].iov_base, vector[v].iov_len);
- offset += vector[v].iov_len;
- }
- }
- else {
- local->op_ret = op_ret;
- local->op_errno = op_errno;
- }
- }
- UNLOCK(&frame->lock);
-
- if (call_count) {
- return 0;
- }
-
- return crypt_rmw_done(frame,this);
-}
-
-int32_t
-crypt_tail_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iovec *vector,
- int32_t count, struct iatt *stbuf, struct iobref *iobref)
-{
- crypt_wlocal_t *local = frame->local;
- crypt_private_t *priv = this->private;
- uint32_t call_count = 0;
- int32_t v = 0;
- size_t offset = 0;
-
- LOCK(&frame->lock);
- {
- call_count = --(local->call_count);
- if (op_ret >= 0) {
- offset = 0;
- for (v = 0; v < count; ++v) {
- memcpy(local->tail_data+offset,
- vector[v].iov_base, vector[v].iov_len);
- offset += vector[v].iov_len;
- }
- if (offset < priv->block_size) {
- local->hit_eof = 1;
- }
- }
- else {
- local->op_ret = op_ret;
- local->op_errno = op_errno;
- }
- }
- UNLOCK(&frame->lock);
-
- if (call_count) {
- return 0;
- }
-
- return crypt_rmw_done(frame,this);
-}
-
-/*
- * If we need any read/modify/write cycles at either head or tail, we come
- * here first before fanning out the reads. Pretty much everything should
- * have been set up for us in crypt_writev.
- */
-int32_t
-crypt_lock_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno)
-{
- crypt_wlocal_t *local = frame->local;
- crypt_private_t *priv = this->private;
- size_t my_offset;
-
- dict_unref(local->xattr);
-
- if (local->head_resid) {
- my_offset = local->orig_offset - local->head_resid;
- STACK_WIND(frame, crypt_head_cbk, this,
- this->fops->readv, local->fd,
- priv->block_size, my_offset);
- }
-
- if (local->tail_resid) {
- my_offset = local->orig_offset + local->orig_size
- - local->tail_resid;
- STACK_WIND(frame, crypt_tail_cbk, this,
- this->fops->readv, local->fd,
- priv->block_size, my_offset);
- }
-
- return 0;
-}
int32_t
crypt_writev (call_frame_t *frame,
@@ -700,126 +237,29 @@ crypt_writev (call_frame_t *frame,
off_t offset,
struct iobref *iobref)
{
- crypt_wlocal_t *local = NULL;
crypt_private_t *priv = this->private;
int32_t i = 0;
- size_t head_resid = 0;
- size_t tail_resid = 0;
- int32_t op_errno = ENOMEM;
-
- if (count != 1) {
- op_errno = EINVAL;
- goto err;
- }
-
- local = CALLOC(1,sizeof(*local));
- if (!local) {
- op_errno = ENOMEM;
- goto err;
- }
- frame->local = local;
+ off_t cur_off = offset;
- local->fd = fd;
- local->his_iov = vector[0];
- local->vector = &local->his_iov;
- local->count = count;
- local->iobref = iobref;
- iobref_ref(iobref);
-
- local->orig_size = 0;
for (i = 0; i < count; ++i) {
- local->orig_size += vector[i].iov_len;
- }
- local->orig_offset = offset;
-
- gf_log(this->name,GF_LOG_DEBUG,"WRITEV: size %lu, offset %ld",
- local->orig_size, local->orig_offset);
-
- head_resid = offset % priv->block_size;
- tail_resid = (offset + local->orig_size) % priv->block_size;
- /*
- * Even if we don't cross a block boundary, we still need to check
- * for EOF. Thus, prefer doing tail-only (which will handle that)
- * instead of head-only (which won't).
- */
- if (head_resid && tail_resid) {
- if ((head_resid + local->orig_size) <= priv->block_size) {
- head_resid = 0;
- }
- }
- local->head_resid = head_resid;
- local->tail_resid = tail_resid;
-
- local->is_retry = _gf_false;
- return crypt_launch(frame,this);
-
-err:
- STACK_UNWIND_STRICT(writev,frame,-1,op_errno,NULL,NULL);
- return 0;
-}
-
-int32_t
-crypt_open_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, fd_t *fd)
-{
- STACK_UNWIND_STRICT(open,frame,op_ret,op_errno,fd);
- return 0;
-}
-
-int32_t
-crypt_open (call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
- fd_t *fd, int32_t wbflags)
-{
- xlator_t *child;
-
- /*
- * We can't open O_WRONLY, because we need to do read-modify-write.
- */
- if ((flags & O_ACCMODE) == O_WRONLY) {
- flags = (flags & ~O_ACCMODE) | O_RDWR;
+ encrypt_chunk(&priv->key,vector[i].iov_base,vector[i].iov_base,
+ fd->inode->gfid,cur_off,vector[i].iov_len);
+ cur_off += vector[i].iov_len;
}
- child = FIRST_CHILD(this);
- STACK_WIND(frame,crypt_open_cbk,child,child->fops->open,
- loc,flags,fd,wbflags);
+ STACK_WIND (frame, crypt_writev_cbk,
+ FIRST_CHILD (this), FIRST_CHILD (this)->fops->writev,
+ fd, vector, count, offset, iobref);
return 0;
}
-int32_t
-crypt_create_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, fd_t *fd, inode_t *inode,
- struct iatt *buf, struct iatt *preparent,
- struct iatt *postparent)
-{
- STACK_UNWIND_STRICT(create,frame,op_ret,op_errno,fd,inode,buf,
- preparent,postparent);
- return 0;
-}
-
-int32_t
-crypt_create (call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
- mode_t mode, fd_t *fd, dict_t *params)
-{
- xlator_t *child;
-
- /*
- * We can't open O_WRONLY, because we need to do read-modify-write.
- */
- if ((flags & O_ACCMODE) == O_WRONLY) {
- flags = (flags & ~O_ACCMODE) | O_RDWR;
- }
-
- child = FIRST_CHILD(this);
- STACK_WIND(frame,crypt_create_cbk,child,child->fops->create,
- loc,flags,mode,fd,params);
- return 0;
-}
int32_t
init (xlator_t *this)
{
crypt_private_t *priv = NULL;
data_t *data = NULL;
+ unsigned char ckey[16];
if (!this->children || this->children->next) {
gf_log ("crypt", GF_LOG_ERROR,
@@ -846,12 +286,9 @@ init (xlator_t *this)
*/
data = dict_get(this->options,"key");
if (data) {
- strncpy((char *)&priv->key,data->data,sizeof(priv->key));
- DES_set_odd_parity(&priv->key);
- if (DES_set_key_checked(&priv->key,&priv->sched) < 0) {
- gf_log(this->name,GF_LOG_ERROR,"DES_set_key failed");
- return EIO;
- }
+ memset(ckey,0,sizeof(ckey));
+ strncpy((char *)ckey,data->data,sizeof(ckey));
+ AES_set_encrypt_key(ckey,sizeof(ckey)*8,&priv->key);
}
else {
gf_log(this->name,GF_LOG_ERROR,"key missing");
@@ -875,8 +312,6 @@ fini (xlator_t *this)
struct xlator_fops fops = {
.readv = crypt_readv,
.writev = crypt_writev,
- .open = crypt_open,
- .create = crypt_create
};
struct xlator_cbks cbks = {
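One behavioral note on the init() key handling in this version (later replaced by the crypt_set_key() change shown earlier in this digest): the "key" option is taken as raw text and always used as an AES-128 key, so strncpy() zero-pads anything shorter than 16 bytes and silently drops anything beyond the 16th character. A short illustration, with a made-up key string:

#include <string.h>
#include <openssl/aes.h>

/* Illustrative fragment, not in the patch: how a short text key is padded. */
void
show_text_key_padding (AES_KEY *key)
{
        unsigned char ckey[16];

        memset(ckey, 0, sizeof(ckey));
        strncpy((char *)ckey, "secret", sizeof(ckey)); /* made-up key text */
        /* ckey is 's' 'e' 'c' 'r' 'e' 't' plus ten 0x00 bytes; it is used as
         * a full 128-bit key, and characters past the 16th in a longer
         * option value would never reach the cipher. */
        AES_set_encrypt_key(ckey, sizeof(ckey) * 8, key);
}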
diff --git a/xlators/encryption/crypt/src/crypt.h b/xlators/encryption/crypt/src/crypt.h
index fde242b..0c7e5ea 100644
--- a/xlators/encryption/crypt/src/crypt.h
+++ b/xlators/encryption/crypt/src/crypt.h
@@ -26,42 +26,28 @@
#include "config.h"
#endif
-#include <openssl/des.h>
+#include <limits.h>
+#include <openssl/aes.h>
+
+#define GFID_SIZE 16
+#define BLOCK_SIZE 1024
+
+#define DPRINTF(fmt,args...) do { \
+ gf_log(__func__,GF_LOG_DEBUG,fmt,##args); \
+} while (0);
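A small note on the DPRINTF definition above: the semicolon after while (0) defeats the usual swallow-the-semicolon idiom, so a call such as if (cond) DPRINTF("x\n"); else something_else(); will not compile. The conventional form (an editorial suggestion, not part of the patch) drops that trailing semicolon:

#define DPRINTF(fmt,args...) do { \
        gf_log(__func__,GF_LOG_DEBUG,fmt,##args); \
} while (0)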
typedef struct {
- uint32_t block_size;
- DES_cblock key;
- DES_key_schedule sched;
+ uint32_t block_size;
+ AES_KEY key;
} crypt_private_t;
typedef struct {
- /* For reads. */
- size_t orig_size;
- off_t orig_offset;
- size_t my_size;
- off_t my_offset;
+ off_t offset;
+ uuid_t gfid;
} crypt_rlocal_t;
typedef struct {
- /* For writes. */
- size_t orig_size;
- off_t orig_offset;
- fd_t *fd;
- struct iovec *vector;
- int32_t count;
- struct iobref *iobref;
- uint32_t call_count;
- int32_t op_ret;
- int32_t op_errno;
- struct iovec my_iov[3];
- struct iovec his_iov;
- char head_data[4096]; /* TBD: should be variable size. */
- char tail_data[4096]; /* TBD: should be variable size. */
- int hit_eof;
- size_t head_resid;
- size_t tail_resid;
- dict_t *xattr;
- gf_boolean_t is_retry;
+ int not_needed;
} crypt_wlocal_t;
#endif /* __CRYPT_H__ */
12 years, 4 months
Branch 'cloudfsd' - 4 commits - scripts/cfs_add_directory.py scripts/cfs_add_node.py scripts/cfs_add_tenant.py scripts/cfs_add_volume.py scripts/cfs_delete_tenant.py scripts/cfs_enable_tenant.py scripts/cfs_list_tenants.py scripts/cfs_list_vols.py scripts/cfs_paths.py scripts/cfs_rm_volume.py scripts/cfs_start_volume.py scripts/cfs_stop_volume.py scripts/cfs_utils.py scripts/cloudfsd.py scripts/views ToDo
by Jeff Darcy
ToDo | 4 --
scripts/cfs_add_directory.py | 18 +++++++++----
scripts/cfs_add_node.py | 7 ++++-
scripts/cfs_add_tenant.py | 15 ++++++++---
scripts/cfs_add_volume.py | 14 ++++++----
scripts/cfs_delete_tenant.py | 15 ++++++++---
scripts/cfs_enable_tenant.py | 17 +++++++++---
scripts/cfs_list_tenants.py | 14 ++++++++++
scripts/cfs_list_vols.py | 16 +++++++++++
scripts/cfs_paths.py | 6 ++--
scripts/cfs_rm_volume.py | 55 +++++++++++++++++++++++++++++++++++++++++
scripts/cfs_start_volume.py | 30 +++++++++++++++++-----
scripts/cfs_stop_volume.py | 14 +++++++++-
scripts/cfs_utils.py | 9 +++++-
scripts/cloudfsd.py | 9 ++++++
scripts/views/rm_vol_done.html | 15 +++++++++++
scripts/views/volumes.html | 3 +-
17 files changed, 223 insertions(+), 38 deletions(-)
New commits:
commit 04d9caaab692c3ca819e1b3038099c81eee058a5
Author: Jeff Darcy <jdarcy@redhat.com>
Date: Thu May 12 15:42:11 2011 -0400
Added CLI programs to list volumes/tenants.
diff --git a/ToDo b/ToDo
index 696245b..2fbbcb9 100644
--- a/ToDo
+++ b/ToDo
@@ -1,5 +1,5 @@
= High Priority =
-Add CLI equivalents to list nodes/volumes/tenants
+(nothing left)
= Medium Priority =
SSL
diff --git a/scripts/cfs_list_tenants.py b/scripts/cfs_list_tenants.py
new file mode 100755
index 0000000..3446418
--- /dev/null
+++ b/scripts/cfs_list_tenants.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+
+import cfs_utils
+
+db_obj = cfs_utils.open_db()
+
+for tenant in [t for t in db_obj.keys() if t.startswith("tv_")]:
+ print "%s:" % t[3:]
+ vol_list = db_obj[tenant]
+ if vol_list == "":
+ print " (no volumes enabled)"
+ else:
+ for vol in vol_list.split(","):
+ print " %s enabled" % vol
diff --git a/scripts/cfs_list_vols.py b/scripts/cfs_list_vols.py
new file mode 100755
index 0000000..f26f3b8
--- /dev/null
+++ b/scripts/cfs_list_vols.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
+import cfs_utils
+
+brick_list = cfs_utils.get_bricks()
+vol_list = brick_list.keys()[:]
+vol_list.sort()
+db_obj = cfs_utils.open_db()
+
+for vol in vol_list:
+ if db_obj.has_key("vt_"+vol):
+ print "Volume %s (CloudFS):" % vol
+ else:
+ print "Volume %s (GlusterFS):" % vol
+ for brick in brick_list[vol]:
+ print " %s" % brick
commit 6a1ba723db3dee6496350f5a9e6f59741645ee6b
Author: Jeff Darcy <jdarcy@redhat.com>
Date: Thu May 12 15:26:18 2011 -0400
Fixed CLI versions of most functions.
diff --git a/ToDo b/ToDo
index fec4353..696245b 100644
--- a/ToDo
+++ b/ToDo
@@ -1,5 +1,5 @@
= High Priority =
-Make sure CLI equivalents work
+Add CLI equivalents to list nodes/volumes/tenants
= Medium Priority =
SSL
diff --git a/scripts/cfs_add_directory.py b/scripts/cfs_add_directory.py
old mode 100644
new mode 100755
index 508f501..1c9f0b9
--- a/scripts/cfs_add_directory.py
+++ b/scripts/cfs_add_directory.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
import os
import re
@@ -78,12 +79,16 @@ def add_local (path):
xp_list = []
paths_by_node[k[3:]] = set(xp_list)
# Add the user's paths.
+ members = cfs_utils.get_members()
for p in expand(path):
parts = string.split(p,":")
if len(parts) != 2:
- return "add_local(%s) rejected %s on %s" %(
+ return "add_local(%s) rejected %s on %s (no node)" %(
path, p, socket.gethostname())
node, dir = parts
+ if node not in members:
+ return "add_local(%s) rejected %s on %s (bad node)" %(
+ path, p, socket.gethostname())
if paths_by_node.has_key(node):
paths_by_node[node].add(dir)
else:
@@ -108,13 +113,16 @@ def run_common (path):
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
- return template("add_dir_done.html",path=path,blob=blob);
+ return blob
def run_www ():
path = request.forms.get("path")
- return run_common(path)
+ blob = run_common(path)
+ return template("add_dir_done.html",path=path,blob=blob);
if __name__ == "__main__":
path = sys.argv[1]
- #run_common(tn_name,tn_pw)
- print add_local(path)
+ blob = run_common(path)
+ print "Directory %s added." % path
+ cfs_utils.print_blob(blob)
+
diff --git a/scripts/cfs_add_node.py b/scripts/cfs_add_node.py
old mode 100644
new mode 100755
index 497db36..b9bb7c4
--- a/scripts/cfs_add_node.py
+++ b/scripts/cfs_add_node.py
@@ -50,4 +50,9 @@ def run_www ():
return run_common(node_name)
if __name__ == "__main__":
- run_common(sys.argv[1])
+ if len(sys.argv) != 2:
+ print >> sys.stderr, "Usage: %s node_name_or_addr" % sys.argv[0]
+ sys.exit(1)
+ name = sys.argv[1]
+ run_common(name)
+ print "Node %s added." % name
diff --git a/scripts/cfs_add_tenant.py b/scripts/cfs_add_tenant.py
old mode 100644
new mode 100755
index d36837e..d460367
--- a/scripts/cfs_add_tenant.py
+++ b/scripts/cfs_add_tenant.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
import os
import re
@@ -34,15 +35,21 @@ def run_common (tn_name, tn_pw):
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
- return template("tn_act_done.html",name=tn_name,action="added",
- blob=blob);
+ return blob
def run_www ():
tn_name = request.forms.get("tn_name")
tn_pw = request.forms.get("tn_pw")
- return run_common(tn_name,tn_pw)
+ blob = run_common(tn_name,tn_pw)
+ return template("tn_act_done.html",name=tn_name,action="added",
+ blob=blob);
if __name__ == "__main__":
+ if len(sys.argv) != 3:
+ print >> sys.stderr, "Usage: %s name password" % sys.argv[0]
+ sys.exit(1)
tn_name = sys.argv[1]
tn_pw = sys.argv[2]
- run_common(tn_name,tn_pw)
+ blob = run_common(tn_name,tn_pw)
+ print "Tenant %s added." % tn_name
+ cfs_utils.print_blob(blob)
diff --git a/scripts/cfs_add_volume.py b/scripts/cfs_add_volume.py
old mode 100644
new mode 100755
index 4af663b..32bdc60
--- a/scripts/cfs_add_volume.py
+++ b/scripts/cfs_add_volume.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
import os
import re
@@ -23,10 +24,10 @@ def run_common (vname, vtype, vcount, bricks):
if vtype != "plain":
cmd = "%s %s %s" % (cmd, vtype, vcount)
cmd = "%s %s" % (cmd, string.join(bricks))
+ print cmd
sts = cfs_utils.run_cmd("gluster",cmd).wait()
if sts:
- return template("add_vol_fail.html", name=vname,
- action="gluster", status=sts)
+ return [["gluster",["command failed with %d"%sts]]]
blob = []
for node in cfs_utils.get_nodes_for_vol(vname):
scratch = [node,[]]
@@ -40,7 +41,7 @@ def run_common (vname, vtype, vcount, bricks):
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
- return template("add_vol_done.html",name=vname,blob=blob);
+ return blob
def run_www ():
volume_id = request.forms.get("VOLUMEID")
@@ -50,12 +51,15 @@ def run_www ():
for prop in request.forms.iterkeys():
if prop.startswith("VOLUME_"):
brick_list.append(prop[7:])
- return run_common(volume_id,volume_type,replica_or_stripe_count,
+ blob = run_common(volume_id,volume_type,replica_or_stripe_count,
brick_list)
+ return template("add_vol_done.html",name=volume_id,blob=blob);
if __name__ == "__main__":
volume_id = sys.argv[1]
volume_type = sys.argv[2]
replica_or_stripe_count = sys.argv[3]
brick_list = sys.argv[4:]
- run_common(volume_id,volume_type,replica_or_stripe_count,brick_list)
+ blob = run_common(volume_id,volume_type,replica_or_stripe_count,brick_list)
+ print "Volume %s added." % volume_id
+ cfs_utils.print_blob(blob)
diff --git a/scripts/cfs_delete_tenant.py b/scripts/cfs_delete_tenant.py
old mode 100644
new mode 100755
index ebbd35d..92c11d7
--- a/scripts/cfs_delete_tenant.py
+++ b/scripts/cfs_delete_tenant.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
import os
import re
@@ -31,12 +32,18 @@ def run_common (tn_name):
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
- return template("tn_act_done.html",name=tn_name,action="deleted",
- blob=blob);
+ return blob
def run_www (tn_name):
- return run_common(tn_name)
+ blob = run_common(tn_name)
+ return template("tn_act_done.html",name=tn_name,action="deleted",
+ blob=blob);
if __name__ == "__main__":
+ if len(sys.argv) != 2:
+ print >> sys.stderr, "Usage: %s name" % sys.argv[0]
+ sys.exit(1)
tn_name = sys.argv[1]
- run_common(tn_name)
+ blob = run_common(tn_name)
+ print "Tenant %s deleted." % tn_name
+ cfs_utils.print_blob(blob)
diff --git a/scripts/cfs_enable_tenant.py b/scripts/cfs_enable_tenant.py
old mode 100644
new mode 100755
index 3855546..146c785
--- a/scripts/cfs_enable_tenant.py
+++ b/scripts/cfs_enable_tenant.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
import os
import re
@@ -48,8 +49,7 @@ def run_common (tn_name, vol_list):
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
- return template("tn_act_done.html",name=tn_name,action="enabled",
- blob=blob)
+ return blob
def run_www (tn_name):
vol_list = []
@@ -57,8 +57,17 @@ def run_www (tn_name):
print prop
if prop.startswith("VOLUME_"):
vol_list.append(prop[7:])
- return run_common(tn_name,vol_list)
+ blob = run_common(tn_name,vol_list)
+ return template("tn_act_done.html",name=tn_name,action="enabled",
+ blob=blob)
if __name__ == "__main__":
- run_common(sys.argv[1],sys.argv[2:])
+ if len(sys.argv) < 2:
+ print >> sys.stderr, "Usage: %s name [volume...]" % sys.argv[0]
+ sys.exit(1)
+ name = sys.argv[1]
+ vols = sys.argv[2:]
+ blob = run_common(name,vols)
+ print "Volumes %s enabled for %s." % (string.join(vols,","), name)
+ cfs_utils.print_blob(blob)
diff --git a/scripts/cfs_paths.py b/scripts/cfs_paths.py
index c497fe7..bf47ebd 100644
--- a/scripts/cfs_paths.py
+++ b/scripts/cfs_paths.py
@@ -8,6 +8,6 @@ log_dir = "/var/log/cloudfs"
pid_dir = "/var/run/cloudfs"
idle_subdir = os.path.join(pid_dir,".idle_ports")
used_subdir = os.path.join(pid_dir,".used_ports")
-volfile_re = re.compile("[^.]+\.(.*)\.bricks-")
+volfile_re = re.compile("(?P<vol>[^.]+)\.(?P<node>.+)\.(?P<path>[^.]+)\.vol")
CLOUDFSD_PORT = 8080
diff --git a/scripts/cfs_rm_volume.py b/scripts/cfs_rm_volume.py
old mode 100644
new mode 100755
index c871f65..bfab9b2
--- a/scripts/cfs_rm_volume.py
+++ b/scripts/cfs_rm_volume.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
import os
import re
@@ -17,7 +18,7 @@ def rm_local (vname):
db_obj["vt_"+vname] = ""
return "rm_local(%s) OK on %s" % (vname, socket.gethostname())
-def run_www (vname):
+def run_common (vname):
# TBD: all sorts of input-validity checking
nodes_for_vol = cfs_utils.get_nodes_for_vol(vname)
cmd = "volume delete %s" % vname
@@ -37,9 +38,18 @@ def run_www (vname):
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
+ return blob
+
+def run_www (vname):
+ blob = run_common(vname)
return template("rm_vol_done.html",name=vname,blob=blob);
if __name__ == "__main__":
+ if len(sys.argv) != 2:
+ print >> sys.stderr, "Usage: %s volume_name" % sys.argv[0]
+ sys.exit(0)
name = sys.argv[1]
- run_www(name)
+ blob = run_common(name)
+ print "Volume %s removed." % name
+ cfs_utils.print_blob(blob)
diff --git a/scripts/cfs_start_volume.py b/scripts/cfs_start_volume.py
old mode 100644
new mode 100755
index 031735a..11c992b
--- a/scripts/cfs_start_volume.py
+++ b/scripts/cfs_start_volume.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
import glob
import os
@@ -54,7 +55,7 @@ def scan_gfs_volfiles(vol_name):
for vf in glob.iglob(my_glob):
m = cfs_paths.volfile_re.match(os.path.basename(vf))
if m:
- this_host = m.groups(1)[0]
+ this_host = m.group("node")
this_addr = socket.getaddrinfo(this_host, 0)[0][4][0]
for addr in my_addrs:
if this_addr == addr[4][0]:
@@ -167,7 +168,7 @@ def start_local (vol_name):
return "start_local(%s) returned %d on %s\n" % (
vol_name, retcode, socket.gethostname())
-def run_www (vol_name):
+def run_common (vol_name):
node_list = cfs_utils.get_nodes_for_vol(vol_name)
blob = []
for node in node_list:
@@ -181,7 +182,18 @@ def run_www (vol_name):
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
+ return blob
+
+def run_www (vol_name):
+ blob = run_common(vol_name)
return template("start_done.html",name=vol_name,blob=blob)
if __name__ == "__main__":
- run_www(sys.argv[1])
+ if len(sys.argv) != 2:
+ print >> sys.stderr, "Usage: %s volume_name" % sys.argv[0]
+ sys.exit(1)
+ name = sys.argv[1]
+ blob = run_common(name)
+ print "Volume %s started." % name
+ cfs_utils.print_blob(blob)
+
diff --git a/scripts/cfs_stop_volume.py b/scripts/cfs_stop_volume.py
old mode 100644
new mode 100755
index 0851d6b..b93b438
--- a/scripts/cfs_stop_volume.py
+++ b/scripts/cfs_stop_volume.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
import fileinput
import glob
@@ -42,7 +43,7 @@ def stop_local (vol_name):
vol_name, retcode, socket.gethostname())
-def run_www (vol_name):
+def run_common (vol_name):
node_list = cfs_utils.get_nodes_for_vol(vol_name)
blob = []
for node in node_list:
@@ -56,7 +57,18 @@ def run_www (vol_name):
for line in url_obj:
scratch[1].append(line)
blob.append(scratch)
+ return blob
+
+def run_www (vol_name):
+ blob = run_common(vol_name)
return template("stop_done.html",name=vol_name,blob=blob)
if __name__ == "__main__":
+ if len(sys.argv) != 2:
+ print >> sys.stderr, "Usage: %s volume_name" % sys.argv[0]
+ sys.exit(1)
+ name = sys.argv[1]
+ blob = run_common(name)
+ print "Volume %s stopped." % name
+ cfs_utils.print_blob(blob)
run_www(sys.argv[1])
diff --git a/scripts/cfs_utils.py b/scripts/cfs_utils.py
index 9655b58..39a36e0 100644
--- a/scripts/cfs_utils.py
+++ b/scripts/cfs_utils.py
@@ -151,7 +151,7 @@ def get_nodes_for_vol (vol_name):
for vf in glob.iglob(my_glob):
m = cfs_paths.volfile_re.match(os.path.basename(vf))
if m:
- node_list.add(m.groups(1)[0])
+ node_list.add(m.group("node"))
return node_list
# Open our configuration database.
@@ -161,3 +161,10 @@ def get_nodes_for_vol (vol_name):
def open_db ():
db_path = os.path.join(cfs_paths.info_dir,"config.db")
return dbm.open(db_path,"c",0600)
+
+# Print a "blob" of [node, [line1, line2]] tuples/lists for CLI/debugging.
+def print_blob (blob):
+ for node, text in blob:
+ print "= %s =" % node
+ for line in text:
+ print line
commit dae24a26c655bdb57a38f95f72d7b92ccdb88b6d
Author: Jeff Darcy <jdarcy@redhat.com>
Date: Thu May 12 14:12:51 2011 -0400
Interface to remove volumes.
diff --git a/ToDo b/ToDo
index 24fa991..fec4353 100644
--- a/ToDo
+++ b/ToDo
@@ -1,5 +1,4 @@
= High Priority =
-Interface to remove volumes
Make sure CLI equivalents work
= Medium Priority =
diff --git a/scripts/cfs_rm_volume.py b/scripts/cfs_rm_volume.py
new file mode 100644
index 0000000..c871f65
--- /dev/null
+++ b/scripts/cfs_rm_volume.py
@@ -0,0 +1,45 @@
+
+import os
+import re
+import socket
+import string
+import sys
+import urllib
+import urllib2
+
+from bottle import request, template
+import cfs_paths
+import cfs_utils
+
+def rm_local (vname):
+ db_obj = cfs_utils.open_db()
+ # Can't actually delete, but clearing is functionally equivalent.
+ db_obj["vt_"+vname] = ""
+ return "rm_local(%s) OK on %s" % (vname, socket.gethostname())
+
+def run_www (vname):
+ # TBD: all sorts of input-validity checking
+ nodes_for_vol = cfs_utils.get_nodes_for_vol(vname)
+ cmd = "volume delete %s" % vname
+ kid = cfs_utils.run_cmd("gluster",cmd)
+ blob = [["gluster",kid.communicate("y\n")]]
+ sts = kid.wait()
+ if sts:
+ return template("rm_vol_done.html", name=vname, blob=blob)
+ for node in nodes_for_vol:
+ scratch = [node,[]]
+ if socket.gethostbyname(node) in cfs_utils.local_addrs:
+ url_obj = [rm_local(vname)]
+ else:
+ url = "http://%s:%d/volumes/%s/rm_local" % (
+ node, cfs_paths.CLOUDFSD_PORT, vname)
+ url_obj = urllib2.urlopen(url)
+ for line in url_obj:
+ scratch[1].append(line)
+ blob.append(scratch)
+ return template("rm_vol_done.html",name=vname,blob=blob);
+
+if __name__ == "__main__":
+ name = sys.argv[1]
+ run_www(name)
+
diff --git a/scripts/cloudfsd.py b/scripts/cloudfsd.py
index 2cc7259..8255a44 100755
--- a/scripts/cloudfsd.py
+++ b/scripts/cloudfsd.py
@@ -13,6 +13,7 @@ import cfs_utils
import cfs_add_node
import cfs_add_directory
import cfs_add_volume
+import cfs_rm_volume
import cfs_start_volume
import cfs_stop_volume
import cfs_add_tenant
@@ -68,6 +69,14 @@ def add_vol_local ():
vname = request.forms.get("vname")
return cfs_add_volume.add_local(vname)
+@route("volumes/:name/remove")
+def rm_volume (name):
+ return cfs_rm_volume.run_www(name)
+
+@route("/volumes/:name/rm_local")
+def rm_vol_local (name):
+ return cfs_rm_volume.rm_local(name)
+
@route("/volumes/:vol_name/start")
def start_volume(vol_name):
return cfs_start_volume.run_www(vol_name)
diff --git a/scripts/views/rm_vol_done.html b/scripts/views/rm_vol_done.html
new file mode 100644
index 0000000..cede10d
--- /dev/null
+++ b/scripts/views/rm_vol_done.html
@@ -0,0 +1,15 @@
+<html><head>
+<meta http-equiv="pragma" content="no-cache">
+</head><body>
+%for node, output in blob:
+ <p><b>{{node}}</b></p>
+ <pre>
+ %for line in output:
+ {{line}}
+ %end
+ </pre>
+%end
+<p>Volume {{name}} deleted.</p>
+<p><a href="/volumes">Back to volume configuration</a></p>
+<p><a href="/cfgmain">Back to main menu</a></p>
+</body></html>
diff --git a/scripts/views/volumes.html b/scripts/views/volumes.html
index 7595052..cc54f92 100644
--- a/scripts/views/volumes.html
+++ b/scripts/views/volumes.html
@@ -14,7 +14,8 @@
<p><b>{{vol_name}}</b>
<a href="/volumes/{{vol_name}}/tenants">tenants</a>
<a href="/volumes/{{vol_name}}/start">start</a>
- <a href="/volumes/{{vol_name}}/stop">stop</a></p>
+ <a href="/volumes/{{vol_name}}/stop">stop</a>
+ <a href="/volumes/{{vol_name}}/remove">remove</a></p>
<ul>
%for brick in brick_list:
<li>{{brick}}</li>
commit 81fc70f17bddfa72c77487351927aae382075a6c
Author: Jeff Darcy <jdarcy@redhat.com>
Date: Thu May 12 13:35:26 2011 -0400
Move log/pid/port directories.
diff --git a/ToDo b/ToDo
index db4cf21..24fa991 100644
--- a/ToDo
+++ b/ToDo
@@ -1,5 +1,4 @@
= High Priority =
-Create log/pid directories, move port links to /var/run
Interface to remove volumes
Make sure CLI equivalents work
diff --git a/scripts/cfs_paths.py b/scripts/cfs_paths.py
index 27e11b0..c497fe7 100644
--- a/scripts/cfs_paths.py
+++ b/scripts/cfs_paths.py
@@ -4,10 +4,10 @@ import os
gfs_dir = "/var/lib/glusterd"
info_dir = "/var/lib/cloudfs"
-idle_subdir = os.path.join(info_dir,".idle_ports")
-used_subdir = os.path.join(info_dir,".used_ports")
log_dir = "/var/log/cloudfs"
pid_dir = "/var/run/cloudfs"
+idle_subdir = os.path.join(pid_dir,".idle_ports")
+used_subdir = os.path.join(pid_dir,".used_ports")
volfile_re = re.compile("[^.]+\.(.*)\.bricks-")
CLOUDFSD_PORT = 8080
diff --git a/scripts/cfs_start_volume.py b/scripts/cfs_start_volume.py
index c0b8ce7..031735a 100644
--- a/scripts/cfs_start_volume.py
+++ b/scripts/cfs_start_volume.py
@@ -17,12 +17,18 @@ import cfs_utils
# Make sure the volume directory exists and has the right stuff in it.
def check_volume_directory(vol_name):
if not os.path.exists(cfs_paths.info_dir):
- os.mkdir(cfs_paths.info_dir)
- os.mkdir(cfs_paths.idle_subdir)
+ os.mkdir(cfs_paths.info_dir,0700)
+ if not os.path.exists(cfs_paths.log_dir):
+ os.mkdir(cfs_paths.log_dir,0700)
+ if not os.path.exists(cfs_paths.pid_dir):
+ os.mkdir(cfs_paths.pid_dir,0700)
+ if not os.path.exists(cfs_paths.idle_subdir):
+ os.mkdir(cfs_paths.idle_subdir,0700)
for i in range(24010, 24030):
fp = open("%s/%d" % (cfs_paths.idle_subdir, i), "w")
fp.close()
- os.mkdir(cfs_paths.used_subdir)
+ if not os.path.exists(cfs_paths.used_subdir):
+ os.mkdir(cfs_paths.used_subdir,0700)
vol_dir = "%s/%s" % (cfs_paths.info_dir, vol_name)
if not os.path.exists(vol_dir):
12 years, 4 months