fence-agents: master - overall cleanup and make ready for release

Fabio M. Di Nitto fabbione at fedoraproject.org
Fri Nov 5 09:44:00 UTC 2010


Gitweb:        http://git.fedorahosted.org/git/fence-agents.git?p=fence-agents.git;a=commitdiff;h=79336e5ef88fc33f40306466e48fafca9532d452
Commit:        79336e5ef88fc33f40306466e48fafca9532d452
Parent:        c3491b707166d877037d0f65ce7117396398d6af
Author:        Fabio M. Di Nitto <fdinitto at redhat.com>
AuthorDate:    Fri Nov 5 05:43:37 2010 -0400
Committer:     Fabio M. Di Nitto <fdinitto at redhat.com>
CommitterDate: Fri Nov 5 05:43:37 2010 -0400

overall cleanup and make ready for release

Signed-off-by: Fabio M. Di Nitto <fdinitto at redhat.com>
---
 .gitignore                             |    2 +
 Makefile.am                            |    5 +-
 configure.ac                           |   32 +------
 doc/Makefile.am                        |    6 +-
 doc/gfs2.txt                           |   45 --------
 doc/journaling.txt                     |  155 ----------------------------
 doc/min-gfs.txt                        |  159 ----------------------------
 doc/usage.txt                          |  177 --------------------------------
 fence/agents/node_assassin/Makefile.am |    2 +-
 make/fenceman.mk                       |    2 +-
 10 files changed, 9 insertions(+), 576 deletions(-)

diff --git a/.gitignore b/.gitignore
index d5dadff..cd80ece 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,8 +5,10 @@ autoconf
 autoheader
 autom4te.cache
 automake
+autoscan.log
 compile
 configure
+configure.scan
 config.guess
 config.log
 config.sub
diff --git a/Makefile.am b/Makefile.am
index 583ff0e..7fb4d60 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -6,7 +6,8 @@ MAINTAINERCLEANFILES	= Makefile.in aclocal.m4 configure depcomp \
 			  config.guess config.sub missing install-sh \
 			  autoheader automake autoconf libtool libtoolize \
 			  ltmain.sh compile make/clusterautoconfig.h.in \
-			  make/clusterautoconfig.h.in~
+			  make/clusterautoconfig.h.in~ autoscan.log \
+			  configure.scan
 
 noinst_HEADERS		= make/copyright.cf
 
@@ -17,12 +18,10 @@ SUBDIRS			= fence doc
 install-exec-local:
 			$(INSTALL) -d $(DESTDIR)/$(LOGDIR)
 			$(INSTALL) -d $(DESTDIR)/$(CLUSTERVARRUN)
-			$(INSTALL) -d $(DESTDIR)/$(CLUSTERVARLIB)
 
 uninstall-local:
 			rmdir $(DESTDIR)/$(LOGDIR) || :;
 			rmdir $(DESTDIR)/$(CLUSTERVARRUN) || :;
-			rmdir $(DESTDIR)/$(CLUSTERVARLIB) || :;
 
 maintainer-clean-local:
 	rm -rf m4
diff --git a/configure.ac b/configure.ac
index 3b70cfe..6f7c6df 100644
--- a/configure.ac
+++ b/configure.ac
@@ -52,6 +52,7 @@ AM_PROG_CC_C_O
 AC_PROG_LN_S
 AC_PROG_INSTALL
 AC_PROG_MAKE_SET
+AC_PROG_AWK
 
 ## local helper functions
 
@@ -81,22 +82,14 @@ PKG_CHECK_MODULES([nss],[nss])
 PKG_CHECK_MODULES([nspr],[nspr])
 
 # Checks for header files.
-AC_CHECK_HEADERS([arpa/inet.h fcntl.h libintl.h limits.h malloc.h netdb.h netinet/in.h stddef.h stdint.h stdlib.h string.h sys/ioctl.h sys/socket.h sys/time.h syslog.h unistd.h])
+AC_CHECK_HEADERS([arpa/inet.h fcntl.h libintl.h limits.h stddef.h sys/socket.h sys/time.h syslog.h])
 
 # Checks for typedefs, structures, and compiler characteristics.
-AC_C_INLINE
-AC_TYPE_INT32_T
-AC_TYPE_PID_T
 AC_TYPE_SIZE_T
 AC_TYPE_SSIZE_T
-AC_TYPE_UINT16_T
-AC_TYPE_UINT32_T
-AC_TYPE_UINT64_T
-AC_TYPE_UINT8_T
 
 # Checks for library functions.
 AC_FUNC_FORK
-AC_FUNC_LSTAT_FOLLOWS_SLASHED_SYMLINK
 AC_FUNC_MALLOC
 AC_CHECK_FUNCS([alarm atexit bzero dup2 memmove memset select socket strcasecmp strchr strdup strerror strtol])
 
@@ -105,18 +98,6 @@ AC_ARG_ENABLE([debug],
 	[  --enable-debug          enable debug build. ],
 	[ default="no" ])
 
-AC_ARG_WITH([syslogfacility],
-	[  --syslogfacility=FACILITY
-                          cluster default syslog facility. ],
-	[ SYSLOGFACILITY="$withval" ],
-	[ SYSLOGFACILITY="LOG_LOCAL4" ])
-
-AC_ARG_WITH([sysloglevel],
-	[  --sysloglevel=LEVEL
-                          cluster default syslog level. ],
-	[ SYSLOGLEVEL="$withval" ],
-	[ SYSLOGLEVEL="LOG_INFO" ])
-
 AC_ARG_WITH([fenceagentslibdir],
 	[  --fenceagentslibdir=PATH
                           installation path for fence library. ],
@@ -145,7 +126,6 @@ AC_ARG_WITH([default-config-file],
 
 LOGDIR=${localstatedir}/log/cluster
 CLUSTERVARRUN=${localstatedir}/run/cluster
-CLUSTERVARLIB=${localstatedir}/lib/cluster
 
 ## do subst
 
@@ -165,17 +145,9 @@ AC_SUBST([CLUSTERVARRUN])
 AC_DEFINE_UNQUOTED([CLUSTERVARRUN], "$(eval echo ${CLUSTERVARRUN})",
 		   [Default cluster var/run directory])
 
-AC_SUBST([CLUSTERVARLIB])
-
 AC_SUBST([FENCEAGENTSLIBDIR])
 AC_SUBST([SNMPBIN])
 
-AC_DEFINE_UNQUOTED([SYSLOGFACILITY], $(eval echo ${SYSLOGFACILITY}),
-		   [Default syslog facility])
-
-AC_DEFINE_UNQUOTED([SYSLOGLEVEL], $(eval echo ${SYSLOGLEVEL}),
-		   [Default syslog level])
-
 ## *FLAGS handling
 
 ENV_CFLAGS="$CFLAGS"
diff --git a/doc/Makefile.am b/doc/Makefile.am
index 2e6a2ec..13035fc 100644
--- a/doc/Makefile.am
+++ b/doc/Makefile.am
@@ -1,10 +1,6 @@
 MAINTAINERCLEANFILES	= Makefile.in
 
-dist_doc_DATA		= gfs2.txt \
-			  journaling.txt \
-			  min-gfs.txt \
-			  usage.txt \
-			  COPYING.applications \
+dist_doc_DATA		= COPYING.applications \
 			  COPYING.libraries \
 			  COPYRIGHT \
 			  README.licence
diff --git a/doc/gfs2.txt b/doc/gfs2.txt
deleted file mode 100644
index 88f0143..0000000
--- a/doc/gfs2.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-Global File System
-------------------
-
-http://sources.redhat.com/cluster/
-
-GFS is a cluster file system. It allows a cluster of computers to
-simultaneously use a block device that is shared between them (with FC,
-iSCSI, NBD, etc).  GFS reads and writes to the block device like a local
-file system, but also uses a lock module to allow the computers to coordinate
-their I/O so file system consistency is maintained.  One of the nifty
-features of GFS is perfect consistency -- changes made to the file system
-on one machine show up immediately on all other machines in the cluster.
-
-GFS uses interchangeable inter-node locking mechanisms.  Different lock
-modules can plug into GFS and each file system selects the appropriate
-lock module at mount time.  Lock modules include:
-
-  lock_nolock -- does no real locking and allows gfs to be used as a
-  local file system
-
-  lock_dlm -- uses a distributed lock manager (dlm) for inter-node locking
-  The dlm is found at linux/fs/dlm/
-
-In addition to interfacing with an external locking manager, a gfs lock
-module is responsible for interacting with external cluster management
-systems.  Lock_dlm depends on user space cluster management systems found
-at the URL above.
-
-To use gfs as a local file system, no external clustering systems are
-needed, simply:
-
-  $ gfs2_mkfs -p lock_nolock -j 1 /dev/block_device
-  $ mount -t gfs2 /dev/block_device /dir
-
-GFS2 is not on-disk compatible with previous versions of GFS.
-
-The following man pages can be found at the URL above:
-  gfs2_mkfs	to make a filesystem
-  gfs2_fsck	to repair a filesystem
-  gfs2_grow	to expand a filesystem online
-  gfs2_jadd	to add journals to a filesystem online
-  gfs2_tool	to manipulate, examine and tune a filesystem
-  gfs2_quota	to examine and change quota values in a filesystem
-  mount.gfs2	to find mount options
-
diff --git a/doc/journaling.txt b/doc/journaling.txt
deleted file mode 100644
index e89eefa..0000000
--- a/doc/journaling.txt
+++ /dev/null
@@ -1,155 +0,0 @@
-o  Journaling & Replay
-
-The fundamental problem with a journaled cluster filesystem is
-handling journal replay with multiple journals.  A single block of
-metadata can be modified sequentially by many different nodes in the
-cluster.  As the block is modified by each node, it gets logged in the
-journal for each node.  If care is not taken, it's possible to get
-into a situation where a journal replay can actually corrupt a
-filesystem.  The error scenario is:
-
-1) Node A modifies a metadata block by putting an updated copy into its
-   incore log.
-2) Node B wants to read and modify the block so it requests the lock
-   and a blocking callback is sent to Node A.
-3) Node A flushes its incore log to disk, and then syncs out the
-   metadata block to its inplace location.
-4) Node A then releases the lock.
-5) Node B reads in the block and puts a modified copy into its ondisk
-   log and then the inplace block location.
-6) Node A crashes.
-
-At this point, Node A's journal needs to be replayed.  Since there is
-a newer version of block inplace, if that block is replayed, the
-filesystem will be corrupted.  There are a few different ways of
-avoiding this problem.
-
-1) Generation Numbers (GFS1)
-
-   Each metadata block has a header in it that contains a 64-bit
-   generation number.  As each block is logged into a journal, the
-   generation number is incremented.  This provides a strict ordering
-   of the different versions of the block as they are logged in the FS'
-   different journals.  When journal replay happens, each block in the
-   journal is not replayed if the generation number in the journal is less
-   than the generation number in place.  This ensures that a newer
-   version of a block is never replaced with an older version.  So,
-   this solution basically allows multiple copies of the same block in
-   different journals, but it allows you to always know which is the
-   correct one.
-
-   Pros:
-
-   A) This method allows the fastest callbacks.  To release a lock,
-      the incore log for the lock must be flushed and then the inplace
-      data and metadata must be synced.  That's it.  The sync
-      operations involved are: start the log body and wait for it to
-      become stable on the disk, synchronously write the commit block,
-      start the inplace metadata and wait for it to become stable on
-      the disk.
-
-   Cons:
-
-   A) Maintaining the generation numbers is expensive.  All newly
-      allocated metadata blocks must be read off the disk in order to
-      figure out what the previous value of the generation number was.
-      When deallocating metadata, extra work and care must be taken to
-      make sure dirty data isn't thrown away in such a way that the
-      generation numbers stop doing their thing.
-   B) You can't continue to modify the filesystem during journal
-      replay.  Basically, replay of a block is a read-modify-write
-      operation: the block is read from disk, the generation number is
-      compared, and (maybe) the new version is written out.  Replay
-      requires that the R-M-W operation is atomic with respect to
-      other R-M-W operations that might be happening (say by a normal
-      I/O process).  Since journal replay doesn't (and can't) play by
-      the normal metadata locking rules, you can't count on them to
-      protect replay.  Hence, GFS1 quiesces all writes on a filesystem
-      before starting replay.  This provides the mutual exclusion
-      required, but it's slow and unnecessarily interrupts service on
-      the whole cluster.
-
-2) Total Metadata Sync (OCFS2)
-
-   This method is really simple in that it uses exactly the same
-   infrastructure that a local journaled filesystem uses.  Every time
-   a node receives a callback, it stops all metadata modification,
-   syncs out the whole incore journal, syncs out any dirty data, marks
-   the journal as being clean (unmounted), and then releases the lock.
-   Because the journal is marked as clean and recovery won't look at any
-   of the journaled blocks in it, a valid copy of any particular block
-   only exists in one journal at a time, and that journal is always the
-   journal of the node that modified it last.
-
-   Pros:
-
-   A) Very simple to implement.
-   B) You can reuse journaling code from other places (such as JBD).
-   C) No quiesce necessary for replay.
-   D) No need for generation numbers sprinkled throughout the metadata.
-
-   Cons:
-
-   A) This method has the slowest possible callbacks.  The sync
-      operations are: stop all metadata operations, start and wait for
-      the log body, write the log commit block, start and wait for all
-      the FS' dirty metadata, write an unmount block.  Writing the
-      metadata for the whole filesystem can be particularly expensive
-      because it can be scattered all over the disk and there can be a
-      whole journal's worth of it.
-
-3) Revocation of a lock's buffers (GFS2)
-
-   This method prevents a block from appearing in more than one
-   journal by canceling out the metadata blocks in the journal that
-   belong to the lock being released.  Journaling works very similarly
-   to a local filesystem or to #2 above.
-
-   The biggest difference is you have to keep track of buffers in the
-   active region of the ondisk journal, even after the inplace blocks
-   have been written back.  This is done in GFS2 by adding a second
-   part to the Active Items List.  The first part (in GFS2 called
-   AIL1) contains a list of all the blocks which have been logged to
-   the journal, but not written back to their inplace location.  Once
-   an item in AIL1 has been written back to its inplace location, it
-   is moved to AIL2.  Once the tail of the log moves past the block's
-   transaction in the log, it can be removed from AIL2.
-
-   When a callback occurs, the log is flushed to the disk and the
-   metadata for the lock is synced to disk.  At this point, any
-   metadata blocks for the lock that are in the current active region
-   of the log will be in the AIL2 list.  We then build a transaction
-   that contains revoke tags for each buffer in the AIL2 list that
-   belongs to that lock.
-
-   Pros:
-
-   A) No quiesce necessary for replay.
-   B) No need for generation numbers sprinkled throughout the
-      metadata.
-   C) The sync operations are: stop all metadata operations, start and
-      wait for the log body, write the log commit block, start and
-      wait for all the FS' dirty metadata, start and wait for the log
-      body of a transaction that revokes any of the lock's metadata
-      buffers in the journal's active region, and write the commit
-      block for that transaction.
-
-   Cons:
-
-   A) Recovery takes two passes, one to find all the revoke tags in
-      the log and one to replay the metadata blocks using the revoke
-      tags as a filter.  This is necessary for a local filesystem and
-      the total sync method, too.  It's just that there will probably
-      be more tags.
-
-Comparing #2 and #3, both do extra I/O during a lock callback to make
-sure that any metadata blocks in the log for that lock will be
-removed.  I believe #2 will be slower because syncing out all the
-dirty metadata for the entire filesystem requires lots of little,
-scattered I/O across the whole disk.  The extra I/O done by #3 is a
-log write to the disk.  So, not only should it be less I/O, but it
-should also be better suited to get good performance out of the disk
-subsystem.
-
-KWP 07/06/05
-
diff --git a/doc/min-gfs.txt b/doc/min-gfs.txt
deleted file mode 100644
index af1399c..0000000
--- a/doc/min-gfs.txt
+++ /dev/null
@@ -1,159 +0,0 @@
-
-Minimum GFS HowTo
------------------
-
-The following gfs configuration requires a minimum amount of hardware and
-no expensive storage system.  It's the cheapest and quickest way to "play"
-with gfs.
-
-
-  ----------       ----------
-  | GNBD   |       | GNBD   |
-  | client |       | client |       <-- these nodes use gfs
-  | node2  |       | node3  |
-  ----------       ----------
-      |                |
-      ------------------  IP network
-               |
-          ----------
-          | GNBD   |
-          | server |                <-- this node doesn't use gfs
-          | node1  |
-          ----------
-
-- There are three machines to use with hostnames: node1, node2, node3
-
-- node1 has an extra disk /dev/sda1 to use for gfs
-  (this could be hda1 or an lvm LV or an md device)
-
-- node1 will use gnbd to export this disk to node2 and node3
-
-- Node1 cannot use gfs; it only acts as a gnbd server.
-  (Node1 will /not/ actually be part of the cluster since it is only
-   running the gnbd server.)
-
-- Only node2 and node3 will be in the cluster and use gfs.
-  (A two-node cluster is a special case for cman, noted in the config below.)
-
-- There's not much point to using clvm in this setup so it's left out.
-
-- Download the "cluster" source tree.
-
-- Build and install from the cluster source tree.  (The kernel components
-  are not required on node1, which will only need the gnbd_serv program.)
-
-    cd cluster
-    ./configure --kernel_src=/path/to/kernel
-    make; make install
-
-- Create /etc/cluster/cluster.conf on node2 with the following contents:
-
-<?xml version="1.0"?>
-<cluster name="gamma" config_version="1">
-
-<cman two_node="1" expected_votes="1">
-</cman>
-
-<clusternodes>
-<clusternode name="node2">
-	<fence>
-		<method name="single">
-			<device name="gnbd" ipaddr="node2"/>
-		</method>
-	</fence>
-</clusternode>
-
-<clusternode name="node3">
-	<fence>
-		<method name="single">
-			<device name="gnbd" ipaddr="node3"/>
-		</method>
-	</fence>
-</clusternode>
-</clusternodes>
-
-<fencedevices>
-	<fencedevice name="gnbd" agent="fence_gnbd" servers="node1"/>
-</fencedevices>
-
-</cluster>
-
-
-- load kernel modules on nodes
-
-node2 and node3> modprobe gnbd
-node2 and node3> modprobe gfs
-node2 and node3> modprobe lock_dlm
-
-- run the following commands
-
-node1> gnbd_serv -n
-node1> gnbd_export -c -d /dev/sda1 -e global_disk
-
-node2 and node3> gnbd_import -n -i node1
-node2 and node3> ccsd
-node2 and node3> cman_tool join
-node2 and node3> fence_tool join
-
-node2> gfs_mkfs -p lock_dlm -t gamma:gfs1 -j 2 /dev/gnbd/global_disk
-
-node2 and node3> mount -t gfs /dev/gnbd/global_disk /mnt
-
-- the end, you now have a gfs file system mounted on node2 and node3
-
-
-Appendix A
-----------
-
-To use manual fencing instead of gnbd fencing, the cluster.conf file
-would look like this:
-
-<?xml version="1.0"?>
-<cluster name="gamma" config_version="1">
-
-<cman two_node="1" expected_votes="1">
-</cman>
-
-<clusternodes>
-<clusternode name="node2">
-	<fence>
-		<method name="single">
-			<device name="manual" ipaddr="node2"/>
-		</method>
-	</fence>
-</clusternode>
-
-<clusternode name="node3">
-	<fence>
-		<method name="single">
-			<device name="manual" ipaddr="node3"/>
-		</method>
-	</fence>
-</clusternode>
-</clusternodes>
-
-<fencedevices>
-	<fencedevice name="manual" agent="fence_manual"/>
-</fencedevices>
-
-</cluster>
-
-
-FAQ
----
-
-- Why can't node1 use gfs, too?
-
-You might be able to make it work, but we recommend that you not try.
-This software was not intended or designed to allow that kind of usage.
-
-- Isn't node1 a single point of failure? How do I avoid that?
-
-Yes it is.  For the time being, there's no way to avoid that, apart from
-not using gnbd, of course.  Eventually, there will be a way to avoid this
-using cluster mirroring.
-
-- More info from
-  http://sources.redhat.com/cluster/gnbd/gnbd_usage.txt
-  http://sources.redhat.com/cluster/doc/usage.txt
-
diff --git a/doc/usage.txt b/doc/usage.txt
deleted file mode 100644
index f9e2866..0000000
--- a/doc/usage.txt
+++ /dev/null
@@ -1,177 +0,0 @@
-How to install and run GFS.
-
-Refer to the cluster project page for the latest information.
-http://sources.redhat.com/cluster/
-
-
-Install
--------
-
-Install a Linux kernel with GFS2, DLM, configfs, IPV6 and SCTP,
-  2.6.23-rc1 or later
-
-  If you want to use gfs1 (from cluster/gfs-kernel), then you need to
-  export three additional symbols from gfs2 by adding the following lines
-  to the end of linux/fs/gfs2/locking.c:
-     EXPORT_SYMBOL_GPL(gfs2_unmount_lockproto);
-     EXPORT_SYMBOL_GPL(gfs2_mount_lockproto);
-     EXPORT_SYMBOL_GPL(gfs2_withdraw_lockproto);
-
-Install openais
-  get the latest "whitetank" (stable) release from
-  http://openais.org/
-  or
-  svn checkout http://svn.osdl.org/openais
-  cd openais/branches/whitetank
-  make; make install DESTDIR=/
-
-Install gfs/dlm/fencing/etc components
-  get the latest cluster-2.xx.yy tarball from
-  ftp://sources.redhat.com/pub/cluster/
-  or
-  cvs -d :pserver:cvs@sources.redhat.com:/cvs/cluster login cvs
-  cvs -d :pserver:cvs@sources.redhat.com:/cvs/cluster checkout cluster
-  the password is "cvs"
-  cd cluster
-  ./configure --kernel_src=/path/to/kernel
-  make install
-
-  NOTE: On 64-bit systems, you will usually need to add '--libdir=/usr/lib64'
-        to the configure line.
-
-Install LVM2/CLVM (optional)
-  cvs -d :pserver:cvs@sources.redhat.com:/cvs/lvm2 login cvs
-  cvs -d :pserver:cvs@sources.redhat.com:/cvs/lvm2 checkout LVM2
-  cvs -d :pserver:cvs@sources.redhat.com:/cvs/lvm2
-  the password is "cvs"
-  cd LVM2
-  ./configure --with-clvmd=cman --with-cluster=shared
-  make; make install
-
-  NOTE: On 64-bit systems, you will usually need to add '--libdir=/usr/lib64'
-        to the configure line.
-
-Load kernel modules
--------------------
-
-modprobe gfs2
-modprobe gfs
-modprobe lock_dlm
-modprobe lock_nolock
-modprobe dlm
-
-
-Configuration
--------------
-
-Create /etc/cluster/cluster.conf and copy it to all nodes.
-
-  The format and content of cluster.conf have changed little since the
-  last generation of the software.  See old example here:
-  http://sources.redhat.com/cluster/doc/usage.txt
-  The one change you will need to make is to add nodeids for all nodes
-  in the cluster. These are now mandatory, e.g.:
-
-     <clusternode name="node12.mycluster.mycompany.com" votes="1" nodeid="12">
-
-  If you already have a cluster.conf file with no nodeids in it, then you can
-  use the 'ccs_tool addnodeids' command to add them.
-
-
-Example cluster.conf
---------------------
-
-This is a basic cluster.conf file that requires manual fencing.  The node
-names should resolve to the address on the network interface you want to
-use for openais/cman/dlm communication.
-
-<?xml version="1.0"?>
-<cluster name="alpha" config_version="1">
-
-<clusternodes>
-<clusternode name="node01" nodeid="1">
-        <fence>
-        </fence>
-</clusternode>
-
-<clusternode name="node02" nodeid="2">
-        <fence>
-        </fence>
-</clusternode>
-
-<clusternode name="node03" nodeid="3">
-        <fence>
-        </fence>
-</clusternode>
-</clusternodes>
-
-<fencedevices>
-</fencedevices>
-
-</cluster>
-
-
-Startup procedure
------------------
-
-Run these commands on each cluster node:
-
-> mount -t configfs none /sys/kernel/config
-> ccsd
-> cman_tool join
-> groupd
-> fenced
-> fence_tool join
-> dlm_controld
-> gfs_controld
-> clvmd (optional)
-> mkfs -t gfs2 -p lock_dlm -t <clustername>:<fsname> -j <#journals> <blockdev>
-> mount -t gfs2 [-v] <blockdev> <mountpoint>
-
-Notes:
-- replace "gfs2" with "gfs" above to use gfs1 instead of gfs2
-- <clustername> in mkfs should match the one in cluster.conf.
-- <fsname> in mkfs is any name you pick, each fs must have a different name.
-- <#journals> in mkfs should be greater than or equal to the number of nodes
-  that you want to mount this fs, each node uses a separate journal.
-- To avoid unnecessary fencing when starting the cluster, it's best for
-  all nodes to join the cluster (complete cman_tool join) before any
-  of them do fence_tool join.
-- The cman_tool "status" and "nodes" options show the status and members
-  of the cluster.
-- The group_tool command shows the status of fencing, dlm and gfs groups
-  that the local node is part of.
-- The "cman" init script can be used for starting everything up through
-  gfs_controld in the list above.
-
-
-Shutdown procedure
-------------------
-
-Run these commands on each cluster node:
-
-> umount [-v] <mountpoint>
-> fence_tool leave
-> cman_tool leave
-
-
-Converting from GFS1 to GFS2
-----------------------------
-
-If you have GFS1 filesystems that you need to convert to GFS2, follow
-this procedure:
-
-1. Back up your entire filesystem first.
-   e.g. cp /dev/your_vg/lvol0 /your_gfs_backup
-
-2. Run fsck to ensure filesystem integrity.
-   e.g. gfs2_fsck /dev/your_vg/lvol0
-
-3. Make sure the filesystem is not mounted from any node.
-   e.g. for i in `grep "<clusternode name" /etc/cluster/cluster.conf | cut -d '"' -f2` ; do ssh $i "mount | grep gfs" ; done
-
-4. Make sure you have the latest software versions.
-
-5. Run gfs2_convert <blockdev> from one of the nodes.
-   e.g. gfs2_convert /dev/your_vg/lvol0
-
diff --git a/fence/agents/node_assassin/Makefile.am b/fence/agents/node_assassin/Makefile.am
index af7906b..9208523 100644
--- a/fence/agents/node_assassin/Makefile.am
+++ b/fence/agents/node_assassin/Makefile.am
@@ -37,7 +37,7 @@ $(TARGET).conf: $(TARGET).conf.in
 	cat $^ | sed \
 		-e 's#@''LOGDIR@#${LOGDIR}#g' \
 		-e 's#@''CONFDIR@#${DEFAULT_CONFIG_DIR}#g' \
-		-e 's#@CONFFILE@#${DEFAULT_CONFIG_FILE}#g' \
+		-e 's#@''CONFFILE@#${DEFAULT_CONFIG_FILE}#g' \
 	> $@
 
 $(TARGET).lib: $(TARGET).lib.in
diff --git a/make/fenceman.mk b/make/fenceman.mk
index 5da0a3a..e6df9f0 100644
--- a/make/fenceman.mk
+++ b/make/fenceman.mk
@@ -1,6 +1,6 @@
 %.8: $(TARGET) $(top_srcdir)/fence/agents/lib/fence2man.xsl
 	set -e && \
-	PYTHONPATH=$(top_srcdir)/fence/agents/lib:$(top_builddir)/fence/agents/lib \
+	PYTHONPATH=$(abs_srcdir)/../lib:$(abs_builddir)/../lib \
 		python $^ -o metadata > .$@.tmp && \
 	xsltproc $(top_srcdir)/fence/agents/lib/fence2man.xsl .$@.tmp > $@
 

