cluster: RHEL510 - fsck.gfs2: Journals not properly checked

Bob Peterson rpeterso at fedoraproject.org
Fri Apr 5 13:46:25 UTC 2013


Gitweb:        http://git.fedorahosted.org/git/?p=cluster.git;a=commitdiff;h=979447cfad1f18bbcf617511ae40742903690fdf
Commit:        979447cfad1f18bbcf617511ae40742903690fdf
Parent:        d873d2a33c42baa8063c6975e430756c9fbbb1e2
Author:        Bob Peterson <rpeterso at redhat.com>
AuthorDate:    Fri Jan 6 17:19:25 2012 -0600
Committer:     Bob Peterson <rpeterso at redhat.com>
CommitterDate: Fri Apr 5 06:25:01 2013 -0700

fsck.gfs2: Journals not properly checked

The Coverity tool flagged an issue with the previous set of patches that
led me to discover a problem with how journals were being processed
under the new code.  The problem was that the journal index was read
in after journal replay.  But journal replay relies upon information
in the jindex.  This patch rearranges the initialization sequence
of fsck.gfs2 so that the journal index and rindex are read in and
processed prior to journal recovery.

rhbz#877150
---
 gfs2/fsck/fs_recovery.c |   12 ----
 gfs2/fsck/initialize.c  |  164 ++++++++++++++++++++++++++++++++---------------
 2 files changed, 112 insertions(+), 64 deletions(-)
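
For context, the ordering this patch establishes can be sketched roughly as
follows.  This is a simplified illustration only: the wrapper name
init_order_sketch is made up here, and error paths and unrelated setup are
omitted.  The function and field names (init_rindex, init_jindex,
replay_journals, ji_update, FSCK_ERROR) follow the diff below; the real
code is in the diff itself.

/* Sketch of the new initialization order; not the actual fsck.gfs2 source. */
static int init_order_sketch(struct gfs2_sbd *sdp, int preen,
			     int force_check, int *clean_journals)
{
	/* 1. rindex first: if the jindex is missing, rebuilding it needs
	   valid rgrps in memory, and those come from the rindex. */
	if (init_rindex(sdp))
		return FSCK_ERROR;

	/* 2. jindex next: replay_journals() walks sdp->md.journal[],
	   which ji_update() populates from the jindex. */
	if (init_jindex(sdp))
		return FSCK_ERROR;

	/* 3. Only now is it safe to replay the journals. */
	if (replay_journals(sdp, preen, force_check, clean_journals))
		return FSCK_ERROR;

	return 0;
}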

diff --git a/gfs2/fsck/fs_recovery.c b/gfs2/fsck/fs_recovery.c
index 94113ca..61c3b9d 100644
--- a/gfs2/fsck/fs_recovery.c
+++ b/gfs2/fsck/fs_recovery.c
@@ -584,14 +584,6 @@ int replay_journals(struct gfs2_sbd *sdp, int preen, int force_check,
 	*clean_journals = 0;
 
 	sdp->jsize = GFS2_DEFAULT_JSIZE;
-	/* Get master dinode */
-	gfs2_lookupi(sdp->master_dir, "jindex", 6, &sdp->md.jiinode);
-
-	/* read in the journal index data */
-	if (ji_update(sdp)){
-		log_err( _("Unable to read in jindex inode.\n"));
-		return -1;
-	}
 
 	for(i = 0; i < sdp->md.journals; i++) {
 		if (!sdp->md.journal[i]) {
@@ -620,11 +612,7 @@ int replay_journals(struct gfs2_sbd *sdp, int preen, int force_check,
 			}
 			*clean_journals += clean;
 		}
-		inode_put(&sdp->md.journal[i]);
 	}
-	inode_put(&sdp->md.jiinode);
-	free(sdp->md.journal);
-	sdp->md.journal = NULL;
 	/* Sync the buffers to disk so we get a fresh start. */
 	fsync(sdp->device_fd);
 	return error;
diff --git a/gfs2/fsck/initialize.c b/gfs2/fsck/initialize.c
index 8ab5f48..537cae5 100644
--- a/gfs2/fsck/initialize.c
+++ b/gfs2/fsck/initialize.c
@@ -465,18 +465,13 @@ static void lookup_per_node(struct gfs2_sbd *sdp, int allow_rebuild)
 }
 
 /**
- * init_system_inodes
- *
- * Returns: 0 on success, -1 on failure
+ * fetch_rgrps - fetch the resource groups from disk, and check their integrity
  */
-static int init_system_inodes(struct gfs2_sbd *sdp)
+static int fetch_rgrps(struct gfs2_sbd *sdp)
 {
-	uint64_t inumbuf;
-	char *buf;
-	struct gfs2_statfs_change sc;
-	int rgcount, sane = 1;
 	enum rgindex_trust_level trust_lvl;
-	uint64_t addl_mem_needed;
+	int rgcount, sane = 1;
+
 	const char *level_desc[] = {
 		_("Checking if all rgrp and rindex values are good"),
 		_("Checking if rindex values may be easily repaired"),
@@ -491,42 +486,6 @@ static int init_system_inodes(struct gfs2_sbd *sdp)
 		_("Too many rgrp misses: rgrps must be unevenly spaced"),
 		_("Too much damage found: we cannot rebuild this rindex"),
 	};
-
-	/*******************************************************************
-	 ******************  Initialize important inodes  ******************
-	 *******************************************************************/
-
-	log_info( _("Initializing special inodes...\n"));
-
-	/* Get root dinode */
-	sdp->md.rooti = inode_read(sdp, sdp->sd_sb.sb_root_dir.no_addr);
-
-	gfs2_lookupi(sdp->master_dir, "rindex", 6, &sdp->md.riinode);
-	if (!sdp->md.riinode) {
-		if (query( _("The gfs2 system rindex inode is missing. "
-			     "Okay to rebuild it? (y/n) ")))
-			build_rindex(sdp);
-	}
-
-	/*******************************************************************
-	 ******************  Fill in journal information  ******************
-	 *******************************************************************/
-
-	/* rgrepair requires the journals be read in in order to distinguish
-	   "real" rgrps from rgrps that are just copies left in journals. */
-	gfs2_lookupi(sdp->master_dir, "jindex", 6, &sdp->md.jiinode);
-	if (!sdp->md.jiinode) {
-		if (query( _("The gfs2 system jindex inode is missing. "
-			     "Okay to rebuild it? (y/n) ")))
-			build_jindex(sdp);
-	}
-
-	/* read in the ji data */
-	if (ji_update(sdp)){
-		log_err( _("Unable to read in jindex inode.\n"));
-		return -1;
-	}
-
 	/*******************************************************************
 	 ********  Validate and read in resource group information  ********
 	 *******************************************************************/
@@ -561,6 +520,34 @@ static int init_system_inodes(struct gfs2_sbd *sdp)
 	log_info( _("%u resource groups found.\n"), rgcount);
 
 	check_rgrps_integrity(sdp);
+	return 0;
+}
+
+/**
+ * init_system_inodes
+ *
+ * Returns: 0 on success, -1 on failure
+ */
+static int init_system_inodes(struct gfs2_sbd *sdp)
+{
+	uint64_t inumbuf = 0;
+	char *buf;
+	struct gfs2_statfs_change sc;
+	uint64_t addl_mem_needed;
+	int err;
+
+	/*******************************************************************
+	 ******************  Initialize important inodes  ******************
+	 *******************************************************************/
+
+	log_info( _("Initializing special inodes...\n"));
+
+	/* Get root dinode */
+	sdp->md.rooti = inode_read(sdp, sdp->sd_sb.sb_root_dir.no_addr);
+
+	err = fetch_rgrps(sdp);
+	if (err)
+		return err;
 
 	/*******************************************************************
 	 *****************  Initialize more system inodes  *****************
@@ -1125,7 +1112,75 @@ static int fill_super_block(struct gfs2_sbd *sdp)
 		if (read_sb(sdp) < 0)
 			return -1;
 	}
+	return 0;
+}
 
+/**
+ * init_rindex - read in the rindex file
+ */
+static int init_rindex(struct gfs2_sbd *sdp)
+{
+	int err;
+
+	gfs2_lookupi(sdp->master_dir, "rindex", 6, &sdp->md.riinode);
+
+	if (sdp->md.riinode)
+		return 0;
+
+	if (!query( _("The gfs2 system rindex inode is missing. "
+		      "Okay to rebuild it? (y/n) "))) {
+		log_crit(_("Error: Cannot proceed without a valid rindex.\n"));
+		return -1;
+	}
+	if ((err = build_rindex(sdp))) {
+		log_crit(_("Error %d rebuilding rindex\n"), err);
+		return -1;
+	}
+	return 0;
+}
+
+/**
+ * init_jindex - read in the jindex file
+ */
+static int init_jindex(struct gfs2_sbd *sdp)
+{
+	/*******************************************************************
+	 ******************  Fill in journal information  ******************
+	 *******************************************************************/
+
+	/* rgrepair requires the journals be read in in order to distinguish
+	   "real" rgrps from rgrps that are just copies left in journals. */
+	gfs2_lookupi(sdp->master_dir, "jindex", 6, &sdp->md.jiinode);
+
+	if (!sdp->md.jiinode) {
+		int err;
+
+		if (!query( _("The gfs2 system jindex inode is missing. "
+			      "Okay to rebuild it? (y/n) "))) {
+			log_crit(_("Error: cannot proceed without a valid "
+				   "jindex file.\n"));
+			return -1;
+		}
+		/* In order to rebuild jindex, we need some valid
+		   rgrps in memory.  Temporarily read those in. */
+		err = fetch_rgrps(sdp);
+		if (err)
+			return err;
+
+		err = build_jindex(sdp);
+		/* Free rgrps read in earlier (re-read them later) */
+		gfs2_rgrp_free(&sdp->rglist);
+		if (err) {
+			log_crit(_("Error %d rebuilding jindex\n"), err);
+			return err;
+		}
+	}
+
+	/* read in the ji data */
+	if (ji_update(sdp)){
+		log_err( _("Unable to read in jindex inode.\n"));
+		return -1;
+	}
 	return 0;
 }
 
@@ -1211,7 +1266,17 @@ int initialize(struct gfs2_sbd *sdp, int force_check, int preen,
 	   our journals to be there before we can replay them. */
 	lookup_per_node(sdp, 0);
 
-	/* verify various things */
+	/* We need rindex first in case jindex is missing and needs to read
+	   in the rgrps before rebuilding it. */
+	if (init_rindex(sdp))
+		return FSCK_ERROR;
+
+	/* We need to read in jindex in order to replay the journals */
+	if (init_jindex(sdp))
+		return FSCK_ERROR;
+
+	/* If GFS, rebuild the journals.  If GFS2, replay them.  We don't have
+	   the smarts to replay GFS1 journals (neither did gfs_fsck). */
 
 	if (replay_journals(sdp, preen, force_check, &clean_journals)) {
 		if (!opts.no && preen_is_safe(sdp, preen, force_check))
@@ -1239,7 +1304,7 @@ mount_fail:
 	return FSCK_USAGE;
 }
 
-static void destroy_sdp(struct gfs2_sbd *sdp)
+void destroy(struct gfs2_sbd *sdp)
 {
 	if (!opts.no) {
 		if (block_mounters(sdp, 0)) {
@@ -1261,8 +1326,3 @@ static void destroy_sdp(struct gfs2_sbd *sdp)
 				   "caches.\n"));
 	}
 }
-
-void destroy(struct gfs2_sbd *sdp)
-{
-	destroy_sdp(sdp);
-}

