[RFC] fixups for RHEV-M registration

Pete Zaitcev zaitcev at redhat.com
Tue Apr 5 06:05:38 UTC 2011


Dear Jim:

I'm sorry to dump this in one piece, but splitting up takes a while.
I'll split before committing. Please review as is.

The main problem is that RHEV-M registration in the 0.92 RPM does not work.
Everything completes fine, but then code 400 is returned to the user.
This is a regression that I introduced while "fixing" the return code
handling.

One weird thing, though: I thought I only posted this recently,
well after the 0.92 tag, but it got into the RPM somehow.

The fix is to divorce rc and ret after all. It just wasn't done
right originally.

Small fixes:
 - Fix matching with "ERROR xxxxxxx"
 - Fix the template name not to start with a useless slash
 - Free curl headers right
 - Create missing directories

Cheers,
-- Pete

diff --git a/backend.c b/backend.c
index 101f520..004f772 100644
--- a/backend.c
+++ b/backend.c
@@ -1355,7 +1355,7 @@ fs_rhevm_register (my_state *ms, const provider_t *prov, const char *next,
 	char		*nfs_dir;
 	const char	*conf_name = NULL;
 	char		*conf_text;
-	int		 rc	= MHD_HTTP_BAD_REQUEST;
+	int		 ret	= MHD_HTTP_BAD_REQUEST;
 	const char	*argv[12];
 	unsigned int	 argc = 0;
 	pid_t		 pid;
@@ -1366,6 +1366,7 @@ fs_rhevm_register (my_state *ms, const provider_t *prov, const char *next,
 	char		*s;
 	char		 ami_id_buf[64];
 	regmatch_t	 match[2];
+	int		 rc;
 
 	strcpy(ami_id_buf, "none");
 
@@ -1445,6 +1446,8 @@ fs_rhevm_register (my_state *ms, const provider_t *prov, const char *next,
 		goto cleanup;
 	}
 
+	ret = MHD_HTTP_INTERNAL_SERVER_ERROR;
+
 	rc = asprintf(&conf_text,
 		    "{\n"
 		    "  \"image\"   : \"%s/%s\",\n"
@@ -1472,7 +1475,6 @@ fs_rhevm_register (my_state *ms, const provider_t *prov, const char *next,
 	sprintf(ami_id_buf,"pending %lld",(long long)time(NULL));
 	DPRINTF("temporary ami-id = \"%s\"\n",ami_id_buf);
 	(void)meta_set_value(ms->bucket,ms->key,"ami-id",ami_id_buf);
-	rc = MHD_HTTP_INTERNAL_SERVER_ERROR;
 
 	const char *cmd = "dc-rhev-image";
 	argv[argc++] = cmd;
@@ -1514,13 +1516,11 @@ fs_rhevm_register (my_state *ms, const provider_t *prov, const char *next,
 			buf[match[1].rm_eo] = '\0';
 			DPRINTF("found image UUID: %s\n",buf+match[1].rm_so);
 			sprintf(ami_id_buf,"OK %.60s",buf+match[1].rm_so);
-			if (rc == MHD_HTTP_BAD_REQUEST)
-				rc = MHD_HTTP_OK;
+			ret = MHD_HTTP_OK;
 		}
-		else if (strcmp(buf,"ERROR") == 0) {
+		else if (strncmp(buf,"ERROR",sizeof("ERROR")-1) == 0) {
 			DPRINTF("found err marker: %s\n",buf+sizeof("ERROR"));
 			sprintf(ami_id_buf,"failed %.56s",buf+sizeof("ERROR"));
-			rc = MHD_HTTP_INTERNAL_SERVER_ERROR;
 		}
 		else {
 			DPRINTF("ignoring line: <%s>\n",buf);
diff --git a/dc-rhev-image.c b/dc-rhev-image.c
index e944343..5325438 100644
--- a/dc-rhev-image.c
+++ b/dc-rhev-image.c
@@ -231,6 +231,18 @@ static int path_exists(const char *path)
 	return access(path, R_OK) == 0;
 }
 
+/* This is basename(), but we hand-roll it to be safe, due to BSD differences. */
+static char *image_name(char *path)
+{
+	char *name;
+
+	if (!(name = strrchr(path, '/')) || name[1]==0)
+		name = path;
+	else
+		name++;
+	return name;
+}
+
 static void
 cfg_veripick(char **cfgval, const char *cfgname, json_t *jcfg,
 	     const char *cfgtag)
@@ -260,6 +272,29 @@ cfg_veripick(char **cfgval, const char *cfgname, json_t *jcfg,
 	*cfgval = tmp;
 }
 
+static void ensure_path(const char *filename)
+{
+	char *path;
+	char *s;
+
+	path = strdup(filename);
+	if (!path)
+		return;
+	for (s = path; ; s++) {
+		if (!*s)
+			break;
+		if (*s == '/')
+			continue;
+		s = strchr(s, '/');
+		if (!s)
+			break;
+		*s = 0;
+		(void) mkdir(path, 0770);
+		*s = '/';
+	}
+	free(path);
+}
+
 static size_t api_wcb(void *ptr, size_t bsz, size_t nmemb, void *arg)
 {
 	struct api_buf *bp = arg;
@@ -864,6 +899,7 @@ static struct stor_dom *apistart(struct config *cfg)
 	sd->poolid = apipool(cfg, &connection, headers, pathdc, sd->uuid);
 
 	curl_easy_cleanup(connection.curl);
+	curl_slist_free_all(headers);
 
 	free(authhdr);
 	free(authraw);
@@ -923,6 +959,12 @@ static void spitovf(struct config *cfg, struct stor_dom *sd,
 	if (rc < 0)
 		goto err_alloc;
 
+	/*
+	 * When a storage domain is freshly imported, without any VMs,
+	 * RHEV-M may not create "master/vms/". Pre-create it.
+	 */
+	ensure_path(ovfdir);
+
 	now = time(NULL);
 	gmtime_r(&now, &now_tm);
 	strftime(now_str, 50, "%Y/%m/%d %H:%M:%S", &now_tm);
@@ -980,13 +1022,8 @@ static void spitovf(struct config *cfg, struct stor_dom *sd,
 	rc = xmlTextWriterWriteAttribute(writer,
 	    BAD_CAST "ovf:size", BAD_CAST buf100);
 	if (rc < 0) goto err_xml;
-	/* This is basename(), but we handroll it to make sure, due to BSD. */
-	if (!(s = strrchr(cfg->image, '/')) || s[1]==0)
-		s = cfg->image;
-	else
-		s++;
 	rc = xmlTextWriterWriteAttribute(writer,
-	    BAD_CAST "ovf:description", BAD_CAST s);
+	    BAD_CAST "ovf:description", BAD_CAST image_name(cfg->image));
 	if (rc < 0) goto err_xml;
 	rc = xmlTextWriterEndElement(writer);	/* close <File> */
 	if (rc < 0) goto err_xml;
@@ -1070,7 +1107,7 @@ static void spitovf(struct config *cfg, struct stor_dom *sd,
 	    BAD_CAST "ovf:id", BAD_CAST "out");
 	if (rc < 0) goto err_xml;
 
-	if (!(s = strrchr(cfg->image, '/'))) s = cfg->image;
+	s = image_name(cfg->image);
 	rc = xmlTextWriterWriteElement(writer, BAD_CAST "Name", BAD_CAST s);
 	if (rc < 0) goto err_xml;
 
diff --git a/doc/registrations.md b/doc/registrations.md
new file mode 100644
index 0000000..4116236
--- /dev/null
+++ b/doc/registrations.md
@@ -0,0 +1,142 @@
+Notes on registration in iwhd
+=============================
+
+Every known cloud includes a concept known as "registration", whereby an image
+requires special processing before it can be instantiated. Generally, if
+iwhd runs on host H, and the cloud includes a storage back-end S and a
+management server M, the registration involves:
+ - formatting the image, if necessary
+ - generating any necessary manifests, metadata, or OVF files
+ - uploading the image from H to S
+ - notifying M with an API call
+
+The registration is triggered with "op=register" posted to an iwhd image.
+Unfortunately, the parameters are cloud-specific (see the examples below).
+
+When registration completes with code 200, the application is supposed to
+extract a cloud ID from the "ami-id" attribute and verify that it starts
+with "OK". For example:
+ curl http://iwhdhost.eng.example.com:9090/buk1/test_img/ami-id
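+
+A minimal shell sketch of that check (same host, bucket, and image as in the
+example above; the attribute reads "pending ..." while the registration is
+still running and "failed ..." on errors):
+
+ ami=$(curl -s http://iwhdhost.eng.example.com:9090/buk1/test_img/ami-id)
+ case "$ami" in
+ "OK "*) echo "cloud ID: ${ami#OK }" ;;
+ *)      echo "not registered: $ami" >&2 ;;
+ esac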
+
+Another unfortunate implementation limitation is that registrations
+fail unless iwhd uses a filesystem-type back-end. Work is ongoing
+to lift this restriction.
+
+Amazon EC2
+----------
+
+There is no pre-setup other than configuring iwhd/conf.js like so:
+
+[
+   {
+      "name": "main",
+      "type": "fs",
+      "path": "_fs",
+   }
+]
+
+Note that the type is "fs", not "s3" (see the second unfortunate note above).
+
+Registration call:
+
+ curl -d op=register -d site=amazon \
+  -d api-key=AKIAJAJZYB6229Z5K3VW \
+  -d api-secret=PPU45khle/uHqq0xGPNNSJLAmPwsc9end7s3aCx+ \
+  -d ami-cert=/home/tester/cert-1.pem \
+  -d ami-key=/home/tester/pk-1.pem \
+  -d ami-uid=089534962013 \
+  -d ami-bkt=west-test \
+  -d kernel=_default_ \
+  -d ramdisk=_default_ \
+  http://localhost:9090/buk1/dummy_img
+
+Note that the S3 bucket may be different from the iwhd bucket. This is mostly
+because Amazon bucket names are global and it is very easy to run into
+conflicts.
+The kernel and ramdisk arguments are optional.
+
+The ami-id contains a pattern like "OK ami-298f1573".
+
+This back-end may be compatible with Amazonesque clouds, such as Eucalyptus
+and OpenStack.
+
+RHEV-M
+------
+
+The pre-setup for RHEV-M consists of creating an NFS area (S) that both the
+RHEV-M server (M) and the iwhd server (H) can access for writing. Its
+top-level directory must be owned by user 36 (vdsm) and group 36 (kvm).
+Usually /etc/exports looks like this:
+
+ /home/vdsm  10.16.0.0/16(rw) 10.11.10.167/16(rw) *(ro)
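+
+After editing /etc/exports on a live server, the export list typically needs
+to be refreshed:
+
+ exportfs -ra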
+
+This assumes iwhd is run as root. Since iwhd must write into the area S
+with UID 36, attempts to run iwhd as a non-privileged user require
+tricks such as wrapping dc-rhev-image into a script that calls sudo.
+Do not attempt it if you value your sanity.
+
+The area S must be mounted at H (example below assumes /mnt/iwhd-fish).
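+
+For example, using the NFS host and path from the registration call below
+(adjust for your site):
+
+ mount -t nfs fish.usersys.redhat.com:/home/vdsm/v1 /mnt/iwhd-fish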
+
+Finally, RHEV-M must be told to "import" and "attach" the so-called
+"export storage domain". At this point, the RHEV-M server mounts the area S
+and creates the necessary directory structure. The names it selects are
+impossible to guess ahead of time, so this must be done before any
+registrations are attempted.
+
+The provider in iwhd/conf.js uses "fs-rhev-m" type:
+
+[
+   {
+      "name": "main",
+      "type": "fs-rhev-m",
+      "path": "_fs",
+   }
+]
+
+Registration call:
+
+ curl -d op=register -d site=main \
+  -d api-url=http://rhevm23.virt.lab.eng.bos.redhat.com/rhevm-api \
+  -d api-key=rhevadmin@virt.lab.eng.bos.redhat.com \
+  -d api-secret=donotusepassw0rd \
+  -d nfs-host=fish.usersys.redhat.com \
+  -d nfs-path=/home/vdsm/v1 \
+  -d nfs-dir=/mnt/iwhd-fish \
+  http://localhost:9090/buk1/dummy_img
+
+The ami-id contains a pattern like "OK <uuid>". The UUID is the "image"
+UUID that can be used to find the image in RHEV-M data centers through
+the RHEV-M RESTful API.
+
+Condor
+------
+
+Condor is a toy cloud that DeltaCloud uses for testing.
+
+Condor uses NFS just like RHEV-M, but there are no problems with UID 36,
+so iwhd can be run as non-root. The only requirement is to create a
+subdirectory called "staging/" in area S, and make sure that iwhd can
+write into S.
+
+The area S must be mounted at H, of course.
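+
+For example, using the mount point from the registration call below:
+
+ mkdir -p /mnt/falcon-in/staging   # must be writable by the user iwhd runs as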
+
+[
+   {
+      "name": "condor",
+      "type": "fs-condor",
+      "path": "/home/iwhd/_fs",
+   }
+]
+
+Registration call:
+
+ curl -d op=register -d site=main \
+  -d nfs-dir=/mnt/falcon-in \
+  http://localhost:9090/buk1/dummy_img_2
+
+This basically copies the image into /mnt/falcon-in/staging/, then
+renames it into /mnt/falcon-in.
+
+VMware
+------
+
+Not implemented yet.

