Branch 'aes' - xlators/encryption

Edward Shishkin eshishki at fedoraproject.org
Thu Jun 16 20:20:35 UTC 2011


 xlators/encryption/crypt/src/crypt.c | 2363 +++++++++++++++++++++++++++++++----
 xlators/encryption/crypt/src/crypt.h |  122 +
 2 files changed, 2240 insertions(+), 245 deletions(-)

New commits:
commit 38752da8f39db4d1950e17238912000dec8c58ef
Author: Edward <edward at redhat.com>
Date:   Thu Jun 16 22:19:00 2011 +0200

    Add support for atomic cipher modes;
    Handling holes.

diff --git a/xlators/encryption/crypt/src/crypt.c b/xlators/encryption/crypt/src/crypt.c
index 8748749..67513c7 100644
--- a/xlators/encryption/crypt/src/crypt.c
+++ b/xlators/encryption/crypt/src/crypt.c
@@ -30,338 +30,2181 @@
 #include "logging.h"
 
 #include "crypt.h"
+#include <endian.h>
 
+/* Transparent encryption with symmetric block ciphers.
+ *
+ *                  Glossary
+ *
+ *
+ * block (atom)     A logical unit in a file. The minimal chunk of
+ *                  a file's data (plaintext [ciphertext]) that is
+ *                  involved in the encryption [decryption] process.
+ *                  Atom size depends on cipher algorithm, mode, etc.
+ *                  If atom size is more than 1 bit, then we say
+ *                  that the mode is atomic. Otherwise it is inatomic.
+ *
+ * head block       The atom of minimal offset that contains
+ *                  data to read/write.
+ *
+ * tail block       The atom of maximal offset that contains
+ *                  data to read/write.
+ *
+ * complete block   An atom which contains exactly atom_size
+ *                  bytes to read/write.
+ *
+ * partial block    An atom which contains fewer than atom_size
+ *                  bytes to read/write.
+ *
+ * cblock           (or cipher block). A logical unit in a file.
+ *                  cblock size is defined as the number of bits
+ *                  in an input (or output) block of the block
+ *                  cipher (*). Cipher block size is a property of
+ *                  cipher algorithm. E.g. cblock size is 64 bits
+ *                  for DES, 128 bits for AES, etc.
+ *
+ *
+ * (*) Recommendation for Block Cipher Modes of Operation
+ *     Methods and Techniques
+ *     NIST Special Publication 800-38A Edition 2001
+ */
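+
+/*
+ * Illustration of the glossary above (these helpers are not
+ * used by the translator itself): with 512-byte atoms, a write
+ * of 2000 bytes at offset 700 touches atoms 1..5; atom 1 is a
+ * partial head block, atoms 2..4 are complete blocks, and
+ * atom 5 is a partial tail block.
+ */
+static inline uint64_t example_head_atom(off_t off, int atom_bits)
+{
+	return off >> atom_bits;
+}
+
+static inline uint64_t example_tail_atom(off_t off, size_t len,
+					 int atom_bits)
+{
+	return (off + len - 1) >> atom_bits;
+}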
+
+
+/* classical 64 bit Fowler/Noll/Vo-1 (FNV-1) hash.
+
+   See http://www.isthe.com/chongo/tech/comp/fnv/ for details.
+
+   Excerpts:
+
+   FNV hashes are designed to be fast while maintaining a low collision
+   rate.
+
+   FNV hash algorithms and source code have been released into the public
+   domain.
+
+*/
+static uint64_t hash_fnv1(const unsigned char *msg /* message to digest */ ,
+			  size_t len /* @msg's length */)
+{
+	uint64_t a = 0xcbf29ce484222325ull;
+	const uint64_t fnv_64_prime = 0x100000001b3ull;
+
+	/* FNV-1 hash each octet in the buffer */
+	for (; len; msg++, len--) {
+		/* multiply by the 64 bit FNV magic prime mod 2^64 */
+		a *= fnv_64_prime;
+		/* xor the bottom with the current octet */
+		a ^= (uint64_t)(*msg);
+	}
+	/* return our new hash value */
+	return a;
+}
 
 /*
- * TBD: make this endian-neutral.  Right now it works on homogeneous machines
- * because they'll all do the addition the same way, but it should work on
- * heterogeneous machines as well.
+ * Calculate and set file's minor id
  */
-void
-increment_iv (unsigned char *iv, unsigned int n)
+static inline void set_mid(uint64_t *mid, fd_t *fd)
 {
-	unsigned int *iv_ints = (unsigned int *)iv;
-	unsigned char i;
+	*mid = hash_fnv1(fd->inode->gfid, sizeof(uuid_t));
+}
 
-	/* Check for safety from 32-bit overflow. */
-	if (n <= (UINT_MAX - iv_ints[3])) {
-		iv_ints[3] += n;	
-		return;
+static size_t iovec_get_size(struct iovec *vec, uint32_t count)
+{
+	int i;
+	size_t size = 0;
+	for (i = 0; i < count; i++)
+		size += vec[i].iov_len;
+	return size;
+}
+
+#define AES_BLOCK_BITS      (4) /* log2(AES_BLOCK_SIZE) */
+/*
+ * format of 128-bit counters:
+ *
+ * . high 64 bits are file's mid (minor id);
+ * . low 64 bits are offset in a file.
+ */
+
+/*
+ * set a counter in a big-endian format
+ */
+static void setctr(uint64_t *mid, unsigned char *ivec, off_t offset,
+		   int block_bits)
+{
+	uint64_t *ctr;
+
+	ctr = (uint64_t *)ivec;
+	*ctr++ = htobe64(*mid);
+	*ctr = htobe64(offset >> block_bits);
+}
+
+/* format of 128-bit initial vectors:
+ *
+ * For OFB mode the IV is a 128-bit counter (see above
+ * for its definition).
+ *
+ * For CFB mode the IV is a 128-bit counter encrypted with
+ * the same key that we use to encrypt the file.
+ */
+
+/*
+ * geniv(),
+ * generate and set initial vector for CFB cipher mode.
+ * put the result to @ivec.
+ */
+static void geniv(uint64_t *mid, unsigned char *ivec, off_t offset,
+		  int block_bits, crypt_private_t *priv)
+{
+	uint64_t *ctr = (uint64_t *)ivec;
+
+	const union { long one; char little; } is_endian = {1};
+	if (is_endian.little) {
+		*ctr++ = offset >> block_bits;
+		*ctr = *mid;
+	}
+	else {
+		*ctr++ = *mid;
+		*ctr = offset >> block_bits;
 	}
+	AES_encrypt(ivec, ivec, &priv->key[AES_ENCRYPT]);
+}
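+
+#if 0
+/*
+ * Illustration only (the values are made up, and this would
+ * need <stdio.h>): dump the counter produced by setctr().
+ * For mid = 0x1122334455667788 and file offset 80 with 16-byte
+ * cblocks (block counter 80 >> 4 = 5), the big-endian counter is
+ * 11 22 33 44 55 66 77 88 00 00 00 00 00 00 00 05.
+ * geniv() would additionally encrypt this counter with the
+ * file's key to produce the CFB initial vector.
+ */
+static void example_dump_ctr(void)
+{
+	unsigned char ivec[AES_BLOCK_SIZE];
+	uint64_t mid = 0x1122334455667788ull;
+	int i;
+
+	setctr(&mid, ivec, 80, AES_BLOCK_BITS);
+	for (i = 0; i < AES_BLOCK_SIZE; i++)
+		printf("%02x ", ivec[i]);
+	printf("\n");
+}
+#endif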
 
-	/*
-	 * Do this carefully to avoid actually hitting the overflow.  We know
-	 * that iv_ints[3] cannot be zero because then n could not have been
-	 * greater than the remainder and we wouldn't be here.
-	 */
-	iv_ints[3] = n - (UINT_MAX - iv_ints[3] + 1);
-
-	/* Propagate the carry bit. */
-	i = 2;
-	for (;;) {
-		if (iv_ints[i] != UINT_MAX) {
-			/* Carry bit stops here. */
-			++iv_ints[i];
-			break;
+int aes_cipher(crypt_private_t *priv,
+		      uint64_t *mid,
+		      char *from,
+		      char *to,
+		      off_t off,
+		      size_t len,
+		      int dir)
+{
+	int num = 0;
+	unsigned int anum = 0;
+	unsigned char ecount_buf[AES_BLOCK_SIZE];
+	unsigned char ivec[AES_BLOCK_SIZE];
+	AES_KEY *key;
+
+	switch (priv->mode) {
+	case AES_CFB:
+		key = &priv->key[AES_ENCRYPT];
+		geniv(mid, ivec, off, priv->block_bits, priv);
+		AES_cfb128_encrypt((const unsigned char *)from,
+				   (unsigned char *)to,
+				   len,
+				   key,
+				   ivec,
+				   &num,
+				   dir);
+		break;
+	case AES_OFB:
+		key = &priv->key[AES_ENCRYPT];
+		setctr(mid, ivec, off, priv->block_bits);
+		AES_ofb128_encrypt((const unsigned char *)from,
+				   (unsigned char *)to,
+				   len,
+				   key,
+				   ivec,
+				   &num);
+		break;
+	case AES_CTR:
+		key = &priv->key[AES_ENCRYPT];
+		setctr(mid, ivec, off, AES_BLOCK_BITS);
+		memset(ecount_buf, 0, AES_BLOCK_SIZE);
+		AES_ctr128_encrypt((const unsigned char *)from,
+				   (unsigned char *)to,
+				   len,
+				   key,
+				   ivec,
+				   ecount_buf,
+				   &anum);
+		break;
+	default:
+		gf_log("aes", GF_LOG_DEBUG,
+		       "unsupported cipher mode %d", priv->mode);
+		return ENOTSUP;
+	}
+	return 0;
+}
+
+#define MAX_CIPHER_CHUNK (1 << 30)
+
+/*
+ * Do a cipher (encryption/decryption) transform of a
+ * contiguous region of memory.
+ *
+ * @len: number of bytes to transform;
+ * @buf: data to transform;
+ * @off: offset in a file; must be atom-aligned for
+ *       atomic cipher modes and cblock-aligned for
+ *       other modes;
+ * @dir: direction of transform (encrypt/decrypt).
+ */
+static int cipher_region(crypt_private_t *priv,
+			 uint64_t *mid,
+			 char *from,
+			 char *to,
+			 off_t off,
+			 size_t len,
+			 int dir)
+{
+	int ret;
+
+	while (len > 0) {
+		size_t to_cipher;
+
+		to_cipher = len;
+		if (to_cipher > MAX_CIPHER_CHUNK)
+			to_cipher = MAX_CIPHER_CHUNK;
+
+		/* this will reset IV */
+		ret = aes_cipher(priv,
+				 mid,
+				 from,
+				 to,
+				 off,
+				 to_cipher,
+				 dir);
+		if (ret)
+			return ret;
+		from += to_cipher;
+		to   += to_cipher;
+		off  += to_cipher;
+		len  -= to_cipher;
+	}
+	return 0;
+}
+
+/*
+ * Do cipher transform (encryption/decryption) of
+ * plaintext/ciphertext represented by @vec.
+ *
+ * Pre-conditions: @vec represents data in a file at
+ * offset @off to be ciphered (encrypted/decrypted).
+ * @count is the number of vec's components. All the
+ * components must be block-aligned, the caller is
+ * responsible for this. @dir is "direction" of
+ * transform (encrypt/decrypt).
+ */
+int32_t cipher_aligned_iov(crypt_private_t *priv,
+			   uint64_t *mid,
+			   struct iovec *vec,
+			   int count,
+			   off_t off,
+			   int32_t dir)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < count; i++) {
+		ret = cipher_region(priv,
+				    mid,
+				    vec[i].iov_base,
+				    vec[i].iov_base,
+				    off,
+				    vec[i].iov_len,
+				    dir);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static inline int32_t encrypt_aligned_iov(crypt_private_t *priv,
+					  uint64_t *mid,
+					  struct iovec *vec,
+					  int count,
+					  off_t off)
+{
+	return cipher_aligned_iov(priv, mid, vec, count, off, AES_ENCRYPT);
+}
+
+static inline int32_t decrypt_aligned_iov(crypt_private_t *priv,
+					  uint64_t *mid,
+					  struct iovec *vec,
+					  int count,
+					  off_t off)
+{
+	return cipher_aligned_iov(priv, mid, vec, count, off, AES_DECRYPT);
+}
+
+/*
+ * Pre-conditions:
+ * @vec represents a plaintext(ciphertext) of
+ * original size and offset.
+ *
+ * encrypt(decrypt) @vec components one-by-one.
+ */
+static int32_t cipher_iov_inatomic(crypt_private_t *priv,
+				   uint64_t *mid,
+				   struct iovec *vec,
+				   int count,
+				   off_t off,
+				   int32_t dir)
+{
+	int32_t i;
+	int32_t ret;
+	int32_t vec_off;
+	struct iovec avec[2];
+	char pad[AES_BLOCK_SIZE];
+
+	for (i = 0; i < count; i++) {
+		vec_off = off % AES_BLOCK_SIZE;
+		if (vec_off) {
+			/*
+			 * component is not ksize-aligned,
+			 * prepend a zero padding before
+			 * decryption
+			 */
+			avec[0].iov_len = AES_BLOCK_SIZE;
+			avec[0].iov_base = pad;
+			memset(avec[0].iov_base, 0, vec_off);
+
+			avec[1].iov_len =  vec[i].iov_len -
+				(AES_BLOCK_SIZE - vec_off);
+			avec[1].iov_base = vec[i].iov_base +
+				(AES_BLOCK_SIZE - vec_off);
+
+			ret = cipher_aligned_iov(priv,
+						 mid,
+						 avec,
+						 2,
+						 off - vec_off,
+						 dir);
+			if (ret)
+				return ret;
+			memcpy(avec[1].iov_base,
+			       avec[0].iov_base,
+			       vec_off);
 		}
-		iv_ints[i] = 0;
-		if (i == 0) {
-			/* Total overflow; wraparound is OK. */
-			break;
+		else {
+			/*
+			 * component is cblock-aligned,
+			 * cipher it in-place
+			 */
+			ret = cipher_region(priv,
+					    mid,
+					    vec[i].iov_base,
+					    vec[i].iov_base,
+					    off,
+					    vec[i].iov_len,
+					    dir);
+			if (ret)
+				return ret;
 		}
-		/* Propagate (at least) one further. */
-		--i;
+		off += vec[i].iov_len;
 	}
+	return 0;
 }
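+
+/*
+ * Example: for a CTR read at offset 1000, vec_off = 1000 % 16 = 8,
+ * so an 8-byte zero pad is prepended and the component is ciphered
+ * as if it started at the cblock boundary 992; the keystream
+ * position then matches the data's real file offset.
+ */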
 
-void
-encrypt_chunk (AES_KEY *key, unsigned char *input, unsigned char *output,
-	       unsigned char *gfid, off_t file_offset, size_t length)
+static int32_t encrypt_iov_inatomic(crypt_private_t *priv,
+				    uint64_t *mid,
+				    struct iovec *vec,
+				    int count,
+				    off_t off)
 {
-	size_t        length_now;	/* length within data block */
-	unsigned char iv_input[AES_BLOCK_SIZE];
-	unsigned char actual_iv[AES_BLOCK_SIZE];
-	unsigned char keystream[AES_BLOCK_SIZE];
-	unsigned char ib_offset;	/* intra-block offset */
+	return cipher_iov_inatomic(priv, mid, vec, count, off, AES_ENCRYPT);
+}
 
-	while (length > 0) {
-		length_now = BLOCK_SIZE - (file_offset % BLOCK_SIZE);
-		if (length_now > length) {
-			length_now = length;
-		}
-		/* Generate a hard-to-predict IV for the block, using a method
-		 * similar to ESSIV but without the redundant hash step.  Note
-		 * that the IV will still be constant for the same key, GFID,
-		 * and block number.  This does make us susceptible to a
-		 * known-plaintext attack; anyone who can see both plaintext
-		 * and ciphertext for a block can derive that section of the
-		 * keystream even without having the key, and can then use that
-		 * to read any plaintext in that same block thereafter.  So
-		 * don't let anyone see both.  Really.  If you send plaintext
-		 * to the same people who receive the ciphertext (i.e. the
-		 * people who run your storage servers) then you'll need to
-		 * change either the key or the GFID to regain confidentiality.
-		 * This is simple if you just do copy/rename, but involves
-		 * a lot of data movement and calculation.
-		 */
-		memset(iv_input,0,sizeof(iv_input));
-		memcpy(iv_input,gfid,GFID_SIZE);
-		*((off_t *)iv_input) = file_offset / BLOCK_SIZE;
-		AES_encrypt(iv_input,actual_iv,key);
-		increment_iv(actual_iv,(file_offset%BLOCK_SIZE)/AES_BLOCK_SIZE);
-		/*
-		 * Now for the actual encryption.  This is very similar to
-		 * AES_ctr128_encrypt, but adjusted to avoid API inefficiency
-		 * and to use iv_increment for consistency with the above.
-		 */
-		AES_encrypt(actual_iv,keystream,key);
-		DPRINTF("keystream for %lu:%lu starts with %02x %02x %02x\n",
-			file_offset / BLOCK_SIZE,
-			(file_offset % BLOCK_SIZE) / AES_BLOCK_SIZE,
-			keystream[0], keystream[1], keystream[2]);
-		ib_offset = file_offset % AES_BLOCK_SIZE;
-		for (;;) {
-			*(output++) = *(input++) ^ keystream[ib_offset];
-			++file_offset;
-			--length;
-			if (--length_now == 0) {
-				break;
-			}
-			if (++ib_offset == AES_BLOCK_SIZE) {
-				increment_iv(actual_iv,1);
-				AES_encrypt(actual_iv,keystream,key);
-				ib_offset = 0;
+static int32_t decrypt_iov_inatomic(crypt_private_t *priv,
+				    uint64_t *mid,
+				    struct iovec *vec,
+				    int count,
+				    off_t off)
+{
+	return cipher_iov_inatomic(priv, mid, vec, count, off, AES_DECRYPT);
+}
+
+/*
+ * Compound @avec, which represents the same data
+ * chunk as @vec, but has components aligned to the
+ * specified block size. Allocate blocks if needed and
+ * store the number of allocated blocks in @blocks_allocated.
+ *
+ * 0    1  2       3     4               5   6
+ * *----*--*+------*-+---*----+--------+-*---*---
+ * |    |  ||      | |   |    |        | |   |
+ * *----+--+*------+-*---+----*--------*-+---+--*
+ * 0        2        3        4        5        6
+ *
+ */
+static int align_iov_by_atoms(crypt_private_t *priv,
+			      size_t size,
+			      struct iovec *vec /* input vector */,
+			      struct iovec *avec /* aligned vector */,
+			      char **blocks /* pool of blocks */,
+			      uint32_t *blocks_allocated)
+{
+	int vecn = 0;      /* number of the current component in vec */
+	int avecn = 0;     /* number of the current component in avec */
+	off_t vec_off = 0; /* offset in the current vec component,
+			    * i.e. the number of bytes that have
+			    * already been copied */
+	int32_t block_size = priv->block_size;
+	size_t to_process = size;
+
+	while (to_process > 0) {
+		if (vec[vecn].iov_len - vec_off < block_size &&
+		    vec[vecn].iov_len - vec_off != to_process) {
+			/* less than block_size and not EOF,
+			 * so compound a new block from vec
+			 * components
+			 */
+			size_t copied = 0;
+			/*
+			 * populate the pool with a new block
+			 */
+			blocks[*blocks_allocated] = malloc(block_size);
+			if (!blocks[*blocks_allocated])
+				return -ENOMEM;
+			/*
+			 * fill the block with vec components
+			 */
+			do {
+				size_t to_copy;
+
+				to_copy = vec[vecn].iov_len - vec_off;
+				if (to_copy > block_size - copied)
+					to_copy = block_size - copied;
+
+				memcpy(blocks[*blocks_allocated] + copied,
+				       vec[vecn].iov_base + vec_off,
+				       to_copy);
+
+				copied += to_copy;
+				to_process -= to_copy;
+
+				vec_off += to_copy;
+				if (vec_off == vec[vecn].iov_len) {
+					/* finished with this vecn */
+					vec_off = 0;
+					vecn++;
+				}
+			} while (copied < block_size && to_process > 0);
+			/*
+			 * update avec
+			 */
+			avec[avecn].iov_len = copied;
+			avec[avecn].iov_base = blocks[*blocks_allocated];
+
+			(*blocks_allocated)++;
+		} else {
+			/* the rest of the current vec component
+			 * is either at least block_size long, or
+			 * we have reached the end of the region,
+			 * so reuse the memory buffer of the
+			 * component.
+			 */
+			size_t to_reuse;
+			to_reuse = (to_process > block_size ?
+				    block_size :
+				    to_process);
+			avec[avecn].iov_len = to_reuse;
+			avec[avecn].iov_base = vec[vecn].iov_base + vec_off;
+
+			vec_off += to_reuse;
+			if (vec_off == vec[vecn].iov_len) {
+				/* finished with this vecn */
+				vec_off = 0;
+				vecn++;
 			}
+			to_process -= to_reuse;
 		}
+		avecn++;
 	}
+	return 0;
 }
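+
+#if 0
+/*
+ * Illustration only: how a caller is expected to drive
+ * align_iov_by_atoms(). It assumes @size is already expanded
+ * to a multiple of the atom size, so one block pointer per
+ * atom is a safe upper bound for the pool.
+ */
+static int example_align(crypt_private_t *priv, struct iovec *vec,
+			 size_t size)
+{
+	int ret;
+	uint32_t allocated = 0;
+	uint32_t nr_atoms = size >> priv->block_bits;
+	struct iovec *avec;
+	char **pool;
+
+	avec = CALLOC(nr_atoms, sizeof(*avec));
+	if (!avec)
+		return -ENOMEM;
+	pool = CALLOC(nr_atoms, sizeof(*pool));
+	if (!pool) {
+		FREE(avec);
+		return -ENOMEM;
+	}
+	ret = align_iov_by_atoms(priv, size, vec, avec,
+				 pool, &allocated);
+	/* ... cipher @avec here ... */
+	while (allocated)
+		free(pool[--allocated]);
+	free(pool);
+	FREE(avec);
+	return ret;
+}
+#endif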
 
-
-int32_t
-crypt_readv_cbk (call_frame_t *frame,
-                 void *cookie,
-                 xlator_t *this,
-                 int32_t op_ret,
-                 int32_t op_errno,
-                 struct iovec *vector,
-                 int32_t count,
-		 struct iatt *stbuf,
-                 struct iobref *iobref)
+/*
+ * Pre-conditions:
+ * @vec represents a ciphertext of expanded size and
+ * aligned offset.
+ *
+ * Compound a temporary vector @avec with block-aligned
+ * components, decrypt it, and fix it up to represent the
+ * chunk of data corresponding to the original size and
+ * offset. Then unwind the result to the caller.
+ */
+int32_t crypt_readv_cbk_atomic(call_frame_t *frame,
+			       void *cookie,
+			       xlator_t *this,
+			       int32_t op_ret,
+			       int32_t op_errno,
+			       struct iovec *vec,
+			       int32_t count,
+			       struct iatt *stbuf,
+			       struct iobref *iobref)
 {
 	crypt_rlocal_t  *local = frame->local;
 	crypt_private_t *priv = this->private;
-	int32_t          i = 0;
-	off_t            cur_off = local->offset;
 
-	for (i = 0; i < count; ++i) {
-		encrypt_chunk(&priv->key,vector[i].iov_base,vector[i].iov_base,
-			      local->gfid,cur_off,vector[i].iov_len);
-		cur_off += vector[i].iov_len;
+	struct iovec *avec = NULL;
+	int32_t acount = 0;
+	char **pool;
+	uint32_t blocks_in_pool = 0;
+
+	if (op_ret < 0 || count == 0 || vec[0].iov_len == 0) {
+		STACK_UNWIND_STRICT(readv,
+				    frame,
+				    op_ret,
+				    op_errno,
+				    vec,
+				    count,
+				    stbuf, iobref);
+		return 0;
+	}
+	op_errno = ENOMEM;
+	acount = local->expanded_size >> priv->block_bits;
+	avec = CALLOC(acount, sizeof(*avec));
+	if (!avec)
+		goto exit;
+	pool = CALLOC(acount, sizeof(*pool));
+	if (!pool) {
+		free(avec);
+		goto exit;
 	}
+	op_errno = align_iov_by_atoms(priv, local->expanded_size,
+				      vec, avec, &pool, &blocks_in_pool);
+	if (op_errno)
+		goto free;
+	op_errno = decrypt_aligned_iov(priv, &local->mid, avec, acount,
+				       local->aligned_offset);
+ free:
+	while (blocks_in_pool)
+		free(pool[--blocks_in_pool]);
+	free(pool);
+	free(avec);
+ exit:
+	if (op_errno)
+		op_ret = -1;
+	STACK_UNWIND_STRICT(readv,
+			    frame,
+			    op_ret,
+			    op_errno,
+			    vec,
+			    count,
+			    stbuf,
+			    iobref);
+	return 0;
+}
+
+int32_t crypt_readv_cbk_inatomic(call_frame_t *frame,
+				 void *cookie,
+				 xlator_t *this,
+				 int32_t op_ret,
+				 int32_t op_errno,
+				 struct iovec *vec,
+				 int32_t count,
+				 struct iatt *stbuf,
+				 struct iobref *iobref)
+{
+	crypt_rlocal_t *local = frame->local;
+	crypt_private_t *priv = this->private;
 
-	STACK_UNWIND_STRICT(readv,frame,op_ret,op_errno,
-			    vector,count,stbuf,iobref);
+	if (op_ret < 0 || count == 0 || vec[0].iov_len == 0) {
+		STACK_UNWIND_STRICT(readv,
+				    frame,
+				    op_ret,
+				    op_errno,
+				    vec,
+				    count,
+				    stbuf, iobref);
+		return 0;
+	}
+	op_errno = decrypt_iov_inatomic(priv,
+					&local->mid,
+					vec,
+					count,
+					local->orig_offset);
+	if (op_errno)
+		op_ret = -1;
+	STACK_UNWIND_STRICT(readv,
+			    frame,
+			    op_ret,
+			    op_errno,
+			    vec,
+			    count,
+			    stbuf, iobref);
 	return 0;
 }
 
-/*
- * We can only decrypt starting at block boundaries, so we might need to read
- * a little extra at the beginning if this read is unaligned.  Otherwise, we
- * could just pass this through and do all our work in the callback.
+static inline int32_t get_atom_size (crypt_private_t *priv)
+{
+	return priv->block_size;
+}
+
+static inline int32_t get_atom_bits (crypt_private_t *priv)
+{
+	return priv->block_bits;
+}
+
+static inline int32_t in_atomic_cipher_mode(crypt_private_t *priv)
+{
+	switch (priv->mode) {
+	case AES_CTR:
+		return 0;
+	default:
+		return 1;
+	}
+}
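+
+/*
+ * Of the modes supported here, CFB and OFB are treated as
+ * atomic (the transform is applied to whole atoms), while
+ * CTR is byte-addressable and is treated as inatomic.
+ */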
+
+int32_t crypt_readv_cbk (call_frame_t *frame,
+			 void *cookie,
+			 xlator_t *this,
+			 int32_t op_ret,
+			 int32_t op_errno,
+			 struct iovec *vec,
+			 int32_t count,
+			 struct iatt *stbuf,
+			 struct iobref *iobref)
+{
+	if (in_atomic_cipher_mode(this->private))
+		return crypt_readv_cbk_atomic(frame, cookie, this,
+					    op_ret, op_errno, vec,
+					    count, stbuf, iobref);
+	else
+		return crypt_readv_cbk_inatomic(frame, cookie, this,
+					      op_ret, op_errno, vec,
+					      count, stbuf, iobref);
+}
+
+/* In "atomic" modes (CBC, CFB, OFB, etc) cipher
+ * transform (encrypton/decryption) is going by
+ * blocks ("atoms"). So sometimes we should read
+ * extra-bytes of ciphertext before decryption.
+ *
+ * Determine how much bytes of ciphertext should
+ * be read from disk. Actual decryption will be
+ * performed in the callback procedure.
  */
-int32_t
-crypt_readv (call_frame_t *frame,
-             xlator_t *this,
-             fd_t *fd,
-             size_t size,
-             off_t offset)
+int32_t crypt_readv(call_frame_t *frame,
+		    xlator_t *this,
+		    fd_t *fd,
+		    size_t size,
+		    off_t offset)
 {
-	crypt_rlocal_t  *local = NULL;
-	uint32_t         op_errno = EIO;
+	crypt_rlocal_t *local;
+	crypt_private_t *priv = this->private;
+	size_t resid;
 
-	local = CALLOC(1,sizeof(*local));
+	local = CALLOC(1, sizeof(*local));
 	if (!local) {
-		op_errno = ENOMEM;
-		goto err;
+		STACK_UNWIND_STRICT(readv,
+				    frame,
+				    -1,
+				    ENOMEM,
+				    NULL,
+				    0,
+				    NULL,
+				    NULL);
+		return 0;
 	}
 	frame->local = local;
 
-	local->offset = offset;
-	memcpy(local->gfid,fd->inode->gfid,sizeof(local->gfid));
+	set_mid(&local->mid, fd);
+	local->orig_size = size;
+	local->orig_offset = offset;
 
-	STACK_WIND (frame,
-		    crypt_readv_cbk,
-		    FIRST_CHILD (this),
-		    FIRST_CHILD (this)->fops->readv,
-		    fd, size, offset);
+	if (in_atomic_cipher_mode(priv)) {
+		uint32_t atom_size = get_atom_size(priv);
+		/*
+		 * Round-down the start,
+		 * round-up the end.
+		 */
+		resid = offset & (uint64_t)(atom_size - 1);
+		local->aligned_offset = offset - resid;
+		local->expanded_size = size + resid;
+
+		resid = (offset + size) & (uint64_t)(atom_size - 1);
+		if (resid)
+			local->expanded_size += (atom_size - resid);
+		gf_log(this->name, GF_LOG_DEBUG, "reading %lu at %ld",
+		       local->expanded_size, local->aligned_offset);
+		STACK_WIND(frame,
+			   crypt_readv_cbk,
+			   FIRST_CHILD (this),
+			   FIRST_CHILD (this)->fops->readv,
+			   fd,
+			   local->expanded_size,
+			   local->aligned_offset);
+	} else {
+		gf_log(this->name, GF_LOG_DEBUG, "reading %lu at %ld",
+		       size, offset);
+		STACK_WIND(frame,
+			   crypt_readv_cbk,
+			   FIRST_CHILD (this),
+			   FIRST_CHILD (this)->fops->readv,
+			   fd,
+			   size,
+			   offset);
+	}
 	return 0;
+}
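+
+/*
+ * Worked example: with 512-byte atoms, crypt_readv(size=100,
+ * offset=700) computes resid = 700 & 511 = 188, so
+ * aligned_offset = 512 and expanded_size = 288; the end
+ * residue (800 & 511 = 288) then rounds expanded_size up to
+ * 512. I.e. we read exactly the one atom [512, 1024) that
+ * contains the requested range and drop the extra bytes after
+ * decryption.
+ */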
 
-err:
-	STACK_UNWIND_STRICT(readv,frame,-1,op_errno,NULL,0,NULL,NULL);
+/*
+ * Bring a part of the head block up to date from disk.
+ * Handle cases of incomplete read.
+ *
+ * Invoked as a ->readv_cbk().
+ *
+ * Pre-conditions: @vec components contain the full
+ * head block of plain text read from disk.
+ */
+int32_t read_modify_head(call_frame_t *frame,
+			 void *cookie,
+			 xlator_t *this,
+			 int32_t op_ret,
+			 int32_t op_errno,
+			 struct iovec *vec,
+			 int32_t count,
+			 struct iatt *stbuf,
+			 struct iobref *iobref,
+			 struct avec_config *conf,
+			 struct iovec *head)
+{
+	crypt_wlocal_t *local = frame->local;
+	crypt_private_t *priv = this->private;
+
+	if (op_ret >= 0) {
+		/*
+		 * fill a "head" of head block
+		 * with plain text of the latest
+		 * version
+		 */
+		int32_t i;
+		int32_t to_head;
+		int32_t copied = 0;
+
+		to_head = conf->off_in_head;
+		if (iovec_get_size(vec, count) < to_head) {
+			/*
+			 * This kind of error is specific to
+			 * transparent encryption in stackable
+			 * file systems:
+			 *
+			 * The head block cannot be brought
+			 * up to date: too few bytes have
+			 * been read from disk, so a partial
+			 * write is impossible.
+			 *
+			 * It can happen for many reasons:
+			 * IO errors, (meta)data corruption
+			 * in the local file system, etc.
+			 */
+			gf_log (this->name, GF_LOG_WARNING,
+				"Unable to update the head block for encryption");
+			op_errno = EIO;
+			op_ret = -1;
+			goto err;
+		}
+
+		for (i = 0;
+		     i < count && copied < to_head;
+		     i++) {
+			int32_t to_copy;
+
+			to_copy = vec[i].iov_len;
+			if (to_copy > to_head - copied)
+				to_copy = to_head - copied;
+
+			memcpy(head->iov_base,
+			       vec[i].iov_base,
+			       to_copy);
+			copied += to_copy;
+		}
+		/* encrypt the whole head block */
+		op_errno = encrypt_aligned_iov(priv,
+					       &local->mid,
+					       head,
+					       1,
+					       conf->aligned_offset /* offset
+								      of the
+								      head
+								      block */);
+		if (op_errno) {
+			op_ret = -1;
+			goto err;
+		}
+		return 0;
+	}
+ err:
+	local->op_ret = op_ret;
+	local->op_errno = op_errno;
 	return 0;
 }
 
+/*
+ * Bring a part of the tail block up to date from disk.
+ * Handle cases of incomplete read.
+ *
+ * Invoked as a ->readv_cbk().
+ *
+ * Pre-condition: if ->readv() succeeded (the required
+ * number of bytes was read), then @vec components
+ * contain the full block of plain text; @count is the
+ * number of those components.
+ */
+int32_t read_modify_tail(call_frame_t *frame,
+			 void *cookie,
+			 xlator_t *this,
+			 int32_t op_ret,
+			 int32_t op_errno,
+			 struct iovec *vec,
+			 int32_t count,
+			 struct iatt *stbuf,
+			 struct iobref *iobref,
+			 struct avec_config *conf,
+			 struct iovec *tail)
+{
+	crypt_wlocal_t *local = frame->local;
+	crypt_private_t *priv = this->private;
+
+	if (op_ret >= 0) {
+		/*
+		 * fill a "tail" of tail block
+		 * with plain text of the latest
+		 * version
+		 */
+		int i;
+		int to_tail;
+		int skip;
+		int skipped;
+		int copied;
+		off_t offset = 0;
+		off_t off_in_tail;
+
+		off_in_tail = conf->off_in_tail;
+		to_tail = priv->block_size - off_in_tail;
 
-int32_t
-crypt_writev_cbk (call_frame_t *frame,
-                  void *cookie,
-                  xlator_t *this,
-                  int32_t op_ret,
-                  int32_t op_errno,
-                  struct iatt *prebuf,
-		  struct iatt *postbuf)
+		if (stbuf->ia_size <
+		    conf->aligned_offset + conf->expanded_size) {
+			/* padding size should be reduced */
+
+			if (stbuf->ia_size <
+			    conf->orig_offset + conf->orig_size)
+				/* case of append:
+				   nothing to read from the file */
+				to_tail = 0;
+			else
+				/* take into account the EOF */
+				to_tail -= priv->block_size -
+					(stbuf->ia_size &
+					 (uint64_t)(priv->block_size - 1));
+		}
+		if (iovec_get_size(vec, count) < to_tail) {
+			/*
+			 * This kind of error is specific to
+			 * transparent encryption in stackable
+			 * file systems:
+			 *
+			 * The tail block cannot be brought
+			 * up to date: too few bytes have
+			 * been read from disk, so a partial
+			 * write is impossible.
+			 *
+			 * It can happen for many reasons:
+			 * IO errors, (meta)data corruption
+			 * in the local file system, etc.
+			 */
+			gf_log (this->name, GF_LOG_WARNING,
+				"Unable to update the tail block for encryption");
+			op_errno = EIO;
+			op_ret = -1;
+			goto error;
+		}
+		skip = 1;
+		skipped = copied = 0;
+		for (i = 0; i < count && copied < to_tail; i++) {
+			int32_t to_copy;
+			if (skip) {
+				skipped += vec[i].iov_len;
+				if (skipped < off_in_tail)
+					continue;
+				else {
+					offset = skipped - off_in_tail;
+					skip = 0;
+				}
+			}
+			to_copy =  vec[i].iov_len - offset;
+			if (to_copy > to_tail - copied)
+				to_copy = to_tail - copied;
+
+			memcpy(tail->iov_base + off_in_tail + copied,
+			       vec[i].iov_base + offset,
+			       to_copy);
+			offset = 0;
+			copied += to_copy;
+		}
+		/* encrypt the whole tail block */
+		op_errno = encrypt_aligned_iov(priv,
+					       &local->mid,
+					       tail,
+					       1,
+					       conf->aligned_offset +
+					       ((1 + conf->nr_full_blocks) <<
+						priv->block_bits) /* offset
+								     of tail
+								     block */);
+		if (op_errno) {
+			op_ret = -1;
+			goto error;
+		}
+		return 0;
+	}
+ error:
+	local->op_ret = op_ret;
+	local->op_errno = op_errno;
+	return 0;
+}
+
+int32_t read_modify_data_head(call_frame_t *frame,
+			      void *cookie,
+			      xlator_t *this,
+			      int32_t op_ret,
+			      int32_t op_errno,
+			      struct iovec *vec,
+			      int32_t count,
+			      struct iatt *stbuf,
+			      struct iobref *iobref)
+{
+	return read_modify_head(frame,
+				cookie,
+				this,
+				op_ret,
+				op_errno,
+				vec,
+				count,
+				stbuf,
+				iobref,
+				&((crypt_wlocal_t *)frame->local)->data_conf,
+				&((crypt_wlocal_t *)frame->local)->data_conf.avec[0]);
+}
+
+int32_t read_modify_data_tail(call_frame_t *frame,
+			      void *cookie,
+			      xlator_t *this,
+			      int32_t op_ret,
+			      int32_t op_errno,
+			      struct iovec *vec,
+			      int32_t count,
+			      struct iatt *stbuf,
+			      struct iobref *iobref)
+{
+	return read_modify_tail(frame,
+				cookie,
+				this,
+				op_ret,
+				op_errno,
+				vec,
+				count,
+				stbuf,
+				iobref,
+				&((crypt_wlocal_t *)frame->local)->data_conf,
+				&((crypt_wlocal_t *)frame->local)->
+				data_conf.avec[((crypt_wlocal_t *)frame->local)->
+					       data_conf.acount - 1]);
+}
+
+int32_t read_modify_hole_head(call_frame_t *frame,
+			      void *cookie,
+			      xlator_t *this,
+			      int32_t op_ret,
+			      int32_t op_errno,
+			      struct iovec *vec,
+			      int32_t count,
+			      struct iatt *stbuf,
+			      struct iobref *iobref)
+{
+	return read_modify_head(frame,
+				cookie,
+				this,
+				op_ret,
+				op_errno,
+				vec,
+				count,
+				stbuf,
+				iobref,
+				&((crypt_wlocal_t *)frame->local)->hole_conf,
+				&((crypt_wlocal_t *)frame->local)->hole_conf.avec[0]);
+}
+
+int32_t read_modify_hole_tail(call_frame_t *frame,
+			      void *cookie,
+			      xlator_t *this,
+			      int32_t op_ret,
+			      int32_t op_errno,
+			      struct iovec *vec,
+			      int32_t count,
+			      struct iatt *stbuf,
+			      struct iobref *iobref)
+{	
+	return read_modify_tail(frame,
+				cookie,
+				this,
+				op_ret,
+				op_errno,
+				vec,
+				count,
+				stbuf,
+				iobref,
+				&((crypt_wlocal_t *)frame->local)->hole_conf,
+				&((crypt_wlocal_t *)frame->local)->
+				hole_conf.avec[((crypt_wlocal_t *)frame->local)->
+					       hole_conf.blocks_in_pool - 1]);
+}
+
+int32_t crypt_writev_cbk(call_frame_t *frame,
+			 void *cookie,
+			 xlator_t *this,
+			 int32_t op_ret,
+			 int32_t op_errno,
+			 struct iatt *prebuf,
+			 struct iatt *postbuf);
+
+/*
+ * Submit a read-modify-write request.
+ * This should be performed only after approval by the
+ * locking manager, i.e. the caller needs to make sure
+ * it is its turn to submit.
+ */
+void submit_rmw_end(call_frame_t *frame,
+		    xlator_t *this,
+		    fd_t *fd,
+		    struct iobref *iobref,
+		    struct rmw_atom *atom)
+{
+	/* read, modify */
+	STACK_WIND(frame,
+		   atom->read_modify,
+		   this,
+		   this->fops->readv, /* crypt_readv */
+		   fd,
+		   atom->get_iovec(frame)->iov_len,
+		   atom->get_offset(frame));
+	/* ...and write */
+	STACK_WIND(frame,
+		   crypt_writev_cbk,
+		   FIRST_CHILD(this),
+		   FIRST_CHILD(this)->fops->writev,
+		   fd,
+		   atom->get_iovec(frame),
+		   1,
+		   atom->get_offset(frame),
+		   iobref);
+}
+
+struct rmw_atom *atom_by_type(rmw_atom_type type);
+
+int submit_rmw_begin(call_frame_t *frame,
+		     xlator_t *this,
+		     struct rmw_atom *atom,
+		     int first);
+
+/*
+ * Handle the result of an inquiry, which is delivered
+ * in @op_errno. If busy, then send the inquiry again;
+ * otherwise submit.
+ */
+int32_t handle_event_common(call_frame_t *frame,
+			    void *cookie,
+			    xlator_t *this,
+			    int32_t op_ret,
+			    int32_t op_errno,
+			    dict_t *dict,
+			    rmw_atom_type atom_type)
+{
+	crypt_wlocal_t  *local = frame->local;
+	struct rmw_atom *atom = atom_by_type(atom_type);
+
+	if ((op_ret < 0) && (op_errno == EBUSY))
+		submit_rmw_begin(frame, this, atom->handle_event,
+				 0/* for the first time
+				     submit_rmw_begin for this request
+				     is called not in handler */);
+	else
+		submit_rmw_end(frame,
+			       this,
+			       fd,
+			       local->iobref,
+			       atom);
+}
+
+int32_t handle_event_data_head(call_frame_t *frame,
+			       void *cookie,
+			       xlator_t *this,
+			       int32_t op_ret,
+			       int32_t op_errno,
+			       dict_t *dict)
+{
+	return handle_event_common(frame,
+				   cookie,
+				   this,
+				   op_ret,
+				   op_errno,
+				   dict,
+				   DATA_HEAD_ATOM);
+}
+
+int32_t handle_event_data_tail(call_frame_t *frame,
+			       void *cookie,
+			       xlator_t *this,
+			       int32_t op_ret,
+			       int32_t op_errno,
+			       dict_t *dict)
+{
+	return handle_event_common(frame,
+				   cookie,
+				   this,
+				   op_ret,
+				   op_errno,
+				   dict,
+				   DATA_TAIL_ATOM);
+}
+
+int32_t handle_event_hole_head(call_frame_t *frame,
+			       void *cookie,
+			       xlator_t *this,
+			       int32_t op_ret,
+			       int32_t op_errno,
+			       dict_t *dict)
 {
+	return handle_event_common(frame,
+				   cookie,
+				   this,
+				   op_ret,
+				   op_errno,
+				   dict,
+				   HOLE_HEAD_ATOM);
+}
+
+int32_t handle_event_hole_tail(call_frame_t *frame,
+			       void *cookie,
+			       xlator_t *this,
+			       int32_t op_ret,
+			       int32_t op_errno,
+			       dict_t *dict)
+{
+	return handle_event_common(frame,
+				   cookie,
+				   this,
+				   op_ret,
+				   op_errno,
+				   dict,
+				   HOLE_TAIL_ATOM);
+}
 
-	STACK_UNWIND_STRICT (writev, frame, op_ret, op_errno, prebuf, postbuf);
+/*
+ * send an inquiry and then submit a read-modify-write request
+ */
+int submit_rmw_begin(call_frame_t *frame,
+		     xlator_t *this,
+		     struct rmw_atom *atom,
+		     int first /* is it the first time we
+				  try to submit this request */)
+{
+	crypt_wlocal_t  *local = frame->local;
+	crypt_private_t *priv = this->private;
+
+	local->xattr = dict_new();
+	if (!local->xattr)
+		return ENOMEM;
+	/* FIXME: Encode @first to the xattr value,
+	 * so that locking manager will be able to
+	 * push us to the queue */
+	if (dict_set_str(local->xattr,
+			 "trusted.glusterfs.lock","fubar") != 0) {
+		dict_unref(local->xattr);
+		return EIO;
+	}
+	/*
+	 * we use ->fgetxattr() to poll the locking manager;
+	 * the inquiry result handler is called as ->fgetxattr_cbk().
+	 * Brain damaged.
+	 */
+	STACK_WIND(frame,
+		   atom->hande_event,
+		   FIRST_CHILD(this),
+		   FIRST_CHILD(this)->fops->fgetxattr,
+		   local->fd,
+		   local->xattr,
+		   0);
 	return 0;
 }
 
+/*
+ * We spawn many writev_cbks, which manipulate
+ * @local data, so make sure it is protected.
+ */
+int32_t crypt_writev_cbk_atomic (call_frame_t *frame,
+				 void *cookie,
+				 xlator_t *this,
+				 int32_t op_ret,
+				 int32_t op_errno,
+				 struct iatt *prebuf,
+				 struct iatt *postbuf)
+{
+	int32_t ret;
+	crypt_wlocal_t  *local = frame->local;
+	crypt_private_t *priv = this->private;
+	int submit_hole = 0;
+	int should_unwind = 0;
+	/*
+	 * we spawn many writev_cbks per @local, and
+	 * only one of them should convert hole to zeros
+	 * and submit respective writes. So there is no
+	 * real contention on this lock.
+	 */
+	ret = TRY_LOCK(&local->hole_lock);
+	if (ret)
+		/*
+		 * someone is already handling hole
+		 */
+		goto put_one_call;
+	if (!local->hole_processed &&
+	    prebuf->ia_size < local->orig_offset) {
+		/*
+		 *  convert hole to zeros
+		 */
+		ret = setup_config_hole(frame, this,
+					prebuf->ia_size,
+					local->orig_offset);
+		if (ret) {
+			UNLOCK(&local->hole_lock);
+			op_ret = -1;
+			op_errno = ret;
+			goto put_one_call;
+		}
+		/*
+		 * hole conversion spawns additional
+		 * writes and respectively increases
+		 * the fanout.
+		 */
+		LOCK(&local->call_lock);
+		local->nr_calls += local->hole_conf.blocks_in_pool;
+		UNLOCK(&local->call_lock);
 
-int32_t
-crypt_writev (call_frame_t *frame,
-              xlator_t *this,
-              fd_t *fd,
-              struct iovec *vector,
-              int32_t count,
-              off_t offset,
-              struct iobref *iobref)
+		submit_hole = 1;
+	}
+	local->hole_handled = 1;
+	UNLOCK(&local->hole_lock);
+	if (submit_hole) {
+		/* FIXME: spawn the hole writes prepared by
+		 * setup_config_hole() */
+	}
+ put_one_call:
+	LOCK(&local->call_lock);
+	if (--local->nr_calls == 0)
+		should_unwind = 1;
+	UNLOCK(&local->call_lock);
+	if (should_unwind) {
+		/*
+		 * this is last child,
+		 * release everything
+		 */
+		free_data_avec(local);
+		free_hole_avec(local);
+		STACK_UNWIND_STRICT (writev,
+				     frame,
+				     op_ret,
+				     op_errno,
+				     prebuf,
+				     postbuf);
+	}
+	return 0;
+}
+
+int32_t crypt_writev_cbk_inatomic(call_frame_t *frame,
+				  void *cookie,
+				  xlator_t *this,
+				  int32_t op_ret,
+				  int32_t op_errno,
+				  struct iatt *prebuf,
+				  struct iatt *postbuf)
+{
+	STACK_UNWIND_STRICT(writev, frame, op_ret, op_errno, prebuf, postbuf);
+	return 0;
+}
+
+int32_t crypt_writev_cbk(call_frame_t *frame,
+			 void *cookie,
+			 xlator_t *this,
+			 int32_t op_ret,
+			 int32_t op_errno,
+			 struct iatt *prebuf,
+			 struct iatt *postbuf)
+{
+	if (in_atomic_cipher_mode(this->private))
+		return crypt_writev_cbk_atomic(frame,
+					       cookie,
+					       this,
+					       op_ret,
+					       op_errno,
+					       prebuf,
+					       postbuf);
+	else
+		return crypt_writev_cbk_inatomic(frame,
+						 cookie,
+						 this,
+						 op_ret,
+						 op_errno,
+						 prebuf,
+						 postbuf);
+}
+
+/*
+ * fill struct avec_config with offsets layouts
+ */
+static int32_t setup_config_offsets(call_frame_t *frame,
+				    xlator_t *this,
+				    off_t offset,
+				    int32_t count,
+				    struct avec_config *conf)
 {
 	crypt_private_t *priv = this->private;
-	int32_t          i = 0;
-	off_t            cur_off = offset;
+	crypt_wlocal_t *local = frame->local;
+
+	uint32_t atom_size;
+	uint32_t atom_bits;
+
+	size_t orig_size;
+	off_t orig_offset;
+	size_t expanded_size;
+	off_t aligned_offset;
+	size_t resid;
 
-	for (i = 0; i < count; ++i) {
-		encrypt_chunk(&priv->key,vector[i].iov_base,vector[i].iov_base,
-			      fd->inode->gfid,cur_off,vector[i].iov_len);
-		cur_off += vector[i].iov_len;
+	int64_t first_full_block; /* signed, can be -1 */
+	int64_t last_full_block;
+
+	int32_t off_in_head = 0;
+	int32_t off_in_tail = 0;
+	int64_t nr_full_blocks;
+
+	unsigned int head_is_partial = 0;
+	unsigned int tail_is_partial = 0;
+	unsigned int head_is_tail = 0;
+
+	uint32_t acount; /* number of aligned components to write */
+
+	orig_offset = offset;
+	orig_size = count;
+	
+	atom_size = get_atom_size(priv);
+	atom_bits = get_atom_bits(priv);
+	/*
+	 * Round-down the start,
+	 * round-up the end.
+	 */
+	resid = offset & (uint64_t)(atom_size - 1);
+	aligned_offset = offset - resid;
+	expanded_size = orig_size + resid;
+
+	first_full_block = offset >> atom_bits;
+	if (resid) {
+		first_full_block += 1;
+		head_is_partial = 1;
+		off_in_head = resid;
 	}
+	/* calculate tail,
+	   expand size forward  */
+	resid = (offset + orig_size) & (uint64_t)(atom_size - 1);
 
-	STACK_WIND (frame, crypt_writev_cbk,
-		    FIRST_CHILD (this), FIRST_CHILD (this)->fops->writev,
-		    fd, vector, count, offset, iobref);
+	last_full_block =
+		((offset + orig_size) >> atom_bits) - 1;
+	if (resid) {
+		tail_is_partial = 1;
+		off_in_tail = resid;
+		expanded_size += (atom_size - resid);
+	}
+	acount = expanded_size >> atom_bits;
+	/* calculate full number
+	   of blocks to write */
+	nr_full_blocks =
+		last_full_block - first_full_block + 1;
+	if (nr_full_blocks < 0) {
+		/* there is only one partial
+		   block to write */
+		head_is_tail = 1;
+		nr_full_blocks = 0;
+	}
+	conf->orig_size = orig_size;
+	conf->orig_offset = orig_offset;
+	conf->expanded_size = expanded_size;
+	conf->aligned_offset = aligned_offset;
+
+	conf->off_in_tail = off_in_tail;
+	conf->off_in_head = off_in_head;
+	conf->nr_full_blocks = nr_full_blocks;
+
+	conf->head_is_partial = head_is_partial;
+	conf->tail_is_partial = tail_is_partial;
+	conf->head_is_tail = head_is_tail;
+
+	conf->acount = acount;
+}
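+
+/*
+ * Worked example: atom_size = 512, offset = 700, count = 2000.
+ * Then aligned_offset = 512, off_in_head = 188, off_in_tail =
+ * 140, expanded_size = 2560 and acount = 5: a partial head
+ * block [512, 1024), three full blocks [1024, 2560) and a
+ * partial tail block [2560, 3072).
+ */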
+
+/*
+ * allocate and setup aligned vector for data submission
+ */
+int32_t setup_config_avec_data(crypt_private_t *priv,
+			       struct avec_config *conf,
+			       struct iovec *vec)
+{
+	int32_t ret;
+	struct iovec *avec;
+	char **pool;
+	uint32_t blocks_in_pool = 0;
+
+	avec = CALLOC(conf->acount, sizeof(*avec));
+	if (!avec)
+		return ENOMEM;
+	pool = CALLOC(conf->acount, sizeof(*pool));
+	if (!pool) {
+		FREE(avec);
+		return ENOMEM;
+	}
+	ret = align_iov_by_atoms(priv, conf->expanded_size, vec, avec,
+				 &pool, &blocks_in_pool);
+	if (ret)
+		goto free;
+	conf->avec = avec;
+	conf->pool = pool;
+	conf->blocks_in_pool = blocks_in_pool;
 	return 0;
+ free:
+	FREE(avec);
+	while (blocks_in_pool)
+		free(pool[--blocks_in_pool]);
+	free(pool);
+	return ret;
 }
 
-int32_t
-crypt_set_key (data_t *data, AES_KEY *key)
+/*
+ * allocate and setup aligned vector for hole submission
+ */
+int32_t setup_config_avec_hole(struct avec_config *conf, crypt_private_t *priv)
 {
-	int           rc = -1;
-	unsigned char hex_buf[32] = {0};	/* binary AES-256 */
-	unsigned char i = 0;
-	int           hex_byte = 0;
-	int           fd = -1;
-	unsigned char file_buf[64] = {0};	/* hex AES-256 */
+	struct iovec *avec;
+	int idx;
+	char *pool;
+	uint32_t num_blocks;
 
-	if (!data) {
-		gf_log(__func__,GF_LOG_ERROR,"missing key option");
-		return EINVAL;
+	num_blocks = lo->head_is_partial +
+		(lo->tail_is_partial && !lo->head_is_tail) +
+		!!nr_full_blocks;
+	avec = CALLOC(num_blocks, sizeof(*avec));
+	if (!avec)
+		return ENOMEM;
+	pool = CALLOC(num_blocks, get_atom_size(priv));
+	if (!pool) {
+		FREE(avec);
+		return ENOMEM;
 	}
-	gf_log(__func__,GF_LOG_DEBUG,"data length is %d",data->len);
+	if (lo->head_is_partial) {
+		/* setup head */
+		idx = 0;
+		avec[idx].iov_base = &pool[idx];
+		avec[idx].iov_size = get_atom_size(priv);
+		memset(avec[idx].iov_base + conf->off_in_head, 0,
+		       get_atom_size(priv) - conf->off_in_head);
+	}
+	if (!lo->head_is_tail && lo->tail_is_partial) {
+		/* setup_tail */
+		idx = num_blocks - 1;
+		avec[idx].iov_base = &pool[idx];
+		avec[idx].iov_size = get_atom_size(priv);
+		memset(avec[idx].iov_base, 0, conf->off_in_tail);
+	}
+	if (nr_full_blocks) {
+		/* setup middle */
+		idx = lo->tail_is_partial ? 1 : 0;
+		avec[idx].iov_base = &pool[idx];
+		avec[idx].iov_size = get_atom_size(priv);
+		memset(avec[idx].iov_base, 0, get_atom_size(priv));
+	}
+	lo->avec = avec;
+	lo->pool = pool;
+	lo->blocks_in_pool = num_blocks;
+	return 0;
+ free:
+	FREE(avec);
+	FREE(pool);
+	return ret;
+}
+
+int32_t local_setup_data(call_frame_t *frame,
+			 xlator_t *this,
+			 struct iovec *vec,
+			 int32_t count,
+			 off_t offset)
+{
+	int32_t ret = ENOMEM;
+	crypt_private_t *priv = this->private;
+	crypt_wlocal_t *local = frame->local;
+
+	uint32_t resid;
+	uint32_t atom_size;
+	uint32_t atom_bits;
+
+	size_t orig_size;
+	off_t orig_offset;
+	size_t expanded_size;
+	off_t aligned_offset;
+
+	struct iovec *avec; /* aligned vector */
+	uint32_t acount; /* number of components
+			    in the aligned vector */
+	char **pool;
+	uint32_t blocks_in_pool = 0;
 
+	int64_t first_full_block; /* signed, can be -1 */
+	int64_t last_full_block;
+
+	int32_t off_in_head = 0;
+	int32_t off_in_tail = 0;
+	int64_t nr_full_blocks;
+
+	unsigned int d_head_is_partial = 0;
+	unsigned int d_tail_is_partial = 0;
+	unsigned int d_head_is_d_tail = 0;
+
+	orig_size = iovec_get_size(vec, count);
+	orig_offset = offset;
+	atom_size = get_atom_size(priv);
+	atom_bits = get_atom_bits(priv);
 	/*
-	 * Mostly we let AES_set_encrypt_key do key-size checking, since
-	 * it'll do it anyway even if we already did.  The exceptions are (1)
-	 * a null key which is practically free to check in the switch, and
-	 * (2) the hex-key length which could cause an overflow in this code
-	 * before we ever call AES_set_encrypt_key.
+	 * Round-down the start,
+	 * round-up the end.
 	 */
-	switch (data->data[0]) {
-	case '\0':
-		gf_log(__func__,GF_LOG_DEBUG,"null key");
-		return EINVAL;
-	case '%':	/* hex key */
-		gf_log(__func__,GF_LOG_DEBUG,"handling hex key");
-		switch (data->len) {
-		case 34:	/* AES-128 = "%<32x>\0" */
-		case 50:	/* AES-192 = "%<48x>\0" */
-		case 66:	/* AES-256 = "%<64x>\0" */
-			break;
-		default:
-			gf_log(__func__,GF_LOG_DEBUG,"bad hex-key length");
-			return EINVAL;
-		}
-		for (i = 0; i < (data->len / 2 - 1); ++i) {
-			if (sscanf(data->data+i*2+1,"%2x",&hex_byte) != 1) {
-				break;
-			}
-			hex_buf[i] = hex_byte & 0xff;
+	/* calculate head,
+	   expand size backward */
+	resid = offset & (uint64_t)(atom_size - 1);
+	aligned_offset = offset - resid;
+	expanded_size = orig_size + resid;
+
+	first_full_block = offset >> atom_bits;
+	if (resid) {
+		first_full_block += 1;
+		d_head_is_partial = 1;
+		off_in_head = resid;
+	}
+	/* calculate tail,
+	   expand size forward  */
+	resid = (offset + orig_size) & (uint64_t)(atom_size - 1);
+
+	last_full_block =
+		((offset + orig_size) >> atom_bits) - 1;
+	if (resid) {
+		d_tail_is_partial = 1;
+		off_in_tail = resid;
+		expanded_size += (atom_size - resid);
+	}
+	/* calculate full number
+	   of blocks to write */
+	nr_full_blocks =
+		last_full_block - first_full_block + 1;
+	if (nr_full_blocks < 0) {
+		/* there is only one partial
+		   block to write */
+		d_head_is_d_tail = 1;
+		nr_full_blocks = 0;
+	}
+	gf_log(this->name, GF_LOG_DEBUG, "writing %lu at %ld",
+	       expanded_size, aligned_offset);
+
+	/* total number of blocks to write */
+	acount = expanded_size >> atom_bits;
+
+	avec = CALLOC(acount, sizeof(*avec));
+	if (!avec)
+		return ret;
+	pool = CALLOC(acount, sizeof(*pool));
+	if (!pool) {
+		FREE(avec);
+		return ret;
+	}
+	local->orig_size = orig_size;
+	local->orig_offset = orig_offset;
+	local->expanded_size = expanded_size;
+	local->aligned_offset = aligned_offset;
+
+	local->avec = avec;
+	local->acount = acount;
+	local->off_in_tail = off_in_tail;
+	local->off_in_head = off_in_head;
+	local->nr_full_blocks = nr_full_blocks;
+
+	ret = align_iov_by_atoms(priv, expanded_size, vec, avec,
+				 pool, &blocks_in_pool);
+	if (ret)
+		goto free;
+	return 0;
+ free:
+	FREE(avec);
+	while (blocks_in_pool)
+		free(pool[--blocks_in_pool]);
+	free(pool);
+	return ret;
+}
+
+/*
+ * Convert a hole to zeros and prepare the respective
+ * iovecs for submission. The hole lock must be held.
+ */
+int32_t setup_config_hole(call_frame_t *frame, xlator_t *this,
+			  uint64_t i_size, off_t offset)
+{
+	uint32_t ret;
+	crypt_private_t *priv = this->private;
+	crypt_wlocal_t *local = frame->local;
+	struct avec_config *conf = &local->hole_conf;
+
+	ret = setup_config_offsets(frame, this, i_size,
+				   offset - i_size /* hole size */, conf);
+	if (ret)
+		return ret;
+	ret = setup_config_avec_hole(conf, priv);
+	return ret;
+}
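+
+/*
+ * Example: appending at offset 5000 to a file of size 100
+ * (atom_size = 512) produces the hole [100, 5000). The hole
+ * config then describes a partial head block [0, 512) zeroed
+ * from byte 100 on, eight full zero blocks covering
+ * [512, 4608) (all backed by the single zeroed middle block
+ * of the pool), and a partial tail block [4608, 5120) zeroed
+ * up to byte 392, where the new data begins.
+ */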
+
+int32_t setup_config_data(call_frame_t *frame, xlator_t *this,
+			  uint64_t from, off_t count, struct iovec *vec)
+{
+	uint32_t ret;
+	crypt_private_t *priv = this->private;
+	crypt_wlocal_t *local = frame->local;
+	struct avec_config *conf = &local->data_conf;
+
+	ret = setup_config_offsets(frame, this, from, count, conf);
+	if (ret)
+		return ret;
+	ret = setup_config_avec_data(priv, conf, vec);
+	return ret;
+}
+
+void free_data_avec(crypt_wlocal_t *local)
+{
+	struct iovec *avec;
+	char **pool;
+	int blocks_in_pool;
+
+	avec = local->data_conf.avec;
+	pool = local->data_conf.pool;
+	blocks_in_pool = local->data_conf.blocks_in_pool;
+
+	FREE(avec);
+	while (blocks_in_pool)
+		free(pool[--blocks_in_pool]);
+	free(pool);
+}
+
+void free_hole_avec(crypt_wlocal_t *local)
+{
+	if (!local->hole_conf.acount)
+		return;
+	FREE(local->hole_conf.avec);
+	FREE(local->hole_conf.pool);
+}
+
+/* lock/unlock object for read-modify-write */
+/* FIXME */
+#define LOCK_OBJECT_RMW(object)    //LOCK(object)
+#define UNLOCK_OBJECT_RMW(object)  //UNLOCK(object)
+
+/* Pre-conditions:
+ * @vec represents data in a file at @offset to be
+ * encrypted.
+ *
+ * Align the @vec and keep all needed info in
+ * @local. Pass the result to the next translator.
+ * All resources will be released in the callback
+ * function (crypt_writev_cbk).
+ */
+int32_t crypt_writev_atomic(call_frame_t *frame,
+			    xlator_t *this,
+			    fd_t *fd,
+			    struct iovec *vec,
+			    int32_t count,
+			    off_t offset,
+			    struct iobref *iobref)
+{
+	crypt_private_t *priv = this->private;
+	crypt_wlocal_t *local = frame->local;
+
+#if 0
+	/*
+	 * We perform every write in 3 steps:
+	 *
+	 * 1) write partial head block;
+	 * 2) write complete blocks in the middle;
+	 * 3) write partial tail block;
+	 *
+	 * Partial writes are serialized.
+	 */
+	/* Step 1 */
+	if (d_head_is_partial) {
+		/*
+		 * make head uptodate and modify it
+		 */
+		LOCK_OBJECT_RMW(lock);
+		STACK_WIND(frame,
+			   uptodate_and_encrypt_head_readv_cbk,
+			   this,
+			   this->fops->readv, /* crypt_readv */
+			   fd,
+			   atom_size,
+			   aligned_offset /* offset of
+					     head block */);
+
+		if (local->op_ret < 0) {
+			ret = local->op_errno;
+			UNLOCK_OBJECT_RMW(lock);
+			goto free;
 		}
-		rc = AES_set_encrypt_key(hex_buf,i*8,key);
-		break;
-	case '/':	/* key in file */
-		gf_log(__func__,GF_LOG_DEBUG,"handling file key");
-		fd = open(data->data,O_RDONLY);
-		if (fd < 0) {
-			gf_log(__func__,GF_LOG_ERROR,"could not open key file");
-			return EINVAL;
+		/*
+		 * write head
+		 */
+		STACK_WIND(frame,
+			   crypt_writev_cbk,
+			   FIRST_CHILD(this),
+			   FIRST_CHILD(this)->fops->writev,
+			   fd,
+			   &avec[0], /* head block is the first
+				      * component of aligned
+				      * vector */
+			   1,
+			   orig_offset,
+			   iobref);
+		UNLOCK_OBJECT_RMW(lock);
+	}
+	/* Step 2 */
+	if (nr_full_blocks) {
+		/*
+		 * encrypt full blocks
+		 */
+		ret = encrypt_aligned_iov(priv,
+					  &local->mid,
+					  &avec[d_head_is_partial],
+					  nr_full_blocks,
+					  aligned_offset +
+					  (d_head_is_partial << atom_bits));
+		if (ret)
+			goto free;
+		/* write full blocks */
+		STACK_WIND(frame,
+			   crypt_writev_cbk,
+			   FIRST_CHILD(this),
+			   FIRST_CHILD(this)->fops->writev,
+			   fd,
+			   &avec[d_head_is_partial],
+			   nr_full_blocks,
+			   aligned_offset + (d_head_is_partial << atom_bits),
+			   iobref);
+	}
+	/* Step 3 */
+	if (!d_head_is_d_tail && d_tail_is_partial) {
+		LOCK_OBJECT_RMW(lock);
+		/*
+		 * make tail uptodate and modify it
+		 */
+		STACK_WIND(frame,
+			   uptodate_and_encrypt_tail_readv_cbk,
+			   this,
+			   this->fops->readv, /* crypt_readv */
+			   fd,
+			   atom_size,
+			   aligned_offset + expanded_size - atom_size);
+
+		if (local->op_ret < 0) {
+			ret = local->op_errno;
+			UNLOCK_OBJECT_RMW(lock);
+			goto free;
 		}
-		rc = read(fd,file_buf,sizeof(file_buf));
-		close(fd);
-		for (i = 0; i < (rc / 2); ++i) {
-			if (sscanf(file_buf+i*2,"%2x",&hex_byte) != 1) {
-				break;
-			}
-			hex_buf[i] = hex_byte & 0xff;
+		/*
+		 * write tail
+		 */
+		STACK_WIND(frame,
+			   crypt_writev_cbk,
+			   FIRST_CHILD(this),
+			   FIRST_CHILD(this)->fops->writev,
+			   fd,
+			   &avec[d_head_is_partial + nr_full_blocks],
+			   1,
+			   aligned_offset + expanded_size - atom_size,
+			   iobref);
+		UNLOCK_OBJECT_RMW(lock);
+	}
+#endif
+	/*
+	 * We perform every write in 3 steps:
+	 *
+	 * 1) send an inquiry to submit the partial data head block;
+	 * 2) send an inquiry to submit the partial data tail block;
+	 * 3) submit complete blocks in the middle without inquiry.
+	 */
+	int32_t ret;
+	struct avec_config *conf = &local->data_conf;
+
+	ret = setup_config_data(frame, this, offset,
+				iovec_get_size(vec, count), vec);
+	if (ret)
+		return ret;
+	/*
+	 * first, set up the fanout number for data writes;
+	 * no lock is required yet
+	 */
+	local->nr_calls = conf->head_is_partial +
+		(!conf->head_is_tail && conf->tail_is_partial) +
+		!!conf->nr_full_blocks;
+	/*
+	 * spawn writes
+	 */
+	if (conf->head_is_partial)
+		submit_rmw_begin(frame, this,
+				 atom_by_type(DATA_HEAD_ATOM),
+				 1 /* first attempt */);
+	if (!conf->head_is_tail && conf->tail_is_partial)
+		submit_rmw_begin(frame, this,
+				 atom_by_type(DATA_TAIL_ATOM),
+				 1 /* first attempt */);
+	if (conf->nr_full_blocks) {
+		/*
+		 * encrypt full blocks
+		 */
+		ret = encrypt_aligned_iov(priv,
+					  &local->mid,
+					  &conf->avec[conf->head_is_partial],
+					  conf->nr_full_blocks,
+					  conf->aligned_offset +
+					  (conf->head_is_partial <<
+					   get_atom_bits(priv)));
+		if (ret)
+			goto free;
+		/*
+		 * Write full blocks.
+		 * There is no need for serialization.
+		 */
+		STACK_WIND(frame,
+			   crypt_writev_cbk,
+			   FIRST_CHILD(this),
+			   FIRST_CHILD(this)->fops->writev,
+			   fd,
+			   &conf->avec[conf->head_is_partial],
+			   conf->nr_full_blocks,
+			   conf->aligned_offset +
+			   (conf->head_is_partial << get_atom_bits(priv)),
+			   iobref);
+	}
+	/* otherwise there are no full blocks to write;
+	 * the hole (if any) will be handled by one of
+	 * the callbacks spawned above */
+	return 0;
+ free:
+	free_data_avec(local);
+	return ret;
+}
+
+int32_t crypt_writev_inatomic(call_frame_t *frame,
+			      xlator_t *this,
+			      fd_t *fd,
+			      struct iovec *vec,
+			      int32_t count,
+			      off_t off,
+			      struct iobref *iobref)
+{
+	int32_t ret;
+	crypt_private_t *priv = this->private;
+	crypt_wlocal_t *local = frame->local;
+
+	local->orig_offset = off;
+
+	ret = encrypt_iov_inatomic(priv, &local->mid, vec, count, off);
+	if (ret)
+		return ret;
+	STACK_WIND(frame,
+		   crypt_writev_cbk,
+		   FIRST_CHILD(this),
+		   FIRST_CHILD(this)->fops->writev,
+		   fd,
+		   vec,
+		   count,
+		   off,
+		   iobref);
+	return 0;
+}
+
+int crypt_writev (call_frame_t *frame,
+		  xlator_t *this,
+		  fd_t *fd,
+		  struct iovec *vec,
+		  int32_t count,
+		  off_t offset,
+		  struct iobref *iobref)
+{
+	int32_t ret = ENOMEM;
+	crypt_wlocal_t *local;
+
+	local = CALLOC(1, sizeof(*local));
+	if (!local)
+		goto exit;
+	frame->local = local;
+	set_mid(&local->mid, fd);
+	local->fd = fd;
+	local->iobref = iobref;
+
+	if (in_atomic_cipher_mode(this->private))
+		ret = crypt_writev_atomic(frame, this, fd, vec, count,
+					  offset, iobref);
+	else
+		ret = crypt_writev_inatomic(frame, this, fd, vec, count,
+					    offset, iobref);
+ exit:
+	if (ret)
+		STACK_UNWIND_STRICT(writev,
+				    frame,
+				    -1,
+				    ret /* op_errno */,
+				    NULL,
+				    NULL);
+	return 0;
+}
+
+int32_t crypt_open_cbk(call_frame_t *frame,
+		       void *cookie,
+		       xlator_t *this,
+		       int32_t op_ret,
+		       int32_t op_errno,
+		       fd_t *fd)
+{
+	STACK_UNWIND_STRICT(open, frame, op_ret, op_errno, fd);
+	return 0;
+}
+
+int32_t crypt_open(call_frame_t *frame,
+		   xlator_t *this,
+		   loc_t *loc,
+		   int32_t flags,
+		   fd_t *fd,
+		   int32_t wbflags)
+{
+	if (in_atomic_cipher_mode(this->private)) {
+		/*
+		 * We can't open O_WRONLY, because we need
+		 * to do read-modify-write.
+		 */
+		if ((flags & O_ACCMODE) == O_WRONLY)
+			flags = (flags & ~O_ACCMODE) | O_RDWR;
+	}
+	STACK_WIND(frame,
+		   crypt_open_cbk,
+		   FIRST_CHILD(this),
+		   FIRST_CHILD(this)->fops->open,
+		   loc,
+		   flags,
+		   fd,
+		   wbflags);
+	return 0;
+}
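+
+/*
+ * Example: in an atomic mode an application's
+ * open(path, O_WRONLY | O_APPEND) reaches the child translator
+ * as open(path, O_RDWR | O_APPEND), so that partial head and
+ * tail blocks can be read back for read-modify-write.
+ */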
+
+int32_t crypt_create_cbk (call_frame_t *frame,
+			void *cookie,
+			xlator_t *this,
+			int32_t op_ret,
+			int32_t op_errno,
+			fd_t *fd,
+			inode_t *inode,
+			struct iatt *buf,
+			struct iatt *preparent,
+			struct iatt *postparent)
+{
+	STACK_UNWIND_STRICT(create, frame, op_ret, op_errno, fd, inode, buf,
+			    preparent, postparent);
+	return 0;
+}
+
+int32_t crypt_create(call_frame_t *frame,
+		     xlator_t *this,
+		     loc_t *loc,
+		     int32_t flags,
+		     mode_t mode,
+		     fd_t *fd,
+		     dict_t *params)
+{
+	if (in_atomic_cipher_mode(this->private)) {
+		/*
+		 * We can't open O_WRONLY, because we
+		 * need to do read-modify-write.
+		 */
+		if ((flags & O_ACCMODE) == O_WRONLY) {
+			flags = (flags & ~O_ACCMODE) | O_RDWR;
 		}
-		rc = AES_set_encrypt_key(hex_buf,i*8,key);
+	}
+	STACK_WIND(frame,
+		   crypt_create_cbk,
+		   FIRST_CHILD(this),
+		   FIRST_CHILD(this)->fops->create,
+		   loc,
+		   flags,
+		   mode,
+		   fd,
+		   params);
+	return 0;
+}
+
+int32_t cipher_set_block_size (xlator_t *this, crypt_private_t *priv)
+{
+	data_t *data;
+	int size;
+
+	data = dict_get(this->options, "blocksize");
+	if (in_atomic_cipher_mode(priv) && data == NULL) {
+		gf_log(this->name, GF_LOG_ERROR,
+		       "FATAL: blocksize missing");
+		return -1;
+	}
+	if (!data)
+		return 0;
+	size = atoi(data->data);
+	switch(size) {
+	case 512:
+		priv->block_bits = 9;
+		break;
+	case 1024:
+		priv->block_bits = 10;
 		break;
-	default:	/* text key */
-		gf_log(__func__,GF_LOG_DEBUG,"handling text key");
-		rc = AES_set_encrypt_key(data->data,(data->len-1)*8,key);
+	case 2048:
+		priv->block_bits = 11;
+		break;
+	case 4096:
+		priv->block_bits = 12;
+		break;
+	default:
+		gf_log("crypt", GF_LOG_ERROR,
+		       "FATAL: unsupported block size %d", size);
+		return -1;
 	}
+	priv->block_size = size;
+	return 0;
+}
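
The switch above hard-codes the log2 of each supported size. An
equivalent table-free derivation for any power-of-two size could look
like this sketch (size_to_bits is hypothetical and not in the patch):

	#include <stdint.h>

	static int size_to_bits(uint32_t size)
	{
		int bits = 0;

		/* reject zero and non-powers-of-two */
		if (size == 0 || (size & (size - 1)) != 0)
			return -1;
		while (((uint32_t)1 << bits) < size)
			bits++;
		return bits;	/* e.g. 512 -> 9, 4096 -> 12 */
	}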
+
+int aes_set_key_bits (xlator_t *this, crypt_private_t *priv)
+{
+	data_t *data;
+	int bits;
 
-	return rc ? EINVAL : 0;
+	data = dict_get(this->options, "kbits");
+	if (!data) {
+		gf_log(this->name, GF_LOG_ERROR,
+		       "FATAL: kbits missing");
+		return -1;
+	}
+	bits = atoi(data->data);
+	switch(bits) {
+	case 128:
+	case 192:
+	case 256:
+		break;
+	default:
+		gf_log("crypt", GF_LOG_ERROR,
+		       "FATAL: wrong key bits %d", bits);
+		return -1;
+	}
+	priv->key_bits = bits;
+	return 0;
+}
+
+int cipher_set_mode (xlator_t *this, crypt_private_t *priv)
+{
+	data_t *data;
+
+	data = dict_get(this->options, "mode");
+	if (!data) {
+		gf_log(this->name, GF_LOG_ERROR,
+		       "FATAL: mode missing");
+		return -1;
+	}
+	if (!strcmp(data->data, "cfb"))
+		priv->mode = AES_CFB;
+	else if (!strcmp(data->data, "ofb"))
+		priv->mode = AES_OFB;
+	else if (!strcmp(data->data, "ctr"))
+		priv->mode = AES_CTR;
+	else {
+		gf_log ("crypt", GF_LOG_ERROR,
+			"FATAL: unsupported cipher mode %s", data->data);
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * TBD: the option should specify a file containing a key
+ */
+int32_t aes_set_keys(xlator_t *this, crypt_private_t *priv)
+{
+	data_t *data;
+	int key_size;
+	int ret;
+
+	data = dict_get(this->options, "key");
+	if (!data) {
+		gf_log(this->name, GF_LOG_ERROR,
+		       "FATAL: key missing");
+		return -1;
+	}
+	key_size = strlen(data->data);
+	if (key_size != priv->key_bits >> 3) {
+		gf_log(this->name, GF_LOG_ERROR,
+		       "FATAL: wrong key size %d", key_size);
+		return -1;
+	}
+	ret = AES_set_encrypt_key((unsigned char *)data->data,
+				  priv->key_bits,
+				  &priv->key[AES_ENCRYPT]);
+	if (ret) {
+		gf_log(this->name, GF_LOG_ERROR,
+		       "FATAL: AES_set_encrypt_key failed");
+		return -1;
+	}
+	ret = AES_set_decrypt_key((unsigned char *)data->data,
+				  priv->key_bits,
+				  &priv->key[AES_DECRYPT]);
+	if (ret) {
+		gf_log(this->name, GF_LOG_ERROR,
+		       "FATAL: AES_set_decrypt_key failed");
+		return -1;
+	}
+	return 0;
 }
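
On the TBD above: loading the key from a file named in the volfile might
look like the following sketch (aes_read_key_file is hypothetical; error
logging elided):

	#include <stdio.h>

	static int aes_read_key_file(const char *path,
				     unsigned char *buf, size_t len)
	{
		FILE *f = fopen(path, "rb");
		size_t got;

		if (!f)
			return -1;
		got = fread(buf, 1, len, f);
		fclose(f);
		return (got == len) ? 0 : -1;
	}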
 
-int32_t
-init (xlator_t *this)
+int32_t init (xlator_t *this)
 {
+	int32_t ret;
 	crypt_private_t *priv = NULL;
-	int32_t          status = 0;
 
 	if (!this->children || this->children->next) {
 		gf_log ("crypt", GF_LOG_ERROR,
 			"FATAL: crypt should have exactly one child");
-		return -1;
+		return EINVAL;
 	}
-
 	if (!this->parents) {
 		gf_log (this->name, GF_LOG_WARNING,
 			"dangling volume. check volfile ");
 	}
-
-	priv = CALLOC (sizeof (crypt_private_t), 1);
-	if (!priv) {
-		return -1;
-	}
+	priv = CALLOC(1, sizeof(*priv));
+	if (!priv)
+		return ENOMEM;
 	this->private = priv;
-
-	/*
-	 * TBD: the option should really specify a file containing a longer key
-	 * for a better encryption algorithm.
-	 */
-	status = crypt_set_key(dict_get(this->options,"key"),&priv->key);
-	if (status != 0) {
-		gf_log(this->name,GF_LOG_ERROR,"key missing");
-		return status;
-	}
+	ret = cipher_set_mode(this, priv);
+	if (ret)
+		goto error;
+	ret = cipher_set_block_size(this, priv);
+	if (ret)
+		goto error;
+	ret = aes_set_key_bits(this, priv);
+	if (ret)
+		goto error;
+	ret = aes_set_keys(this, priv);
+	if (ret)
+		goto error;
 	gf_log ("crypt", GF_LOG_INFO, "crypt xlator loaded");
 	return 0;
+ error:
+	FREE (priv);
+	this->private = NULL;
+	return EINVAL;
 }
 
-void
-fini (xlator_t *this)
+void fini (xlator_t *this)
 {
 	crypt_private_t *priv = this->private;
-
 	FREE (priv);
-
 	return;
 }
 
 struct xlator_fops fops = {
 	.readv        = crypt_readv,
 	.writev       = crypt_writev,
+	.open         = crypt_open,
+	.create       = crypt_create
 };
 
 struct xlator_cbks cbks = {
@@ -371,5 +2214,53 @@ struct volume_options options[] = {
 	{ .key = {"key"},
 	  .type = GF_OPTION_TYPE_STR
 	},
+	{ .key = {"kbits"},
+	  .type = GF_OPTION_TYPE_STR
+	},
+	{ .key = {"blocksize"},
+	  .type = GF_OPTION_TYPE_STR
+	},
+	{ .key = {"mode"},
+	  .type = GF_OPTION_TYPE_STR
+	},
 	{ .key  = {NULL} },
 };
+
+static struct rmw_atom atoms[LAST_RMW_ATOM];
+
+struct rmw_atom *atom_by_type(rmw_atom_type type)
+{
+	return &atoms[type];
+}
+
+static struct rmw_atom atoms[LAST_RMW_ATOM] = {
+	{ .read_modify = read_modify_data_head,
+	  .handle_event = handle_event_data_head,
+	  .get_offset = get_offset_data_head,
+	  .get_iovec = get_iovec_data_head
+	},
+	{ .read_modify = read_modify_data_tail,
+	  .handle_event = handle_event_data_tail,
+	  .get_offset = get_offset_data_tail,
+	  .get_iovec = get_iovec_data_tail
+	},
+	{ .read_modify = read_modify_hole_head,
+	  .handle_event = handle_event_hole_head,
+	  .get_offset = get_offset_hole_head,
+	  .get_iovec = get_iovec_hole_head
+	},
+	{ .read_modify = read_modify_hole_tail,
+	  .handle_event = handle_event_hole_tail,
+	  .get_offset = get_offset_hole_tail,
+	  .get_iovec = get_iovec_hole_tail
+	}
+};
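
A caller on the read-modify-write path is expected to look the per-atom
operations up through the table, roughly as in this sketch (frame setup
elided):

	struct rmw_atom *atom = atom_by_type(DATA_HEAD_ATOM);
	loff_t off = atom->get_offset(frame);
	struct iovec *iov = atom->get_iovec(frame);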
+
+/*
+  Local variables:
+  c-indentation-style: "K&R"
+  mode-name: "LC"
+  c-basic-offset: 8
+  tab-width: 8
+  fill-column: 80
+  scroll-step: 1
+  End:
+*/
diff --git a/xlators/encryption/crypt/src/crypt.h b/xlators/encryption/crypt/src/crypt.h
index 0aa228b..c8a8d6f 100644
--- a/xlators/encryption/crypt/src/crypt.h
+++ b/xlators/encryption/crypt/src/crypt.h
@@ -26,23 +26,127 @@
 #include "config.h"
 #endif
 
-#include <limits.h>
+#include <openssl/des.h>
 #include <openssl/aes.h>
 
-#define GFID_SIZE  16
-#define BLOCK_SIZE 1024
+typedef enum {
+	AES_CFB,
+	AES_OFB,
+	AES_CTR,
+	AES_LAST_MODE
+} crypt_mode_t;
 
-#define DPRINTF(fmt,args...) do {			\
-	gf_log(__func__,GF_LOG_DEBUG,fmt,##args);	\
-} while (0);
+typedef enum {
+	HEAD_HOLE_IOV,
+	MIDL_HOLE_IOV,
+	TAIL_HOLE_IOV,
+	LAST_HOLE_IOV
+} hole_iov_type;
 
 typedef struct {
-	AES_KEY		key;
+	crypt_mode_t     mode;
+	uint32_t         block_size;
+	uint32_t         block_bits;
+	uint32_t         key_bits;
+	AES_KEY          key[2];
 } crypt_private_t;
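
A note on key[2]: OpenSSL's CFB/OFB/CTR helpers take the encryption
schedule for both directions and select the direction with the enc flag,
so a CFB call against the struct above would look roughly like this
sketch (plain, cipher, len and IV setup assumed):

	unsigned char ivec[AES_BLOCK_SIZE];
	int num = 0;

	/* decryption in CFB also passes &priv->key[AES_ENCRYPT],
	 * with AES_DECRYPT as the last argument */
	AES_cfb128_encrypt(plain, cipher, len,
			   &priv->key[AES_ENCRYPT], ivec, &num,
			   AES_ENCRYPT);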
 
+/* For reads. */
 typedef struct {
-	off_t		offset;
-	uuid_t		gfid;
+	uint64_t mid;
+	size_t   orig_size;
+	off_t    orig_offset;
+	size_t   expanded_size;
+	off_t    aligned_offset;
 } crypt_rlocal_t;
 
+/* This describes an aligned vector. */
+struct avec_config {
+	size_t orig_size;
+	off_t orig_offset;
+	size_t expanded_size;
+	off_t aligned_offset;
+
+	int32_t off_in_head;
+	int32_t off_in_tail;
+	int64_t nr_full_blocks;
+
+	unsigned int head_is_partial;
+	unsigned int tail_is_partial;
+	unsigned int head_is_d_tail;
+
+	struct iovec *avec; /* aligned vector */
+	uint32_t acount; /* number of avec components */
+	char *pool;
+	uint32_t blocks_in_pool;
+};
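
For illustration, the geometry fields above would typically be derived
from the original (offset, size) pair as in this sketch (setup_geometry
is hypothetical; nr_full_blocks and the head_is_d_tail case are
omitted):

	static void setup_geometry(struct avec_config *c, off_t off,
				   size_t size, int block_bits)
	{
		off_t blksz = (off_t)1 << block_bits;
		off_t end = off + size;

		c->orig_offset    = off;
		c->orig_size      = size;
		/* round the region out to block boundaries */
		c->aligned_offset = off & ~(blksz - 1);
		c->expanded_size  = ((end + blksz - 1) & ~(blksz - 1)) -
				    c->aligned_offset;
		/* payload start/end inside the head/tail atoms */
		c->off_in_head     = off - c->aligned_offset;
		c->off_in_tail     = end & (blksz - 1);
		c->head_is_partial = (c->off_in_head != 0);
		c->tail_is_partial = (c->off_in_tail != 0);
	}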
+
+/* For writes. */
+typedef struct {
+	uint64_t mid;
+	gf_lock_t call_lock; /* protect nr_calls from many cbks */
+	int32_t nr_calls;
+
+	/* data setup */
+	struct avec_config data_conf;
+
+	/* hole setup */
+	gf_lock_t hole_lock; /* protect hole config from many cbks */
+	int hole_handled;
+	struct avec_config hole_conf;
+
+	/* for ->readv() call in crypt_writev_atomic() */
+	int32_t op_ret;
+	int32_t op_errno;
+} crypt_wlocal_t;
+
+typedef enum {
+	DATA_HEAD_ATOM,
+	DATA_TAIL_ATOM,
+	HOLE_HEAD_ATOM,
+	HOLE_TAIL_ATOM,
+	LAST_RMW_ATOM
+} rmw_atom_type;
+
+/* This represents a read-modify-write atom */
+struct rmw_atom {
+	/*
+	 * This method is called to bring the atom
+	 * up to date and modify it.
+	 */
+	int32_t (*read_modify)(call_frame_t *frame,
+			       void *cookie,
+			       xlator_t *this,
+			       int32_t op_ret,
+			       int32_t op_errno,
+			       struct iovec *vec,
+			       int32_t count,
+			       struct iatt *stbuf,
+			       struct iobref *iobref);
+	/*
+	 * This method is called after an enquiry to
+	 * submit has been sent and the answer has
+	 * been received from the locking manager.
+	 */
+	int32_t (*handle_event)(call_frame_t *frame,
+				void *cookie,
+				xlator_t *this,
+				int32_t op_ret,
+				int32_t op_errno,
+				dict_t *dict);
+	loff_t (*get_offset)(call_frame_t *frame);
+	struct iovec *(*get_iovec)(call_frame_t *frame);
+};
+
 #endif /* __CRYPT_H__ */
+
+/*
+  Local variables:
+  c-indentation-style: "K&R"
+  mode-name: "LC"
+  c-basic-offset: 8
+  tab-width: 8
+  fill-column: 80
+  scroll-step: 1
+  End:
+*/
