master - scan: use 128K bcache block size
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=b504bb809efa8a3a4db...
Commit: b504bb809efa8a3a4db0e494bb80d3720d82e6ff
Parent: ae093df3f15f6d125bf85ed921e5a971b170dc23
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Thu Feb 15 10:27:43 2018 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Fri Apr 20 11:22:46 2018 -0500
scan: use 128K bcache block size
---
lib/label/label.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/lib/label/label.c b/lib/label/label.c
index dc14beb..38db733 100644
--- a/lib/label/label.c
+++ b/lib/label/label.c
@@ -253,7 +253,7 @@ struct label *label_create(struct labeller *labeller)
/* global variable for accessing the bcache populated by label scan */
struct bcache *scan_bcache;
-#define BCACHE_BLOCK_SIZE_IN_SECTORS 2048 /* 1MB */
+#define BCACHE_BLOCK_SIZE_IN_SECTORS 256 /* 256*512 = 128K */
static bool _in_bcache(struct device *dev)
{
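A quick check of the arithmetic behind the new constant (SECTOR_SHIFT is 9, i.e. 512-byte sectors, as defined in bcache.c below):

    #include <assert.h>
    #include <stdint.h>

    /* Sanity-check the block-size constants above (512-byte sectors). */
    int main(void)
    {
        uint64_t old_bytes = UINT64_C(2048) << 9;  /* 2048 sectors -> 1MB   */
        uint64_t new_bytes = UINT64_C(256) << 9;   /* 256 sectors  -> 128K  */

        assert(old_bytes == 1024 * 1024);
        assert(new_bytes == 128 * 1024);
        return 0;
    }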
master - [device/bcache] Initial code drop.
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=acb42ec4652450f7cd7...
Commit: acb42ec4652450f7cd73440929fd57f1487282bf
Parent: 00f1b208a1bf44665ec97a791355b1fcf525a3a7
Author: Joe Thornber <ejt(a)redhat.com>
AuthorDate: Tue Jan 30 10:46:08 2018 +0000
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Fri Apr 20 11:10:45 2018 -0500
[device/bcache] Initial code drop.
Compiles. Tests not written yet.
---
lib/device/bcache.c | 833 +++++++++++++++++++++++++++++++++++++++++++++++++++
lib/device/bcache.h | 83 +++++
2 files changed, 916 insertions(+), 0 deletions(-)
diff --git a/lib/device/bcache.c b/lib/device/bcache.c
new file mode 100644
index 0000000..1be626c
--- /dev/null
+++ b/lib/device/bcache.c
@@ -0,0 +1,833 @@
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <libaio.h>
+#include <unistd.h>
+#include <linux/fs.h>
+#include <sys/ioctl.h>
+#include <sys/user.h>
+
+#include "bcache.h"
+#include "dm-logging.h"
+#include "log.h"
+
+#define SECTOR_SHIFT 9L
+
+//----------------------------------------------------------------
+
+// Assumes the list is not empty.
+static inline struct dm_list *_list_pop(struct dm_list *head)
+{
+ struct dm_list *l;
+
+ l = head->n;
+ dm_list_del(l);
+ return l;
+}
+
+//----------------------------------------------------------------
+
+struct control_block {
+ struct dm_list list;
+ void *context;
+ struct iocb cb;
+};
+
+struct cb_set {
+ struct dm_list free;
+ struct dm_list allocated;
+ struct control_block *vec;
+} control_block_set;
+
+static struct cb_set *_cb_set_create(unsigned nr)
+{
+ int i;
+ struct cb_set *cbs = malloc(sizeof(*cbs));
+
+ if (!cbs)
+ return NULL;
+
+ cbs->vec = malloc(nr * sizeof(*cbs->vec));
+ if (!cbs->vec) {
+ free(cbs);
+ return NULL;
+ }
+
+ dm_list_init(&cbs->free);
+ dm_list_init(&cbs->allocated);
+
+ for (i = 0; i < nr; i++)
+ dm_list_add(&cbs->free, &cbs->vec[i].list);
+
+ return cbs;
+}
+
+static bool _cb_set_destroy(struct cb_set *cbs)
+{
+ if (!dm_list_empty(&cbs->allocated)) {
+ // FIXME: I think we should propagate this up.
+ log_error("async io still in flight");
+ return false;
+ }
+
+ free(cbs->vec);
+ free(cbs);
+ return true;
+}
+
+static struct control_block *_cb_alloc(struct cb_set *cbs, void *context)
+{
+ struct control_block *cb;
+
+ if (dm_list_empty(&cbs->free))
+ return NULL;
+
+ cb = dm_list_item(_list_pop(&cbs->free), struct control_block);
+ cb->context = context;
+ dm_list_add(&cbs->allocated, &cb->list);
+
+ return cb;
+}
+
+static void _cb_free(struct cb_set *cbs, struct control_block *cb)
+{
+ dm_list_del(&cb->list);
+ dm_list_add_h(&cbs->free, &cb->list);
+}
+
+static struct control_block *_iocb_to_cb(struct iocb *icb)
+{
+ return dm_list_struct_base(icb, struct control_block, cb);
+}
+
+//----------------------------------------------------------------
+
+// FIXME: write a sync engine too
+enum dir {
+ DIR_READ,
+ DIR_WRITE
+};
+
+struct io_engine {
+ io_context_t aio_context;
+ struct cb_set *cbs;
+};
+
+static struct io_engine *_engine_create(unsigned max_io)
+{
+ int r;
+ struct io_engine *e = malloc(sizeof(*e));
+
+ if (!e)
+ return NULL;
+
+ e->aio_context = 0;
+ r = io_setup(max_io, &e->aio_context);
+ if (r < 0) {
+ log_warn("io_setup failed");
+ free(e);
+ return NULL;
+ }
+
+ e->cbs = _cb_set_create(max_io);
+ if (!e->cbs) {
+ log_warn("couldn't create control block set");
+ free(e);
+ return NULL;
+ }
+
+ return e;
+}
+
+static void _engine_destroy(struct io_engine *e)
+{
+ _cb_set_destroy(e->cbs);
+ io_destroy(e->aio_context);
+ free(e);
+}
+
+static bool _engine_issue(struct io_engine *e, int fd, enum dir d,
+ sector_t sb, sector_t se, void *data, void *context)
+{
+ int r;
+ struct iocb *cb_array[1];
+ struct control_block *cb;
+
+ if (((uint64_t) data) & (PAGE_SIZE - 1)) {
+ log_err("misaligned data buffer");
+ return false;
+ }
+
+ cb = _cb_alloc(e->cbs, context);
+ if (!cb) {
+ log_err("couldn't allocate control block");
+ return false;
+ }
+
+ memset(&cb->cb, 0, sizeof(cb->cb));
+
+ cb->cb.aio_fildes = (int) fd;
+ cb->cb.u.c.buf = data;
+ cb->cb.u.c.offset = sb << SECTOR_SHIFT;
+ cb->cb.u.c.nbytes = (se - sb) << SECTOR_SHIFT;
+ cb->cb.aio_lio_opcode = (d == DIR_READ) ? IO_CMD_PREAD : IO_CMD_PWRITE;
+
+ cb_array[0] = &cb->cb;
+ r = io_submit(e->aio_context, 1, cb_array);
+ if (r < 0) {
+ log_sys_error("io_submit", "");
+ _cb_free(e->cbs, cb);
+ return false;
+ }
+
+ return true;
+}
+
+#define MAX_IO 64
+typedef void complete_fn(void *context, int io_error);
+
+static bool _engine_wait(struct io_engine *e, complete_fn fn)
+{
+ int i, r;
+ struct io_event event[MAX_IO];
+ struct control_block *cb;
+
+ memset(&event, 0, sizeof(event));
+ r = io_getevents(e->aio_context, 1, MAX_IO, event, NULL);
+ if (r < 0) {
+ log_sys_error("io_getevents", "");
+ return false;
+ }
+
+ for (i = 0; i < r; i++) {
+ struct io_event *ev = event + i;
+
+ cb = _iocb_to_cb((struct iocb *) ev->obj);
+
+ if (ev->res == cb->cb.u.c.nbytes)
+ fn((void *) cb->context, 0);
+
+ else if ((int) ev->res < 0)
+ fn(cb->context, (int) ev->res);
+
+ else {
+ log_err("short io");
+ fn(cb->context, -ENODATA);
+ }
+
+ _cb_free(e->cbs, cb);
+ }
+
+ return true;
+}
+
+//----------------------------------------------------------------
+
+#define MIN_BLOCKS 16
+#define WRITEBACK_LOW_THRESHOLD_PERCENT 33
+#define WRITEBACK_HIGH_THRESHOLD_PERCENT 66
+
+//----------------------------------------------------------------
+
+static void *_alloc_aligned(size_t len, size_t alignment)
+{
+ void *result = NULL;
+ int r = posix_memalign(&result, alignment, len);
+ if (r)
+ return NULL;
+
+ return result;
+}
+
+//----------------------------------------------------------------
+
+static bool _test_flags(struct block *b, unsigned bits)
+{
+ return (b->flags & bits) != 0;
+}
+
+static void _set_flags(struct block *b, unsigned bits)
+{
+ b->flags |= bits;
+}
+
+static void _clear_flags(struct block *b, unsigned bits)
+{
+ b->flags &= ~bits;
+}
+
+//----------------------------------------------------------------
+
+enum block_flags {
+ BF_IO_PENDING = (1 << 0),
+ BF_DIRTY = (1 << 1),
+};
+
+struct bcache {
+ int fd;
+ sector_t block_sectors;
+ uint64_t nr_data_blocks;
+ uint64_t nr_cache_blocks;
+
+ struct io_engine *engine;
+
+ void *raw_data;
+ struct block *raw_blocks;
+
+ /*
+ * Lists that categorise the blocks.
+ */
+ unsigned nr_locked;
+ unsigned nr_dirty;
+ unsigned nr_io_pending;
+
+ struct dm_list free;
+ struct dm_list errored;
+ struct dm_list dirty;
+ struct dm_list clean;
+ struct dm_list io_pending;
+
+ /*
+ * Hash table.
+ */
+ unsigned nr_buckets;
+ unsigned hash_mask;
+ struct dm_list *buckets;
+
+ /*
+ * Statistics
+ */
+ unsigned read_hits;
+ unsigned read_misses;
+ unsigned write_zeroes;
+ unsigned write_hits;
+ unsigned write_misses;
+ unsigned prefetches;
+};
+
+//----------------------------------------------------------------
+
+/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
+
+static unsigned _hash(struct bcache *cache, int fd, uint64_t index)
+{
+ uint64_t h = (index << 10) & fd;
+ h *= GOLDEN_RATIO_PRIME_64;
+ return h & cache->hash_mask;
+}
+
+static struct block *_hash_lookup(struct bcache *cache, int fd, uint64_t index)
+{
+ struct block *b;
+ unsigned h = _hash(cache, fd, index);
+
+ dm_list_iterate_items_gen (b, cache->buckets + h, hash)
+ if (b->index == index)
+ return b;
+
+ return NULL;
+}
+
+static void _hash_insert(struct block *b)
+{
+ unsigned h = _hash(b->cache, b->fd, b->index);
+ dm_list_add_h(b->cache->buckets + h, &b->hash);
+}
+
+static void _hash_remove(struct block *b)
+{
+ dm_list_del(&b->hash);
+}
+
+/*
+ * Must return a power of 2.
+ */
+static unsigned _calc_nr_buckets(unsigned nr_blocks)
+{
+ unsigned r = 8;
+ unsigned n = nr_blocks / 4;
+
+ if (n < 8)
+ n = 8;
+
+ while (r < n)
+ r <<= 1;
+
+ return r;
+}
+
+static int _hash_table_init(struct bcache *cache, unsigned nr_entries)
+{
+ unsigned i;
+
+ cache->nr_buckets = _calc_nr_buckets(nr_entries);
+ cache->hash_mask = cache->nr_buckets - 1;
+ cache->buckets = malloc(cache->nr_buckets * sizeof(*cache->buckets));
+ if (!cache->buckets)
+ return -ENOMEM;
+
+ for (i = 0; i < cache->nr_buckets; i++)
+ dm_list_init(cache->buckets + i);
+
+ return 0;
+}
+
+static void _hash_table_exit(struct bcache *cache)
+{
+ free(cache->buckets);
+}
+
+//----------------------------------------------------------------
+
+static int _init_free_list(struct bcache *cache, unsigned count)
+{
+ unsigned i;
+ size_t block_size = cache->block_sectors << SECTOR_SHIFT;
+ unsigned char *data =
+ (unsigned char *) _alloc_aligned(count * block_size, PAGE_SIZE);
+
+ /* Allocate the data for each block. We page align the data. */
+ if (!data)
+ return -ENOMEM;
+
+ cache->raw_data = data;
+ cache->raw_blocks = malloc(count * sizeof(*cache->raw_blocks));
+
+ if (!cache->raw_blocks) {
+ free(cache->raw_data);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < count; i++) {
+ struct block *b = cache->raw_blocks + i;
+ b->cache = cache;
+ b->data = data + (block_size * i);
+ dm_list_add(&cache->free, &b->list);
+ }
+
+ return 0;
+}
+
+static void _exit_free_list(struct bcache *cache)
+{
+ free(cache->raw_data);
+ free(cache->raw_blocks);
+}
+
+static struct block *_alloc_block(struct bcache *cache)
+{
+ struct block *b = dm_list_struct_base(_list_pop(&cache->free), struct block, list);
+ return b;
+}
+
+/*----------------------------------------------------------------
+ * Clean/dirty list management.
+ * Always use these methods to ensure nr_dirty_ is correct.
+ *--------------------------------------------------------------*/
+
+static void _unlink_block(struct block *b)
+{
+ if (_test_flags(b, BF_DIRTY))
+ b->cache->nr_dirty--;
+
+ dm_list_del(&b->list);
+}
+
+static void _link_block(struct block *b)
+{
+ struct bcache *cache = b->cache;
+
+ if (_test_flags(b, BF_DIRTY)) {
+ dm_list_add(&cache->dirty, &b->list);
+ cache->nr_dirty++;
+ } else
+ dm_list_add(&cache->clean, &b->list);
+}
+
+static void _relink(struct block *b)
+{
+ _unlink_block(b);
+ _link_block(b);
+}
+
+/*----------------------------------------------------------------
+ * Low level IO handling
+ *
+ * We cannot have two concurrent writes on the same block.
+ * eg, background writeback, put with dirty, flush?
+ *
+ * To avoid this we introduce some restrictions:
+ *
+ * i) A held block can never be written back.
+ * ii) You cannot get a block until writeback has completed.
+ *
+ *--------------------------------------------------------------*/
+
+/*
+ * |b->list| should be valid (either pointing to itself, or on one of the
+ * other lists).
+ */
+static bool _issue_low_level(struct block *b, enum dir d)
+{
+ struct bcache *cache = b->cache;
+ sector_t sb = b->index * cache->block_sectors;
+ sector_t se = sb + cache->block_sectors;
+
+ if (_test_flags(b, BF_IO_PENDING))
+ return false;
+
+ _set_flags(b, BF_IO_PENDING);
+ return _engine_issue(cache->engine, cache->fd, d, sb, se, b->data, b);
+}
+
+static inline bool _issue_read(struct block *b)
+{
+ return _issue_low_level(b, DIR_READ);
+}
+
+static inline bool _issue_write(struct block *b)
+{
+ return _issue_low_level(b, DIR_WRITE);
+}
+
+static void _complete_io(void *context, int err)
+{
+ struct block *b = context;
+ struct bcache *cache = b->cache;
+
+ b->error = err;
+ _clear_flags(b, BF_IO_PENDING);
+ cache->nr_io_pending--;
+
+ /*
+ * b is on the io_pending list, so we don't want to use _unlink_block(),
+ * which would incorrectly adjust nr_dirty.
+ */
+ dm_list_del(&b->list);
+
+ if (b->error)
+ dm_list_add(&cache->errored, &b->list);
+
+ else {
+ _clear_flags(b, BF_DIRTY);
+ _link_block(b);
+ }
+}
+
+static int _wait_io(struct bcache *cache)
+{
+ return _engine_wait(cache->engine, _complete_io);
+}
+
+/*----------------------------------------------------------------
+ * High level IO handling
+ *--------------------------------------------------------------*/
+
+static void _wait_all(struct bcache *cache)
+{
+ while (!dm_list_empty(&cache->io_pending))
+ _wait_io(cache);
+}
+
+static void _wait_specific(struct block *b)
+{
+ while (_test_flags(b, BF_IO_PENDING))
+ _wait_io(b->cache);
+}
+
+static unsigned _writeback(struct bcache *cache, unsigned count)
+{
+ unsigned actual = 0;
+ struct block *b, *tmp;
+
+ dm_list_iterate_items_gen_safe (b, tmp, &cache->dirty, list) {
+ if (actual == count)
+ break;
+
+ // We can't writeback anything that's still in use.
+ if (!b->ref_count) {
+ _issue_write(b);
+ actual++;
+ }
+ }
+
+ return actual;
+}
+
+/*----------------------------------------------------------------
+ * High level allocation
+ *--------------------------------------------------------------*/
+
+static struct block *_find_unused_clean_block(struct bcache *cache)
+{
+ struct block *b;
+
+ dm_list_iterate_items (b, &cache->clean) {
+ if (!b->ref_count) {
+ _unlink_block(b);
+ _hash_remove(b);
+ return b;
+ }
+ }
+
+ return NULL;
+}
+
+static struct block *_new_block(struct bcache *cache, block_address index)
+{
+ struct block *b;
+
+ b = _alloc_block(cache);
+ while (!b && cache->nr_locked < cache->nr_cache_blocks) {
+ b = _find_unused_clean_block(cache);
+ if (!b) {
+ if (dm_list_empty(&cache->io_pending))
+ _writeback(cache, 16);
+ _wait_io(cache);
+ }
+ }
+
+ if (b) {
+ dm_list_init(&b->list);
+ dm_list_init(&b->hash);
+ b->flags = 0;
+ b->index = index;
+ b->ref_count = 0;
+ b->error = 0;
+
+ _hash_insert(b);
+ }
+
+ return b;
+}
+
+/*----------------------------------------------------------------
+ * Block reference counting
+ *--------------------------------------------------------------*/
+static void _zero_block(struct block *b)
+{
+ b->cache->write_zeroes++;
+ memset(b->data, 0, b->cache->block_sectors << SECTOR_SHIFT);
+ _set_flags(b, BF_DIRTY);
+}
+
+static void _hit(struct block *b, unsigned flags)
+{
+ struct bcache *cache = b->cache;
+
+ if (flags & (GF_ZERO | GF_DIRTY))
+ cache->write_hits++;
+ else
+ cache->read_hits++;
+
+ _relink(b);
+}
+
+static void _miss(struct bcache *cache, unsigned flags)
+{
+ if (flags & (GF_ZERO | GF_DIRTY))
+ cache->write_misses++;
+ else
+ cache->read_misses++;
+}
+
+static struct block *_lookup_or_read_block(struct bcache *cache,
+ int fd, block_address index,
+ unsigned flags)
+{
+ struct block *b = _hash_lookup(cache, fd, index);
+
+ if (b) {
+ // FIXME: this is insufficient. We need to also catch a read
+ // lock of a write locked block. Ref count needs to distinguish.
+ if (b->ref_count && (flags & (GF_DIRTY | GF_ZERO))) {
+ log_err("concurrent write lock attempted");
+ return NULL;
+ }
+
+ if (_test_flags(b, BF_IO_PENDING)) {
+ _miss(cache, flags);
+ _wait_specific(b);
+
+ } else
+ _hit(b, flags);
+
+ _unlink_block(b);
+
+ if (flags & GF_ZERO)
+ _zero_block(b);
+
+ } else {
+ _miss(cache, flags);
+
+ b = _new_block(cache, index);
+ if (b) {
+ if (flags & GF_ZERO)
+ _zero_block(b);
+
+ else {
+ _issue_read(b);
+ _wait_specific(b);
+
+ // we know the block is clean and unerrored.
+ _unlink_block(b);
+ }
+ }
+ }
+
+ if (b && !b->error) {
+ if (flags & (GF_DIRTY | GF_ZERO))
+ _set_flags(b, BF_DIRTY);
+
+ _link_block(b);
+ return b;
+ }
+
+ return NULL;
+}
+
+static void _preemptive_writeback(struct bcache *cache)
+{
+ // FIXME: this ignores those blocks that are in the error state. Track
+ // nr_clean instead?
+ unsigned nr_available = cache->nr_cache_blocks - (cache->nr_dirty - cache->nr_io_pending);
+ if (nr_available < (WRITEBACK_LOW_THRESHOLD_PERCENT * cache->nr_cache_blocks / 100))
+ _writeback(cache, (WRITEBACK_HIGH_THRESHOLD_PERCENT * cache->nr_cache_blocks / 100) - nr_available);
+
+}
+
+/*----------------------------------------------------------------
+ * Public interface
+ *--------------------------------------------------------------*/
+struct bcache *bcache_create(sector_t block_sectors, unsigned nr_cache_blocks)
+{
+ int r;
+ struct bcache *cache;
+
+ cache = malloc(sizeof(*cache));
+ if (!cache)
+ return NULL;
+
+ cache->block_sectors = block_sectors;
+ cache->nr_cache_blocks = nr_cache_blocks;
+
+ cache->engine = _engine_create(nr_cache_blocks < 1024u ? nr_cache_blocks : 1024u);
+ if (!cache->engine) {
+ free(cache);
+ return NULL;
+ }
+
+ cache->nr_locked = 0;
+ cache->nr_dirty = 0;
+ cache->nr_io_pending = 0;
+
+ dm_list_init(&cache->free);
+ dm_list_init(&cache->errored);
+ dm_list_init(&cache->dirty);
+ dm_list_init(&cache->clean);
+ dm_list_init(&cache->io_pending);
+
+ if (_hash_table_init(cache, nr_cache_blocks)) {
+ _engine_destroy(cache->engine);
+ free(cache);
+ return NULL;
+ }
+
+ cache->read_hits = 0;
+ cache->read_misses = 0;
+ cache->write_zeroes = 0;
+ cache->write_hits = 0;
+ cache->write_misses = 0;
+ cache->prefetches = 0;
+
+ r = _init_free_list(cache, nr_cache_blocks);
+ if (r) {
+ _engine_destroy(cache->engine);
+ _hash_table_exit(cache);
+ free(cache);
+ return NULL;
+ }
+
+ return cache;
+}
+
+void bcache_destroy(struct bcache *cache)
+{
+ if (cache->nr_locked)
+ log_warn("some blocks are still locked\n");
+
+ bcache_flush(cache);
+ _wait_all(cache);
+ _exit_free_list(cache);
+ _hash_table_exit(cache);
+ _engine_destroy(cache->engine);
+ free(cache);
+}
+
+void bcache_prefetch(struct bcache *cache, int fd, block_address index)
+{
+ struct block *b = _hash_lookup(cache, fd, index);
+
+ if (!b) {
+ cache->prefetches++;
+
+ b = _new_block(cache, index);
+ if (b)
+ _issue_read(b);
+ }
+}
+
+bool bcache_get(struct bcache *cache, int fd, block_address index,
+ unsigned flags, struct block **result)
+{
+ struct block *b = _lookup_or_read_block(cache, fd, index, flags);
+ if (b) {
+ if (!b->ref_count)
+ cache->nr_locked++;
+ b->ref_count++;
+
+ *result = b;
+ return true;
+ }
+
+ *result = NULL;
+ log_err("couldn't get block");
+ return false;
+}
+
+void bcache_put(struct block *b)
+{
+ if (!b->ref_count) {
+ log_err("ref count on bcache block already zero");
+ return;
+ }
+
+ b->ref_count--;
+ if (!b->ref_count)
+ b->cache->nr_locked--;
+
+ if (_test_flags(b, BF_DIRTY))
+ _preemptive_writeback(b->cache);
+}
+
+int bcache_flush(struct bcache *cache)
+{
+ while (!dm_list_empty(&cache->dirty)) {
+ struct block *b = dm_list_item(_list_pop(&cache->dirty), struct block);
+ if (b->ref_count || _test_flags(b, BF_IO_PENDING))
+ // The superblock may well be still locked.
+ continue;
+
+ _issue_write(b);
+ }
+
+ _wait_all(cache);
+
+ return dm_list_empty(&cache->errored) ? 0 : -EIO;
+}
+
+//----------------------------------------------------------------
+
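One detail worth noting before the header: _engine_issue() above rejects any buffer that is not page aligned, and _init_free_list() therefore allocates all block data through _alloc_aligned() with PAGE_SIZE, the usual contract for direct/asynchronous IO through libaio. A minimal standalone sketch of the same allocation pattern (the 4096 fallback is an assumption here; bcache.c takes PAGE_SIZE from <sys/user.h>):

    #include <stdlib.h>
    #include <string.h>

    #ifndef PAGE_SIZE
    #define PAGE_SIZE 4096  /* assumed fallback; bcache.c uses <sys/user.h> */
    #endif

    /* Allocate a zeroed, page-aligned buffer that will pass the
     * alignment check in _engine_issue(). */
    static void *aligned_io_buffer(size_t len)
    {
        void *buf = NULL;

        if (posix_memalign(&buf, PAGE_SIZE, len))
            return NULL;

        memset(buf, 0, len);
        return buf;
    }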
diff --git a/lib/device/bcache.h b/lib/device/bcache.h
new file mode 100644
index 0000000..1f4262e
--- /dev/null
+++ b/lib/device/bcache.h
@@ -0,0 +1,83 @@
+#ifndef BCACHE_H
+#define BCACHE_H
+
+#include <stdint.h>
+
+#include "libdevmapper.h"
+
+/*----------------------------------------------------------------*/
+
+typedef uint64_t block_address;
+typedef uint64_t sector_t;
+
+struct bcache;
+struct block {
+ /* clients may only access these three fields */
+ int fd;
+ uint64_t index;
+ void *data;
+
+ struct bcache *cache;
+ struct dm_list list;
+ struct dm_list hash;
+
+ unsigned flags;
+ unsigned ref_count;
+ int error;
+};
+
+struct bcache *bcache_create(sector_t block_size, unsigned nr_cache_blocks);
+void bcache_destroy(struct bcache *cache);
+
+enum bcache_get_flags {
+ /*
+ * The block will be zeroed before get_block returns it. This
+ * potentially avoids a read if the block is not already in the cache.
+ * GF_DIRTY is implicit.
+ */
+ GF_ZERO = (1 << 0),
+
+ /*
+ * Indicates the caller is intending to change the data in the block, a
+ * writeback will occur after the block is released.
+ */
+ GF_DIRTY = (1 << 1)
+};
+
+typedef uint64_t block_address;
+
+unsigned bcache_get_max_prefetches(struct bcache *cache);
+
+/*
+ * Use the prefetch method to take advantage of asynchronous IO. For example,
+ * if you wanted to read a block from many devices concurrently you'd do
+ * something like this:
+ *
+ * dm_list_iterate_items (dev, &devices)
+ * bcache_prefetch(cache, dev->fd, block);
+ *
+ * dm_list_iterate_items (dev, &devices) {
+ * if (!bcache_get(cache, dev->fd, block, &b))
+ * fail();
+ *
+ * process_block(b);
+ * }
+ *
+ * It's slightly suboptimal, since you may not run the gets in the order that
+ * they complete. But we're talking a very small difference, and it's worth it
+ * to keep callbacks out of this interface.
+ */
+void bcache_prefetch(struct bcache *cache, int fd, block_address index);
+
+/*
+ * Returns true on success.
+ */
+bool bcache_get(struct bcache *cache, int fd, block_address index,
+ unsigned flags, struct block **result);
+void bcache_put(struct block *b);
+
+int bcache_flush(struct bcache *cache);
+
+/*----------------------------------------------------------------*/
+
+#endif
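To make the interface concrete, here is a minimal sketch of a caller driving the full lifecycle declared above. The path, block size, and cache size are arbitrary, and opening the device with O_DIRECT is an assumption inferred from the page-alignment checks in bcache.c; the header itself does not document how the fd must be opened.

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>
    #include "bcache.h"

    /* Hypothetical helper, not part of the API. */
    static int touch_first_block(const char *path)
    {
        struct bcache *cache;
        struct block *b;
        int fd, r = 0;

        if ((fd = open(path, O_RDWR | O_DIRECT)) < 0)  /* O_DIRECT assumed */
            return -1;

        /* 256 sectors = 128K blocks; 64 cache blocks. Both arbitrary. */
        if (!(cache = bcache_create(256, 64))) {
            close(fd);
            return -1;
        }

        bcache_prefetch(cache, fd, 0);           /* start the read early */

        if (bcache_get(cache, fd, 0, GF_DIRTY, &b)) {
            ((char *) b->data)[0] = 42;          /* modify block 0 */
            bcache_put(b);                       /* unlock; writeback may follow */
        } else
            r = -1;

        if (bcache_flush(cache))                 /* -EIO if any write failed */
            r = -1;

        bcache_destroy(cache);
        close(fd);
        return r;
    }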
master - test: vgsplit-usage if LVM1 tests
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=ae093df3f15f6d125bf...
Commit: ae093df3f15f6d125bf85ed921e5a971b170dc23
Parent: d75aa557845e37f5c4b90ca43c81943bd9b90094
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Thu Feb 15 10:00:07 2018 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Fri Apr 20 11:22:46 2018 -0500
test: vgsplit-usage if LVM1 tests
---
test/shell/vgsplit-usage.sh | 2 ++
1 files changed, 2 insertions(+), 0 deletions(-)
diff --git a/test/shell/vgsplit-usage.sh b/test/shell/vgsplit-usage.sh
index 98818ab..a112e86 100644
--- a/test/shell/vgsplit-usage.sh
+++ b/test/shell/vgsplit-usage.sh
@@ -184,6 +184,7 @@ check pvlv_counts $vg1 2 1 0
vgremove -f $vg1
# vgsplit rejects split because metadata types differ
+if test -n "$LVM_TEST_LVM1" ; then
pvcreate -ff -M1 "$dev3" "$dev4"
pvcreate -ff "$dev1" "$dev2"
vgcreate -M1 $vg1 "$dev3" "$dev4"
@@ -192,3 +193,4 @@ not vgsplit $vg1 $vg2 "$dev3" 2>err;
grep "Metadata types differ" err
vgremove -f $vg1 $vg2
fi
+fi
master - disable LVM1 tests
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=d75aa557845e37f5c4b...
Commit: d75aa557845e37f5c4b90ca43c81943bd9b90094
Parent: 96a61337b00a250f69e7a8e6ac390c47c36c2c0f
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Thu Feb 15 09:54:12 2018 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Fri Apr 20 11:22:46 2018 -0500
disable LVM1 tests
---
test/lib/flavour-ndev-cluster.sh | 1 -
test/lib/flavour-ndev-vanilla.sh | 1 -
test/lib/flavour-udev-cluster.sh | 1 -
test/lib/flavour-udev-vanilla.sh | 1 -
4 files changed, 0 insertions(+), 4 deletions(-)
diff --git a/test/lib/flavour-ndev-cluster.sh b/test/lib/flavour-ndev-cluster.sh
index 3082b11..3629069 100644
--- a/test/lib/flavour-ndev-cluster.sh
+++ b/test/lib/flavour-ndev-cluster.sh
@@ -1,2 +1 @@
export LVM_TEST_LOCKING=3
-export LVM_TEST_LVM1=1
diff --git a/test/lib/flavour-ndev-vanilla.sh b/test/lib/flavour-ndev-vanilla.sh
index c106e61..1899c94 100644
--- a/test/lib/flavour-ndev-vanilla.sh
+++ b/test/lib/flavour-ndev-vanilla.sh
@@ -1,2 +1 @@
export LVM_TEST_LOCKING=1
-export LVM_TEST_LVM1=1
diff --git a/test/lib/flavour-udev-cluster.sh b/test/lib/flavour-udev-cluster.sh
index 1cab558..a9025a6 100644
--- a/test/lib/flavour-udev-cluster.sh
+++ b/test/lib/flavour-udev-cluster.sh
@@ -1,3 +1,2 @@
export LVM_TEST_LOCKING=3
export LVM_TEST_DEVDIR=/dev
-export LVM_TEST_LVM1=1
diff --git a/test/lib/flavour-udev-vanilla.sh b/test/lib/flavour-udev-vanilla.sh
index 6fbdafe..ca778a6 100644
--- a/test/lib/flavour-udev-vanilla.sh
+++ b/test/lib/flavour-udev-vanilla.sh
@@ -1,3 +1,2 @@
export LVM_TEST_LOCKING=1
export LVM_TEST_DEVDIR=/dev
-export LVM_TEST_LVM1=1
6 years, 1 month
master - lvmdiskscan: use the new label_scan
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=96a61337b00a250f69e...
Commit: 96a61337b00a250f69e7a8e6ac390c47c36c2c0f
Parent: 28255e3eeef13a0e73a40d533623b22dd9db89cb
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Wed Feb 14 16:43:26 2018 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Fri Apr 20 11:22:46 2018 -0500
lvmdiskscan: use the new label_scan
instead of doing its own.
---
tools/lvmdiskscan.c | 7 +++----
1 files changed, 3 insertions(+), 4 deletions(-)
diff --git a/tools/lvmdiskscan.c b/tools/lvmdiskscan.c
index cb688b5..7e2fc88 100644
--- a/tools/lvmdiskscan.c
+++ b/tools/lvmdiskscan.c
@@ -87,7 +87,6 @@ int lvmdiskscan(struct cmd_context *cmd, int argc __attribute__((unused)),
uint64_t size;
struct dev_iter *iter;
struct device *dev;
- struct label *label;
/* initialise these here to avoid problems with the lvm shell */
disks_found = 0;
@@ -105,10 +104,10 @@ int lvmdiskscan(struct cmd_context *cmd, int argc __attribute__((unused)),
return ECMD_FAILED;
}
- /* Do scan */
+ label_scan(cmd);
+
for (dev = dev_iter_get(iter); dev; dev = dev_iter_get(iter)) {
- /* Try if it is a PV first */
- if ((label_read(dev, &label, UINT64_C(0)))) {
+ if (lvmcache_has_dev_info(dev)) {
if (!dev_get_size(dev, &size)) {
log_error("Couldn't get size of \"%s\"",
dev_name(dev));
master - scan: always setup bcache for commands using lvmetad
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=28255e3eeef13a0e73a...
Commit: 28255e3eeef13a0e73a40d533623b22dd9db89cb
Parent: f328532f05877fc04e7f67c751ef95a844831b9b
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Wed Feb 14 16:21:27 2018 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Fri Apr 20 11:22:46 2018 -0500
scan: always setup bcache for commands using lvmetad
Do this at the start of the command so that it doesn't
need to be checked and set up in every function that
could need it.
---
lib/cache/lvmcache.c | 5 ++++-
lib/label/label.c | 29 +++++++++++++++++------------
lib/label/label.h | 1 +
3 files changed, 22 insertions(+), 13 deletions(-)
diff --git a/lib/cache/lvmcache.c b/lib/cache/lvmcache.c
index 87bcc37..53254f4 100644
--- a/lib/cache/lvmcache.c
+++ b/lib/cache/lvmcache.c
@@ -1241,8 +1241,11 @@ int lvmcache_label_scan(struct cmd_context *cmd)
int r = 0;
- if (lvmetad_used())
+ if (lvmetad_used()) {
+ if (!label_scan_setup_bcache())
+ return 0;
return 1;
+ }
/* Avoid recursion when a PVID can't be found! */
if (_scanning_in_progress)
diff --git a/lib/label/label.c b/lib/label/label.c
index 3359b4d..dc14beb 100644
--- a/lib/label/label.c
+++ b/lib/label/label.c
@@ -678,18 +678,6 @@ int label_scan_devs(struct cmd_context *cmd, struct dm_list *devs)
{
struct device_list *devl;
- if (!scan_bcache) {
- /*
- * This is only needed when commands are using lvmetad, in
- * which case they don't do an initial label_scan, but may
- * later need to rescan certain devs from disk and call this
- * function.
- * FIXME: is there some better number to choose here?
- */
- if (!_setup_bcache(32))
- return 0;
- }
-
dm_list_iterate_items(devl, devs) {
if (_in_bcache(devl->dev)) {
bcache_invalidate_fd(scan_bcache, devl->dev->bcache_fd);
@@ -841,3 +829,20 @@ void label_scan_confirm(struct device *dev)
label_read(dev, NULL, 0);
}
+/*
+ * This is only needed when commands are using lvmetad, in which case they
+ * don't do an initial label_scan, but may later need to rescan certain devs
+ * from disk and call this function. FIXME: is there some better number to
+ * choose here?
+ */
+
+int label_scan_setup_bcache(void)
+{
+ if (!scan_bcache) {
+ if (!_setup_bcache(32))
+ return 0;
+ }
+
+ return 1;
+}
+
diff --git a/lib/label/label.h b/lib/label/label.h
index e265a6b..107bd30 100644
--- a/lib/label/label.h
+++ b/lib/label/label.h
@@ -110,5 +110,6 @@ void label_scan_destroy(struct cmd_context *cmd);
int label_read(struct device *dev, struct label **labelp, uint64_t unused_sector);
int label_read_sector(struct device *dev, struct label **labelp, uint64_t scan_sector);
void label_scan_confirm(struct device *dev);
+int label_scan_setup_bcache(void);
#endif
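Distilled, the pattern is: lvmetad-based commands make one idempotent label_scan_setup_bcache() call up front, so later scan paths can assume scan_bcache exists. A hypothetical sketch of a command entry point (run_command() and cmd_body() are placeholders, not actual lvm2 functions):

    /* Hypothetical dispatch sketch, not actual lvm2 code;
     * assumes the usual lvm2 tool headers. */
    int run_command(struct cmd_context *cmd)
    {
        /* One setup call at the start of the command... */
        if (lvmetad_used() && !label_scan_setup_bcache()) {
            log_error("Failed to set up bcache.");
            return ECMD_FAILED;
        }

        /* ...so functions like label_scan_devs() no longer need
         * to check and create scan_bcache themselves. */
        return cmd_body(cmd);
    }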
master - scan: leave the caller's dev list unchanged
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=f328532f05877fc04e7...
Commit: f328532f05877fc04e7f67c751ef95a844831b9b
Parent: 7bce66c5e83296398e2eee99140b3d6e409236c9
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Wed Feb 14 16:15:30 2018 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Fri Apr 20 11:22:46 2018 -0500
scan: leave the caller's dev list unchanged
When scanning the list of devs from the caller,
they were moved to a temporary list but never
returned to the original list.
---
lib/label/label.c | 2 ++
1 files changed, 2 insertions(+), 0 deletions(-)
diff --git a/lib/label/label.c b/lib/label/label.c
index bf1070c..3359b4d 100644
--- a/lib/label/label.c
+++ b/lib/label/label.c
@@ -567,6 +567,8 @@ static int _scan_list(struct dm_list *devs, int *failed)
if (failed)
*failed = scan_failed_count;
+ dm_list_splice(devs, &done_devs);
+
return 1;
}
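In miniature: _scan_list() pops each device onto a local done_devs list as it completes, so without the final splice the caller's list is left empty. A sketch of the pattern (process_one() and the struct definitions are stand-ins for the real lvm2 code):

    #include "libdevmapper.h"

    struct device;                      /* opaque here */
    struct device_list {                /* mirrors lvm2's definition */
        struct dm_list list;
        struct device *dev;
    };

    static void process_one(struct device *dev) { (void) dev; }  /* stub */

    static int scan_list_sketch(struct dm_list *devs)
    {
        struct dm_list done_devs;
        struct device_list *devl;

        dm_list_init(&done_devs);

        while (!dm_list_empty(devs)) {
            devl = dm_list_item(devs->n, struct device_list);
            dm_list_del(&devl->list);
            process_one(devl->dev);
            dm_list_add(&done_devs, &devl->list);
        }

        /* The fix: splice everything back so the caller's list
         * is unchanged when we return. */
        dm_list_splice(devs, &done_devs);
        return 1;
    }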
master - scan: setup bcache for commands using lvmetad
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=7bce66c5e83296398e2...
Commit: 7bce66c5e83296398e2eee99140b3d6e409236c9
Parent: 6e580465b50edcd5fef0eb95180a620cb785d835
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Wed Feb 14 15:45:31 2018 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Fri Apr 20 11:22:46 2018 -0500
scan: setup bcache for commands using lvmetad
Commands using lvmetad will not begin with a proper
label_scan which initializes bcache, but may later
decide they need to scan a set of devs, in which case
they'll need bcache set up at that point.
---
lib/label/label.c | 66 +++++++++++++++++++++++++++++++++++++---------------
1 files changed, 47 insertions(+), 19 deletions(-)
diff --git a/lib/label/label.c b/lib/label/label.c
index 19beecf..bf1070c 100644
--- a/lib/label/label.c
+++ b/lib/label/label.c
@@ -570,6 +570,38 @@ static int _scan_list(struct dm_list *devs, int *failed)
return 1;
}
+static int _setup_bcache(int cache_blocks)
+{
+ struct io_engine *ioe;
+
+ /* No devices can happen, just create bcache with any small number. */
+ if (!cache_blocks)
+ cache_blocks = 8;
+
+ /*
+ * 100 is arbitrary, it's the max number of concurrent aio's
+ * possible, i.e, the number of devices that can be read at
+ * once. Should this be configurable?
+ */
+ if (!(ioe = create_async_io_engine(100))) {
+ log_error("Failed to create bcache io engine.");
+ return 0;
+ }
+
+ /*
+ * Configure one cache block for each device on the system.
+ * We won't generally need to cache that many because some
+ * of the devs will not be lvm devices, and we don't need
+ * an entry for those. We might want to change this.
+ */
+ if (!(scan_bcache = bcache_create(BCACHE_BLOCK_SIZE_IN_SECTORS, cache_blocks, ioe))) {
+ log_error("Failed to create bcache with %d cache blocks.", cache_blocks);
+ return 0;
+ }
+
+ return 1;
+}
+
/*
* Scan and cache lvm data from all devices on the system.
* The cache should be empty/reset before calling this.
@@ -581,8 +613,6 @@ int label_scan(struct cmd_context *cmd)
struct dev_iter *iter;
struct device_list *devl;
struct device *dev;
- struct io_engine *ioe;
- int cache_blocks;
log_debug_devs("Finding devices to scan");
@@ -621,25 +651,11 @@ int label_scan(struct cmd_context *cmd)
dev_iter_destroy(iter);
if (!scan_bcache) {
- /* No devices can happen, just create bcache with any small number. */
- if (!(cache_blocks = dm_list_size(&all_devs)))
- cache_blocks = 8;
-
- /*
- * 100 is arbitrary, it's the max number of concurrent aio's
- * possible, i.e, the number of devices that can be read at
- * once. Should this be configurable?
- */
- if (!(ioe = create_async_io_engine(100)))
- return 0;
-
/*
- * Configure one cache block for each device on the system.
- * We won't generally need to cache that many because some
- * of the devs will not be lvm devices, and we don't need
- * an entry for those. We might want to change this.
+ * FIXME: there should probably be some max number of
+ * cache blocks we use when setting up bcache.
*/
- if (!(scan_bcache = bcache_create(BCACHE_BLOCK_SIZE_IN_SECTORS, cache_blocks, ioe)))
+ if (!_setup_bcache(dm_list_size(&all_devs)))
return 0;
}
@@ -660,6 +676,18 @@ int label_scan_devs(struct cmd_context *cmd, struct dm_list *devs)
{
struct device_list *devl;
+ if (!scan_bcache) {
+ /*
+ * This is only needed when commands are using lvmetad, in
+ * which case they don't do an initial label_scan, but may
+ * later need to rescan certain devs from disk and call this
+ * function.
+ * FIXME: is there some better number to choose here?
+ */
+ if (!_setup_bcache(32))
+ return 0;
+ }
+
dm_list_iterate_items(devl, devs) {
if (_in_bcache(devl->dev)) {
bcache_invalidate_fd(scan_bcache, devl->dev->bcache_fd);
master - vgremove: fix force remove on devs with damaged metadata
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=6e580465b50edcd5fef...
Commit: 6e580465b50edcd5fef0eb95180a620cb785d835
Parent: 37471bb4777f3f407d2cd942995b45c326ea221a
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Wed Feb 14 14:47:28 2018 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Fri Apr 20 11:22:46 2018 -0500
vgremove: fix force remove on devs with damaged metadata
The improved detection of bad metadata when scanning
(where errors were ignored before) means we now have to
override some errors when forcibly erasing damaged metadata.
---
lib/format_text/format-text.c | 22 +++++++++++++++++++---
1 files changed, 19 insertions(+), 3 deletions(-)
diff --git a/lib/format_text/format-text.c b/lib/format_text/format-text.c
index f33451f..8a42213 100644
--- a/lib/format_text/format-text.c
+++ b/lib/format_text/format-text.c
@@ -865,12 +865,28 @@ static int _vg_remove_raw(struct format_instance *fid, struct volume_group *vg,
int r = 0;
int noprecommit = 0;
- if (!(mdah = raw_read_mda_header(fid->fmt, &mdac->area, mda_is_primary(mda))))
- goto_out;
+ if (!(mdah = dm_pool_alloc(fid->fmt->cmd->mem, MDA_HEADER_SIZE))) {
+ log_error("struct mda_header allocation failed");
+ return 0;
+ }
- if (!(rlocn = _read_metadata_location_vg(&mdac->area, mdah, mda_is_primary(mda), vg->name, &noprecommit))) {
+ /*
+ * FIXME: what's the point of reading the mda_header and metadata,
+ * since we zero the rlocn fields whether we can read them or not.
+ */
+
+ if (!_raw_read_mda_header(mdah, &mdac->area, mda_is_primary(mda))) {
+ log_warn("WARNING: Removing metadata location on %s with bad mda header.",
+ dev_name(mdac->area.dev));
rlocn = &mdah->raw_locns[0];
mdah->raw_locns[1].offset = 0;
+ } else {
+ if (!(rlocn = _read_metadata_location_vg(&mdac->area, mdah, mda_is_primary(mda), vg->name, &noprecommit))) {
+ log_warn("WARNING: Removing metadata location on %s with bad metadata.",
+ dev_name(mdac->area.dev));
+ rlocn = &mdah->raw_locns[0];
+ mdah->raw_locns[1].offset = 0;
+ }
}
rlocn->offset = 0;
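The shape of the change: instead of bailing out when the header is unreadable (the old goto_out), the code now warns and proceeds with a zeroed header so the wipe can still happen. A generic sketch of that degrade-instead-of-fail pattern (read_hdr() and struct hdr are stand-ins, not the lvm2 types):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct hdr { unsigned long locn[2]; };

    /* Stand-in for _raw_read_mda_header(); pretend the header is bad. */
    static bool read_hdr(struct hdr *h) { (void) h; return false; }

    /* Degrade instead of failing: if the on-disk header cannot be
     * read, warn and carry on with a zeroed header so the metadata
     * locations are still erased and rewritten. */
    static void force_wipe(struct hdr *h)
    {
        if (!read_hdr(h)) {
            fprintf(stderr, "WARNING: wiping despite bad mda header\n");
            memset(h, 0, sizeof(*h));
        }
        h->locn[0] = 0;
        h->locn[1] = 0;
        /* ...then the repaired header is written back to disk. */
    }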
master - scan: skip extra scan in vg_read
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=37471bb4777f3f407d2...
Commit: 37471bb4777f3f407d2cd942995b45c326ea221a
Parent: e4f478d86d6545f6cced7a8ba3bc0b79dccb7b6e
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Wed Feb 14 13:49:56 2018 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Fri Apr 20 11:22:46 2018 -0500
scan: skip extra scan in vg_read
Drop an extra label scan in the recovery part
of vg_read. This is a temporary improvement
until the pending replacement for the broken
recovery code buried in vg_read.
---
lib/metadata/metadata.c | 2 --
1 files changed, 0 insertions(+), 2 deletions(-)
diff --git a/lib/metadata/metadata.c b/lib/metadata/metadata.c
index 5d3f835..b588a04 100644
--- a/lib/metadata/metadata.c
+++ b/lib/metadata/metadata.c
@@ -4080,8 +4080,6 @@ static struct volume_group *_vg_read(struct cmd_context *cmd,
/* Independent MDAs aren't supported under low memory */
if (!cmd->independent_metadata_areas && prioritized_section())
return_NULL;
- lvmcache_force_next_label_scan();
- lvmcache_label_scan(cmd);
if (!(fmt = lvmcache_fmt_from_vgname(cmd, vgname, vgid, 0)))
return_NULL;