author  Alex Elder <elder@inktank.com>  2013-04-21 00:32:07 -0500
committer  Sage Weil <sage@inktank.com>  2013-05-01 21:19:15 -0700
commit  a9e8ba2cb3eb64cf6cfa509d096ef79bc1c827ae (patch)
tree  8822a3411c5353abfdd12341a2cbd91af2ffe03b /drivers
parent  0eefd470f034cc18349fa1a9e4fda000e963c4e3 (diff)
rbd: enforce parent overlap
A clone image has a defined overlap point with its parent image. That is the byte offset beyond which the parent image has no defined data to back the clone, and anything thereafter can be viewed as being zero-filled by the clone image.

This is needed because a clone image can be resized. If it gets resized larger than the snapshot it is based on, the overlap defines the original size. If the clone gets resized downward below the original size, the new clone size defines the overlap. If the clone is subsequently resized to be larger, the overlap won't be increased, because the previous resize invalidated any parent data beyond that point.

This resolves:
    http://tracker.ceph.com/issues/4724

Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
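The resize rule above reduces to simple bookkeeping: the overlap starts at the clone's size at clone time and can only shrink; growing the clone again never restores it. The following userspace sketch illustrates that rule (the struct and function names are illustrative, not taken from rbd.c):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Illustrative clone state: current size and how much is still parent-backed. */
struct clone_state {
	u64 size;		/* current clone size, in bytes */
	u64 parent_overlap;	/* bytes still backed by the parent snapshot */
};

/* On resize, the overlap may shrink but is never restored by growing again. */
static void clone_resize(struct clone_state *c, u64 new_size)
{
	if (new_size < c->parent_overlap)
		c->parent_overlap = new_size;
	c->size = new_size;
}

int main(void)
{
	/* Clone created from a 1 GiB snapshot: overlap equals the original size. */
	struct clone_state c = { .size = (u64)1 << 30, .parent_overlap = (u64)1 << 30 };

	clone_resize(&c, (u64)1 << 29);	/* shrink to 512 MiB: overlap drops too */
	clone_resize(&c, (u64)1 << 31);	/* grow to 2 GiB: overlap stays at 512 MiB */

	printf("size=%llu overlap=%llu\n",
	       (unsigned long long)c.size, (unsigned long long)c.parent_overlap);
	return 0;
}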
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/rbd.c  64
1 file changed, 54 insertions, 10 deletions
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index c34719c917b1..ee53d8e52801 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1437,20 +1437,20 @@ static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request = NULL;
+ struct rbd_device *rbd_dev = NULL;
bool layered = false;
if (obj_request_img_data_test(obj_request)) {
img_request = obj_request->img_request;
layered = img_request && img_request_layered_test(img_request);
- } else {
- img_request = NULL;
- layered = false;
+ rbd_dev = img_request->rbd_dev;
}
dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
obj_request, img_request, obj_request->result,
obj_request->xferred, obj_request->length);
- if (layered && obj_request->result == -ENOENT)
+ if (layered && obj_request->result == -ENOENT &&
+ obj_request->img_offset < rbd_dev->parent_overlap)
rbd_img_parent_read(obj_request);
else if (img_request)
rbd_img_obj_request_read_callback(obj_request);
@@ -2166,6 +2166,16 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
length = (u64)1 << rbd_dev->header.obj_order;
/*
+ * There is no defined parent data beyond the parent
+ * overlap, so limit what we read at that boundary if
+ * necessary.
+ */
+ if (img_offset + length > rbd_dev->parent_overlap) {
+ rbd_assert(img_offset < rbd_dev->parent_overlap);
+ length = rbd_dev->parent_overlap - img_offset;
+ }
+
+ /*
* Allocate a page array big enough to receive the data read
* from the parent.
*/
@@ -2325,21 +2335,28 @@ out:
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request;
+ struct rbd_device *rbd_dev;
bool known;
rbd_assert(obj_request_img_data_test(obj_request));
img_request = obj_request->img_request;
rbd_assert(img_request);
+ rbd_dev = img_request->rbd_dev;
/*
- * Only layered writes need special handling. If it's not a
- * layered write, or it is a layered write but we know the
- * target object exists, it's no different from any other
- * object request.
+ * Only writes to layered images need special handling.
+ * Reads and non-layered writes are simple object requests.
+ * Layered writes that start beyond the end of the overlap
+ * with the parent have no parent data, so they too are
+ * simple object requests. Finally, if the target object is
+ * known to already exist, its parent data has already been
+ * copied, so a write to the object can also be handled as a
+ * simple object request.
*/
if (!img_request_write_test(img_request) ||
!img_request_layered_test(img_request) ||
+ rbd_dev->parent_overlap <= obj_request->img_offset ||
((known = obj_request_known_test(obj_request)) &&
obj_request_exists_test(obj_request))) {
@@ -2386,14 +2403,41 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request)
static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
{
struct rbd_obj_request *obj_request;
+ struct rbd_device *rbd_dev;
+ u64 obj_end;
rbd_assert(img_request_child_test(img_request));
obj_request = img_request->obj_request;
- rbd_assert(obj_request != NULL);
+ rbd_assert(obj_request);
+ rbd_assert(obj_request->img_request);
+
obj_request->result = img_request->result;
- obj_request->xferred = img_request->xferred;
+ if (obj_request->result)
+ goto out;
+ /*
+ * We need to zero anything beyond the parent overlap
+ * boundary. Since rbd_img_obj_request_read_callback()
+ * will zero anything beyond the end of a short read, an
+ * easy way to do this is to pretend the data from the
+ * parent came up short--ending at the overlap boundary.
+ */
+ rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
+ obj_end = obj_request->img_offset + obj_request->length;
+ rbd_dev = obj_request->img_request->rbd_dev;
+ if (obj_end > rbd_dev->parent_overlap) {
+ u64 xferred = 0;
+
+ if (obj_request->img_offset < rbd_dev->parent_overlap)
+ xferred = rbd_dev->parent_overlap -
+ obj_request->img_offset;
+
+ obj_request->xferred = min(img_request->xferred, xferred);
+ } else {
+ obj_request->xferred = img_request->xferred;
+ }
+out:
rbd_img_obj_request_read_callback(obj_request);
rbd_obj_request_complete(obj_request);
}
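The length clamp added in rbd_img_obj_parent_read_full() and the xferred cap added in rbd_img_parent_read_callback() apply the same arithmetic: only the part of a request that lies below parent_overlap can be satisfied from the parent, and anything beyond it reads back as zeroes. A standalone sketch of that calculation, using an assumed helper name that does not exist in rbd.c:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/*
 * Hypothetical helper: how many bytes of the range
 * [img_offset, img_offset + length) the parent can actually back.
 * The remainder of the range is treated as zero-filled.
 */
static u64 parent_backed_bytes(u64 img_offset, u64 length, u64 parent_overlap)
{
	if (img_offset >= parent_overlap)
		return 0;				/* entirely past the overlap */
	if (img_offset + length > parent_overlap)
		return parent_overlap - img_offset;	/* clamp at the boundary */
	return length;
}

int main(void)
{
	u64 overlap = (u64)8 << 20;	/* 8 MiB parent overlap */
	u64 off = (u64)6 << 20;		/* read starts at 6 MiB ... */
	u64 len = (u64)4 << 20;		/* ... and asks for 4 MiB */
	u64 from_parent = parent_backed_bytes(off, len, overlap);

	/* Prints "from parent: 2097152, zero-filled: 2097152" (2 MiB each). */
	printf("from parent: %llu, zero-filled: %llu\n",
	       (unsigned long long)from_parent,
	       (unsigned long long)(len - from_parent));
	return 0;
}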
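Likewise, the rewritten condition in rbd_img_obj_request_submit() can be read as a small predicate: a write needs the parent copy-up path only when the image is layered, the write starts below the overlap, and the target object is not known to exist. A hypothetical restatement (this function is not part of the driver, and it collapses the driver's separate "known"/"exists" flags into one for brevity):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/*
 * Hypothetical restatement of the submit-path test: returns true only when
 * a write must go through the parent copy-up machinery; everything else can
 * be issued as a plain object request.
 */
static bool write_needs_copyup(bool is_write, bool layered, bool known_to_exist,
			       u64 img_offset, u64 parent_overlap)
{
	if (!is_write || !layered)
		return false;		/* reads and non-layered writes are simple */
	if (img_offset >= parent_overlap)
		return false;		/* no parent data beyond the overlap */
	if (known_to_exist)
		return false;		/* copy-up already done for this object */
	return true;
}

int main(void)
{
	/* Layered write at 6 MiB with an 8 MiB overlap, object not known to exist. */
	printf("needs copy-up: %d\n",
	       write_needs_copyup(true, true, false, (u64)6 << 20, (u64)8 << 20));
	return 0;
}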