From 1f13d75ccb806260079e0679d55d9253e370ec8a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?=
Date: Tue, 3 Nov 2015 16:34:09 +0000
Subject: xen-blkback: only read request operation from shared ring once
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

A compiler may load a switch statement value multiple times, which could
be bad when the value is in memory shared with the frontend.

When converting a non-native request to a native one, ensure that
src->operation is only loaded once by using READ_ONCE().

This is part of XSA155.

CC: stable@vger.kernel.org
Signed-off-by: Roger Pau Monné
Signed-off-by: David Vrabel
Signed-off-by: Konrad Rzeszutek Wilk
---
 drivers/block/xen-blkback/common.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'drivers/block')

diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 68e87a037b99..c929ae22764c 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -408,8 +408,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
 					struct blkif_x86_32_request *src)
 {
 	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-	dst->operation = src->operation;
-	switch (src->operation) {
+	dst->operation = READ_ONCE(src->operation);
+	switch (dst->operation) {
 	case BLKIF_OP_READ:
 	case BLKIF_OP_WRITE:
 	case BLKIF_OP_WRITE_BARRIER:
@@ -456,8 +456,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
 					struct blkif_x86_64_request *src)
 {
 	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-	dst->operation = src->operation;
-	switch (src->operation) {
+	dst->operation = READ_ONCE(src->operation);
+	switch (dst->operation) {
 	case BLKIF_OP_READ:
 	case BLKIF_OP_WRITE:
 	case BLKIF_OP_WRITE_BARRIER:
--
cgit v1.2.3
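The patch above closes a double fetch: without READ_ONCE(), the compiler may
reload src->operation from the shared ring when evaluating the switch, so the
frontend could present one opcode to the copy and a different one to the
dispatch. Below is a minimal, self-contained sketch of that pattern, with the
assumption that READ_ONCE_U8() is a simplified stand-in for the kernel's
READ_ONCE(), and that struct shared_req, OP_READ/OP_WRITE and handle_request()
are hypothetical names rather than the real blkif definitions.

#include <stdint.h>

/* Simplified stand-in for the kernel's READ_ONCE(): a single volatile load
 * the compiler is not allowed to repeat or split. */
#define READ_ONCE_U8(x)	(*(const volatile uint8_t *)&(x))

/* Hypothetical request layout; 'operation' lives in a page the untrusted
 * frontend can rewrite at any time. */
struct shared_req {
	uint8_t operation;
};

enum { OP_READ = 0, OP_WRITE = 1 };

int handle_request(struct shared_req *shared)
{
	uint8_t op = READ_ONCE_U8(shared->operation);	/* fetch exactly once */

	/* Dispatch on the local copy only; re-reading shared->operation here
	 * would let the frontend change the opcode between the two loads. */
	switch (op) {
	case OP_READ:
	case OP_WRITE:
		return 0;	/* known operation: would be converted/submitted here */
	default:
		return -1;	/* unknown operation: rejected consistently */
	}
}

The important property is that the case-label check and any later use operate
on the same local snapshot, op, never on the shared field itself.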
From 18779149101c0dd43ded43669ae2a92d21b6f9cb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?=
Date: Tue, 3 Nov 2015 16:40:43 +0000
Subject: xen-blkback: read from indirect descriptors only once
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Since indirect descriptors are in memory shared with the frontend, the
frontend could alter the first_sect and last_sect values after they have
been validated but before they are recorded in the request. This may
result in I/O requests that overflow the foreign page, possibly
overwriting local pages when the I/O request is executed.

When parsing indirect descriptors, only read first_sect and last_sect
once.

This is part of XSA155.

CC: stable@vger.kernel.org
Signed-off-by: Roger Pau Monné
Signed-off-by: David Vrabel
Signed-off-by: Konrad Rzeszutek Wilk
---
 drivers/block/xen-blkback/blkback.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

(limited to 'drivers/block')

diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index f9099940c272..41fb1a917b17 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -950,6 +950,8 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 		goto unmap;
 
 	for (n = 0, i = 0; n < nseg; n++) {
+		uint8_t first_sect, last_sect;
+
 		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
 			/* Map indirect segments */
 			if (segments)
@@ -957,15 +959,18 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
 		}
 		i = n % SEGS_PER_INDIRECT_FRAME;
+
 		pending_req->segments[n]->gref = segments[i].gref;
-		seg[n].nsec = segments[i].last_sect -
-			segments[i].first_sect + 1;
-		seg[n].offset = (segments[i].first_sect << 9);
-		if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
-		    (segments[i].last_sect < segments[i].first_sect)) {
+
+		first_sect = READ_ONCE(segments[i].first_sect);
+		last_sect = READ_ONCE(segments[i].last_sect);
+		if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
 			rc = -EINVAL;
 			goto unmap;
 		}
+
+		seg[n].nsec = last_sect - first_sect + 1;
+		seg[n].offset = first_sect << 9;
 		preq->nr_sects += seg[n].nsec;
 	}
 
--
cgit v1.2.3
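The same read-once-then-validate idea drives this blkback.c change:
first_sect and last_sect are snapshotted with READ_ONCE(), the snapshot is
bounds-checked, and nsec/offset are derived only from the validated local
copies, so the frontend cannot change the sector range between the check and
its use. A minimal sketch of that pattern follows, under the assumption that
READ_ONCE_U8(), struct shared_seg, struct parsed_seg and parse_segment() are
hypothetical names and PAGE_SIZE stands in for XEN_PAGE_SIZE (512-byte
sectors).

#include <stdint.h>

/* Simplified stand-in for the kernel's READ_ONCE(). */
#define READ_ONCE_U8(x)	(*(const volatile uint8_t *)&(x))
#define PAGE_SIZE	4096		/* stand-in for XEN_PAGE_SIZE */

/* Hypothetical indirect descriptor: frontend-writable shared memory. */
struct shared_seg {
	uint8_t first_sect;
	uint8_t last_sect;
};

/* Backend-private copy used to build the actual I/O request. */
struct parsed_seg {
	unsigned int nsec;
	unsigned int offset;
};

int parse_segment(struct shared_seg *shared, struct parsed_seg *out)
{
	/* Snapshot both fields exactly once ... */
	uint8_t first_sect = READ_ONCE_U8(shared->first_sect);
	uint8_t last_sect  = READ_ONCE_U8(shared->last_sect);

	/* ... validate the snapshot against the page size ... */
	if (last_sect >= (PAGE_SIZE >> 9) || last_sect < first_sect)
		return -1;

	/* ... and derive the I/O parameters only from the validated local
	 * copies, never from the shared memory again. */
	out->nsec   = last_sect - first_sect + 1;
	out->offset = first_sect << 9;
	return 0;
}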