30 files changed, 150 insertions, 149 deletions
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h index 4016fe1c65a9..73ca89a47f49 100644 --- a/arch/parisc/include/asm/cache.h +++ b/arch/parisc/include/asm/cache.h @@ -24,9 +24,6 @@ #define __read_mostly __attribute__((__section__(".data..read_mostly"))) -/* Read-only memory is marked before mark_rodata_ro() is called. */ -#define __ro_after_init __read_mostly - void parisc_cache_init(void); /* initializes cache-flushing */ void disable_sr_hashing_asm(int); /* low level support for above */ void disable_sr_hashing(void); /* turns off space register hashing */ diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 0338561968a4..a82b3eaa5398 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -29,9 +29,9 @@ #include <asm/sections.h> #include <asm/shmparam.h> -int split_tlb __read_mostly; -int dcache_stride __read_mostly; -int icache_stride __read_mostly; +int split_tlb __ro_after_init; +int dcache_stride __ro_after_init; +int icache_stride __ro_after_init; EXPORT_SYMBOL(dcache_stride); void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); @@ -51,12 +51,12 @@ DEFINE_SPINLOCK(pa_tlb_flush_lock); DEFINE_SPINLOCK(pa_swapper_pg_lock); #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) -int pa_serialize_tlb_flushes __read_mostly; +int pa_serialize_tlb_flushes __ro_after_init; #endif -struct pdc_cache_info cache_info __read_mostly; +struct pdc_cache_info cache_info __ro_after_init; #ifndef CONFIG_PA20 -static struct pdc_btlb_info btlb_info __read_mostly; +static struct pdc_btlb_info btlb_info __ro_after_init; #endif #ifdef CONFIG_SMP @@ -381,10 +381,10 @@ EXPORT_SYMBOL(flush_data_cache_local); EXPORT_SYMBOL(flush_kernel_icache_range_asm); #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ -static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD; +static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD; #define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */ -static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD; +static unsigned long parisc_tlb_flush_threshold __ro_after_init = FLUSH_TLB_THRESHOLD; void __init parisc_setup_cache_timing(void) { diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 15e7b3be7b6b..00a181f1ecc6 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -41,7 +41,7 @@ #include <asm/ropes.h> /* See comments in include/asm-parisc/pci.h */ -const struct dma_map_ops *hppa_dma_ops __read_mostly; +const struct dma_map_ops *hppa_dma_ops __ro_after_init; EXPORT_SYMBOL(hppa_dma_ops); static struct device root = { diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index 7a17551ea31e..f01e102bbfa2 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -87,7 +87,7 @@ extern unsigned long pdc_result2[NUM_PDC_RESULT]; /* Firmware needs to be initially set to narrow to determine the * actual firmware width. 
*/ -int parisc_narrow_firmware __read_mostly = 1; +int parisc_narrow_firmware __ro_after_init = 1; #endif /* On most currently-supported platforms, IODC I/O calls are 32-bit calls diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index d12de2a13753..951a339369dd 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S @@ -376,7 +376,7 @@ smp_slave_stext: ENDPROC(parisc_kernel_start) #ifndef CONFIG_64BIT - .section .data..read_mostly + .section .data..ro_after_init .align 4 .export $global$,data diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c index 6f2d611347a1..3f4a91c0b805 100644 --- a/arch/parisc/kernel/inventory.c +++ b/arch/parisc/kernel/inventory.c @@ -39,12 +39,12 @@ */ #undef DEBUG_PAT -int pdc_type __read_mostly = PDC_TYPE_ILLEGAL; +int pdc_type __ro_after_init = PDC_TYPE_ILLEGAL; /* cell number and location (PAT firmware only) */ -unsigned long parisc_cell_num __read_mostly; -unsigned long parisc_cell_loc __read_mostly; -unsigned long parisc_pat_pdc_cap __read_mostly; +unsigned long parisc_cell_num __ro_after_init; +unsigned long parisc_cell_loc __ro_after_init; +unsigned long parisc_pat_pdc_cap __ro_after_init; void __init setup_pdc(void) diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c index ae684ac6efb6..bc41ca243cfe 100644 --- a/arch/parisc/kernel/pci.c +++ b/arch/parisc/kernel/pci.c @@ -45,14 +45,14 @@ * #define pci_post_reset_delay 50 */ -struct pci_port_ops *pci_port __read_mostly; -struct pci_bios_ops *pci_bios __read_mostly; +struct pci_port_ops *pci_port __ro_after_init; +struct pci_bios_ops *pci_bios __ro_after_init; -static int pci_hba_count __read_mostly; +static int pci_hba_count __ro_after_init; /* parisc_pci_hba used by pci_port->in/out() ops to lookup bus data. */ #define PCI_HBA_MAX 32 -static struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX] __read_mostly; +static struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX] __ro_after_init; /******************************************************************** diff --git a/arch/parisc/kernel/perf_images.h b/arch/parisc/kernel/perf_images.h index 7fef9644df47..c108fee989d9 100644 --- a/arch/parisc/kernel/perf_images.h +++ b/arch/parisc/kernel/perf_images.h @@ -25,7 +25,7 @@ #define PCXU_IMAGE_SIZE 584 -static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly = { +static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] __ro_after_init = { /* * CPI: * @@ -2093,7 +2093,7 @@ static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly = }; #define PCXW_IMAGE_SIZE 576 -static uint32_t cuda_images[][PCXW_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly = { +static uint32_t cuda_images[][PCXW_IMAGE_SIZE/sizeof(uint32_t)] __ro_after_init = { /* * CPI: FROM CPI.IDF (Image 0) * diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 97c206734e24..89e4f4497ffb 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -192,7 +192,7 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r) * QEMU idle the host too. 
*/ -int running_on_qemu __read_mostly; +int running_on_qemu __ro_after_init; EXPORT_SYMBOL(running_on_qemu); void __cpuidle arch_cpu_idle_dead(void) diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index e0a81dedc366..e715871cd4ac 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -43,10 +43,10 @@ #include <asm/irq.h> /* for struct irq_region */ #include <asm/parisc-device.h> -struct system_cpuinfo_parisc boot_cpu_data __read_mostly; +struct system_cpuinfo_parisc boot_cpu_data __ro_after_init; EXPORT_SYMBOL(boot_cpu_data); #ifdef CONFIG_PA8X00 -int _parisc_requires_coherency __read_mostly; +int _parisc_requires_coherency __ro_after_init; EXPORT_SYMBOL(_parisc_requires_coherency); #endif diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index e54d5e4d3489..97ac707c6bff 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -641,7 +641,8 @@ cas_action: 2: stw %r24, 0(%r26) /* Free lock */ #ifdef CONFIG_SMP - LDCW 0(%sr2,%r20), %r1 /* Barrier */ +98: LDCW 0(%sr2,%r20), %r1 /* Barrier */ +99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif stw %r20, 0(%sr2,%r20) #if ENABLE_LWS_DEBUG @@ -658,7 +659,8 @@ cas_action: /* Error occurred on load or store */ /* Free lock */ #ifdef CONFIG_SMP - LDCW 0(%sr2,%r20), %r1 /* Barrier */ +98: LDCW 0(%sr2,%r20), %r1 /* Barrier */ +99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif stw %r20, 0(%sr2,%r20) #if ENABLE_LWS_DEBUG @@ -862,7 +864,8 @@ cas2_action: cas2_end: /* Free lock */ #ifdef CONFIG_SMP - LDCW 0(%sr2,%r20), %r1 /* Barrier */ +98: LDCW 0(%sr2,%r20), %r1 /* Barrier */ +99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif stw %r20, 0(%sr2,%r20) /* Enable interrupts */ @@ -875,7 +878,8 @@ cas2_end: /* Error occurred on load or store */ /* Free lock */ #ifdef CONFIG_SMP - LDCW 0(%sr2,%r20), %r1 /* Barrier */ +98: LDCW 0(%sr2,%r20), %r1 /* Barrier */ +99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif stw %r20, 0(%sr2,%r20) ssm PSW_SM_I, %r0 diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index a1e772f909cb..04508158815c 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -40,7 +40,7 @@ #include <linux/timex.h> -static unsigned long clocktick __read_mostly; /* timer cycles per tick */ +static unsigned long clocktick __ro_after_init; /* timer cycles per tick */ /* * We keep time on PA-RISC Linux by using the Interval Timer which is diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index 2d14f17838d2..87ae476d1c4f 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c @@ -40,7 +40,7 @@ static DEFINE_SPINLOCK(unwind_lock); * we can call unwind_init as early in the bootup process as * possible (before the slab allocator is initialized) */ -static struct unwind_table kernel_unwind_table __read_mostly; +static struct unwind_table kernel_unwind_table __ro_after_init; static LIST_HEAD(unwind_tables); static inline const struct unwind_table_entry * diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index a8be7a47fcc0..c3b1b9c24ede 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -18,9 +18,6 @@ *(.data..vm0.pgd) \ *(.data..vm0.pte) -/* No __ro_after_init data in the .rodata section - which will always be ro */ -#define RO_AFTER_INIT_DATA - #include <asm-generic/vmlinux.lds.h> /* needed for the processor specific cache alignment size */ diff --git a/arch/parisc/mm/init.c 
b/arch/parisc/mm/init.c index 11ec1f1221a6..ddca8287d43b 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -66,7 +66,7 @@ static struct resource pdcdata_resource = { .flags = IORESOURCE_BUSY | IORESOURCE_MEM, }; -static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly; +static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init; /* The following array is initialized from the firmware specific * information retrieved in kernel/inventory.c. @@ -345,16 +345,7 @@ static void __init setup_bootmem(void) memblock_dump_all(); } -static int __init parisc_text_address(unsigned long vaddr) -{ - static unsigned long head_ptr __initdata; - - if (!head_ptr) - head_ptr = PAGE_MASK & (unsigned long) - dereference_function_descriptor(&parisc_kernel_start); - - return core_kernel_text(vaddr) || vaddr == head_ptr; -} +static bool kernel_set_to_readonly; static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr, unsigned long size, @@ -372,10 +363,11 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long vaddr; unsigned long ro_start; unsigned long ro_end; - unsigned long kernel_end; + unsigned long kernel_start, kernel_end; ro_start = __pa((unsigned long)_text); ro_end = __pa((unsigned long)&data_start); + kernel_start = __pa((unsigned long)&__init_begin); kernel_end = __pa((unsigned long)&_end); end_paddr = start_paddr + size; @@ -438,26 +430,30 @@ static void __init map_pages(unsigned long start_vaddr, pg_table = (pte_t *) __va(pg_table) + start_pte; for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) { pte_t pte; - - if (force) - pte = __mk_pte(address, pgprot); - else if (parisc_text_address(vaddr)) { - pte = __mk_pte(address, PAGE_KERNEL_EXEC); - if (address >= ro_start && address < kernel_end) - pte = pte_mkhuge(pte); + pgprot_t prot; + bool huge = false; + + if (force) { + prot = pgprot; + } else if (address < kernel_start || address >= kernel_end) { + /* outside kernel memory */ + prot = PAGE_KERNEL; + } else if (!kernel_set_to_readonly) { + /* still initializing, allow writing to RO memory */ + prot = PAGE_KERNEL_RWX; + huge = true; + } else if (address >= ro_start) { + /* Code (ro) and Data areas */ + prot = (address < ro_end) ? + PAGE_KERNEL_EXEC : PAGE_KERNEL; + huge = true; + } else { + prot = PAGE_KERNEL; } - else -#if defined(CONFIG_PARISC_PAGE_SIZE_4KB) - if (address >= ro_start && address < ro_end) { - pte = __mk_pte(address, PAGE_KERNEL_EXEC); + + pte = __mk_pte(address, prot); + if (huge) pte = pte_mkhuge(pte); - } else -#endif - { - pte = __mk_pte(address, pgprot); - if (address >= ro_start && address < kernel_end) - pte = pte_mkhuge(pte); - } if (address >= end_paddr) break; @@ -493,6 +489,12 @@ void __ref free_initmem(void) { unsigned long init_begin = (unsigned long)__init_begin; unsigned long init_end = (unsigned long)__init_end; + unsigned long kernel_end = (unsigned long)&_end; + + /* Remap kernel text and data, but do not touch init section yet. */ + kernel_set_to_readonly = true; + map_pages(init_end, __pa(init_end), kernel_end - init_end, + PAGE_KERNEL, 0); /* The init text pages are marked R-X. 
We have to * flush the icache and mark them RW- @@ -509,7 +511,7 @@ void __ref free_initmem(void) PAGE_KERNEL, 1); /* force the kernel to see the new TLB entries */ - __flush_tlb_range(0, init_begin, init_end); + __flush_tlb_range(0, init_begin, kernel_end); /* finally dump all the instructions which were cached, since the * pages are no-longer executable */ @@ -527,8 +529,9 @@ void mark_rodata_ro(void) { /* rodata memory was already mapped with KERNEL_RO access rights by pagetable_init() and map_pages(). No need to do additional stuff here */ - printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n", - (unsigned long)(__end_rodata - __start_rodata) >> 10); + unsigned long roai_size = __end_ro_after_init - __start_ro_after_init; + + pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10); } #endif @@ -554,11 +557,11 @@ void mark_rodata_ro(void) #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ & ~(VM_MAP_OFFSET-1))) -void *parisc_vmalloc_start __read_mostly; +void *parisc_vmalloc_start __ro_after_init; EXPORT_SYMBOL(parisc_vmalloc_start); #ifdef CONFIG_PA11 -unsigned long pcxl_dma_start __read_mostly; +unsigned long pcxl_dma_start __ro_after_init; #endif void __init mem_init(void) @@ -632,7 +635,7 @@ void __init mem_init(void) #endif } -unsigned long *empty_zero_page __read_mostly; +unsigned long *empty_zero_page __ro_after_init; EXPORT_SYMBOL(empty_zero_page); /* diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 991420caa4f2..6a3076881321 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -66,6 +66,7 @@ struct virtio_ccw_device { bool device_lost; unsigned int config_ready; void *airq_info; + u64 dma_mask; }; struct vq_info_block_legacy { @@ -108,7 +109,6 @@ struct virtio_rev_info { struct virtio_ccw_vq_info { struct virtqueue *vq; int num; - void *queue; union { struct vq_info_block s; struct vq_info_block_legacy l; @@ -423,7 +423,6 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw) struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev); struct virtio_ccw_vq_info *info = vq->priv; unsigned long flags; - unsigned long size; int ret; unsigned int index = vq->index; @@ -461,8 +460,6 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw) ret, index); vring_del_virtqueue(vq); - size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN)); - free_pages_exact(info->queue, size); kfree(info->info_block); kfree(info); } @@ -494,8 +491,9 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, int err; struct virtqueue *vq = NULL; struct virtio_ccw_vq_info *info; - unsigned long size = 0; /* silence the compiler */ + u64 queue; unsigned long flags; + bool may_reduce; /* Allocate queue. 
*/ info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL); @@ -516,37 +514,34 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, err = info->num; goto out_err; } - size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN)); - info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); - if (info->queue == NULL) { - dev_warn(&vcdev->cdev->dev, "no queue\n"); - err = -ENOMEM; - goto out_err; - } + may_reduce = vcdev->revision > 0; + vq = vring_create_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, + vdev, true, may_reduce, ctx, + virtio_ccw_kvm_notify, callback, name); - vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev, - true, ctx, info->queue, virtio_ccw_kvm_notify, - callback, name); if (!vq) { /* For now, we fail if we can't get the requested size. */ dev_warn(&vcdev->cdev->dev, "no vq\n"); err = -ENOMEM; goto out_err; } + /* it may have been reduced */ + info->num = virtqueue_get_vring_size(vq); /* Register it with the host. */ + queue = virtqueue_get_desc_addr(vq); if (vcdev->revision == 0) { - info->info_block->l.queue = (__u64)info->queue; + info->info_block->l.queue = queue; info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN; info->info_block->l.index = i; info->info_block->l.num = info->num; ccw->count = sizeof(info->info_block->l); } else { - info->info_block->s.desc = (__u64)info->queue; + info->info_block->s.desc = queue; info->info_block->s.index = i; info->info_block->s.num = info->num; - info->info_block->s.avail = (__u64)virtqueue_get_avail(vq); - info->info_block->s.used = (__u64)virtqueue_get_used(vq); + info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq); + info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq); ccw->count = sizeof(info->info_block->s); } ccw->cmd_code = CCW_CMD_SET_VQ; @@ -572,8 +567,6 @@ out_err: if (vq) vring_del_virtqueue(vq); if (info) { - if (info->queue) - free_pages_exact(info->queue, size); kfree(info->info_block); } kfree(info); @@ -780,12 +773,8 @@ out_free: static void ccw_transport_features(struct virtio_device *vdev) { /* - * Packed ring isn't enabled on virtio_ccw for now, - * because virtio_ccw uses some legacy accessors, - * e.g. virtqueue_get_avail() and virtqueue_get_used() - * which aren't available in packed ring currently. + * Currently nothing to do here. 
*/ - __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED); } static int virtio_ccw_finalize_features(struct virtio_device *vdev) @@ -1266,6 +1255,16 @@ static int virtio_ccw_online(struct ccw_device *cdev) ret = -ENOMEM; goto out_free; } + + vcdev->vdev.dev.parent = &cdev->dev; + cdev->dev.dma_mask = &vcdev->dma_mask; + /* we are fine with common virtio infrastructure using 64 bit DMA */ + ret = dma_set_mask_and_coherent(&cdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_warn(&cdev->dev, "Failed to enable 64-bit DMA.\n"); + goto out_free; + } + vcdev->config_block = kzalloc(sizeof(*vcdev->config_block), GFP_DMA | GFP_KERNEL); if (!vcdev->config_block) { @@ -1280,7 +1279,6 @@ static int virtio_ccw_online(struct ccw_device *cdev) vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */ - vcdev->vdev.dev.parent = &cdev->dev; vcdev->vdev.dev.release = virtio_ccw_release_dev; vcdev->vdev.config = &virtio_ccw_config_ops; vcdev->cdev = cdev; diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 618fb6461017..c090d177bd75 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1443,7 +1443,6 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, tpg->tv_tpg_vhost_count++; tpg->vhost_scsi = vs; vs_tpg[tpg->tport_tpgt] = tpg; - smp_mb__after_atomic(); match = true; } mutex_unlock(&tpg->tv_tpg_mutex); diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 5df92c308286..0a7b3ce3fb75 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -1004,6 +1004,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, if (unlikely(vq->vq.num_free < 1)) { pr_debug("Can't add buf len 1 - avail = 0\n"); + kfree(desc); END_USE(vq); return -ENOSPC; } @@ -1718,10 +1719,10 @@ static inline int virtqueue_add(struct virtqueue *_vq, /** * virtqueue_add_sgs - expose buffers to other end - * @vq: the struct virtqueue we're talking about. + * @_vq: the struct virtqueue we're talking about. * @sgs: array of terminated scatterlists. - * @out_num: the number of scatterlists readable by other side - * @in_num: the number of scatterlists which are writable (after readable ones) + * @out_sgs: the number of scatterlists readable by other side + * @in_sgs: the number of scatterlists which are writable (after readable ones) * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * @@ -1821,7 +1822,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx); /** * virtqueue_kick_prepare - first half of split virtqueue_kick call. - * @vq: the struct virtqueue + * @_vq: the struct virtqueue * * Instead of virtqueue_kick(), you can do: * if (virtqueue_kick_prepare(vq)) @@ -1841,7 +1842,7 @@ EXPORT_SYMBOL_GPL(virtqueue_kick_prepare); /** * virtqueue_notify - second half of split virtqueue_kick call. - * @vq: the struct virtqueue + * @_vq: the struct virtqueue * * This does not need to be serialized. * @@ -1885,8 +1886,9 @@ EXPORT_SYMBOL_GPL(virtqueue_kick); /** * virtqueue_get_buf - get the next used buffer - * @vq: the struct virtqueue we're talking about. + * @_vq: the struct virtqueue we're talking about. * @len: the length written into the buffer + * @ctx: extra context for the token * * If the device wrote data into the buffer, @len will be set to the * amount written. This means you don't need to clear the buffer @@ -1916,7 +1918,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) EXPORT_SYMBOL_GPL(virtqueue_get_buf); /** * virtqueue_disable_cb - disable callbacks - * @vq: the struct virtqueue we're talking about. 
+ * @_vq: the struct virtqueue we're talking about. * * Note that this is not necessarily synchronous, hence unreliable and only * useful as an optimization. @@ -1936,7 +1938,7 @@ EXPORT_SYMBOL_GPL(virtqueue_disable_cb); /** * virtqueue_enable_cb_prepare - restart callbacks after disable_cb - * @vq: the struct virtqueue we're talking about. + * @_vq: the struct virtqueue we're talking about. * * This re-enables callbacks; it returns current queue state * in an opaque unsigned value. This value should be later tested by @@ -1957,7 +1959,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare); /** * virtqueue_poll - query pending used buffers - * @vq: the struct virtqueue we're talking about. + * @_vq: the struct virtqueue we're talking about. * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare). * * Returns "true" if there are pending used buffers in the queue. @@ -1976,7 +1978,7 @@ EXPORT_SYMBOL_GPL(virtqueue_poll); /** * virtqueue_enable_cb - restart callbacks after disable_cb. - * @vq: the struct virtqueue we're talking about. + * @_vq: the struct virtqueue we're talking about. * * This re-enables callbacks; it returns "false" if there are pending * buffers in the queue, to detect a possible race between the driver @@ -1995,7 +1997,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb); /** * virtqueue_enable_cb_delayed - restart callbacks after disable_cb. - * @vq: the struct virtqueue we're talking about. + * @_vq: the struct virtqueue we're talking about. * * This re-enables callbacks but hints to the other side to delay * interrupts until most of the available buffers have been processed; @@ -2017,7 +2019,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); /** * virtqueue_detach_unused_buf - detach first unused buffer - * @vq: the struct virtqueue we're talking about. + * @_vq: the struct virtqueue we're talking about. * * Returns NULL or the "data" token handed to virtqueue_add_*(). * This is not valid on an active queue; it is useful only for device @@ -2249,7 +2251,7 @@ EXPORT_SYMBOL_GPL(vring_transport_features); /** * virtqueue_get_vring_size - return the size of the virtqueue's vring - * @vq: the struct virtqueue containing the vring of interest. + * @_vq: the struct virtqueue containing the vring of interest. * * Returns the size of the vring. This is mainly used for boasting to * userspace. Unlike other operations, this need not be serialized. diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index bbb9e332f2fe..088987e9a3ea 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -332,7 +332,6 @@ __start_rodata = .; \ *(.rodata) *(.rodata.*) \ RO_AFTER_INIT_DATA /* Read only after init */ \ - KEEP(*(__vermagic)) /* Kernel version magic */ \ . 
= ALIGN(8); \ __start___tracepoints_ptrs = .; \ KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \ diff --git a/include/linux/module.h b/include/linux/module.h index 8f75277d4cef..188998d3dca9 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -332,6 +332,7 @@ struct mod_kallsyms { Elf_Sym *symtab; unsigned int num_symtab; char *strtab; + char *typetab; }; #ifdef CONFIG_LIVEPATCH @@ -717,6 +718,17 @@ static inline bool within_module_core(unsigned long addr, return false; } +static inline bool within_module_init(unsigned long addr, + const struct module *mod) +{ + return false; +} + +static inline bool within_module(unsigned long addr, const struct module *mod) +{ + return false; +} + /* Get/put a kernel symbol (calls should be symmetric) */ #define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); }) #define symbol_put(x) do { } while (0) diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 673fe3ef3607..15f906e4a748 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -90,23 +90,6 @@ dma_addr_t virtqueue_get_desc_addr(struct virtqueue *vq); dma_addr_t virtqueue_get_avail_addr(struct virtqueue *vq); dma_addr_t virtqueue_get_used_addr(struct virtqueue *vq); -/* - * Legacy accessors -- in almost all cases, these are the wrong functions - * to use. - */ -static inline void *virtqueue_get_desc(struct virtqueue *vq) -{ - return virtqueue_get_vring(vq)->desc; -} -static inline void *virtqueue_get_avail(struct virtqueue *vq) -{ - return virtqueue_get_vring(vq)->avail; -} -static inline void *virtqueue_get_used(struct virtqueue *vq) -{ - return virtqueue_get_vring(vq)->used; -} - /** * virtio_device - representation of a device using virtio * @index: unique position on the virtio bus diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c index 7510dc687c0d..4b280fc7dd67 100644 --- a/kernel/debug/gdbstub.c +++ b/kernel/debug/gdbstub.c @@ -1033,13 +1033,14 @@ int gdb_serial_stub(struct kgdb_state *ks) return DBG_PASS_EVENT; } #endif + /* Fall through */ case 'C': /* Exception passing */ tmp = gdb_cmd_exception_pass(ks); if (tmp > 0) goto default_handle; if (tmp == 0) break; - /* Fall through on tmp < 0 */ + /* Fall through - on tmp < 0 */ case 'c': /* Continue packet */ case 's': /* Single step packet */ if (kgdb_contthread && kgdb_contthread != current) { @@ -1048,7 +1049,7 @@ int gdb_serial_stub(struct kgdb_state *ks) break; } dbg_activate_sw_breakpoints(); - /* Fall through to default processing */ + /* Fall through - to default processing */ default: default_handle: error = kgdb_arch_handle_exception(ks->ex_vector, @@ -1094,10 +1095,10 @@ int gdbstub_state(struct kgdb_state *ks, char *cmd) return error; case 's': case 'c': - strcpy(remcom_in_buffer, cmd); + strscpy(remcom_in_buffer, cmd, sizeof(remcom_in_buffer)); return 0; case '$': - strcpy(remcom_in_buffer, cmd); + strscpy(remcom_in_buffer, cmd, sizeof(remcom_in_buffer)); gdbstub_use_prev_in_buf = strlen(remcom_in_buffer); gdbstub_prev_in_buf_pos = 0; return 0; diff --git a/kernel/debug/kdb/Makefile b/kernel/debug/kdb/Makefile index d4fc58f4b88d..efac857c5511 100644 --- a/kernel/debug/kdb/Makefile +++ b/kernel/debug/kdb/Makefile @@ -6,7 +6,6 @@ # Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved. 
# -CCVERSION := $(shell $(CC) -v 2>&1 | sed -ne '$$p') obj-y := kdb_io.o kdb_main.o kdb_support.o kdb_bt.o gen-kdb_cmds.o kdb_bp.o kdb_debugger.o obj-$(CONFIG_KDB_KEYBOARD) += kdb_keyboard.o diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index 6a4b41484afe..3a5184eb6977 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -446,7 +446,7 @@ poll_again: char *kdb_getstr(char *buffer, size_t bufsize, const char *prompt) { if (prompt && kdb_prompt_str != prompt) - strncpy(kdb_prompt_str, prompt, CMD_BUFLEN); + strscpy(kdb_prompt_str, prompt, CMD_BUFLEN); kdb_printf(kdb_prompt_str); kdb_nextline = 1; /* Prompt and input resets line number */ return kdb_read(buffer, bufsize); diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 82a3b32a7cfc..9ecfa37c7fbf 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -2522,7 +2522,6 @@ static int kdb_summary(int argc, const char **argv) kdb_printf("machine %s\n", init_uts_ns.name.machine); kdb_printf("nodename %s\n", init_uts_ns.name.nodename); kdb_printf("domainname %s\n", init_uts_ns.name.domainname); - kdb_printf("ccversion %s\n", __stringify(CCVERSION)); now = __ktime_get_real_seconds(); time64_to_tm(now, 0, &tm); @@ -2584,7 +2583,7 @@ static int kdb_per_cpu(int argc, const char **argv) diag = kdbgetularg(argv[3], &whichcpu); if (diag) return diag; - if (!cpu_online(whichcpu)) { + if (whichcpu >= nr_cpu_ids || !cpu_online(whichcpu)) { kdb_printf("cpu %ld is not online\n", whichcpu); return KDB_BADCPUNUM; } diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c index 50bf9b119bad..b8e6306e7e13 100644 --- a/kernel/debug/kdb/kdb_support.c +++ b/kernel/debug/kdb/kdb_support.c @@ -192,7 +192,7 @@ int kallsyms_symbol_complete(char *prefix_name, int max_len) while ((name = kdb_walk_kallsyms(&pos))) { if (strncmp(name, prefix_name, prefix_len) == 0) { - strcpy(ks_namebuf, name); + strscpy(ks_namebuf, name, sizeof(ks_namebuf)); /* Work out the longest name that matches the prefix */ if (++number == 1) { prev_len = min_t(int, max_len-1, diff --git a/kernel/module-internal.h b/kernel/module-internal.h index 79c9be2dbbe9..d354341f8cc0 100644 --- a/kernel/module-internal.h +++ b/kernel/module-internal.h @@ -20,7 +20,7 @@ struct load_info { unsigned long len; Elf_Shdr *sechdrs; char *secstrings, *strtab; - unsigned long symoffs, stroffs; + unsigned long symoffs, stroffs, init_typeoffs, core_typeoffs; struct _ddebug *debug; unsigned int num_debug; bool sig_ok; diff --git a/kernel/module.c b/kernel/module.c index a9e1e7f2c224..6e6712b3aaf5 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2642,6 +2642,8 @@ static void layout_symtab(struct module *mod, struct load_info *info) info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1); info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym); mod->core_layout.size += strtab_size; + info->core_typeoffs = mod->core_layout.size; + mod->core_layout.size += ndst * sizeof(char); mod->core_layout.size = debug_align(mod->core_layout.size); /* Put string table section at end of init part of module. 
*/ @@ -2655,6 +2657,8 @@ static void layout_symtab(struct module *mod, struct load_info *info) __alignof__(struct mod_kallsyms)); info->mod_kallsyms_init_off = mod->init_layout.size; mod->init_layout.size += sizeof(struct mod_kallsyms); + info->init_typeoffs = mod->init_layout.size; + mod->init_layout.size += nsrc * sizeof(char); mod->init_layout.size = debug_align(mod->init_layout.size); } @@ -2678,20 +2682,23 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); /* Make sure we get permanent strtab: don't use info->strtab. */ mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; + mod->kallsyms->typetab = mod->init_layout.base + info->init_typeoffs; - /* Set types up while we still have access to sections. */ - for (i = 0; i < mod->kallsyms->num_symtab; i++) - mod->kallsyms->symtab[i].st_size - = elf_type(&mod->kallsyms->symtab[i], info); - - /* Now populate the cut down core kallsyms for after init. */ + /* + * Now populate the cut down core kallsyms for after init + * and set types up while we still have access to sections. + */ mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs; mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs; + mod->core_kallsyms.typetab = mod->core_layout.base + info->core_typeoffs; src = mod->kallsyms->symtab; for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { + mod->kallsyms->typetab[i] = elf_type(src + i, info); if (i == 0 || is_livepatch_module(mod) || is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, info->index.pcpu)) { + mod->core_kallsyms.typetab[ndst] = + mod->kallsyms->typetab[i]; dst[ndst] = src[i]; dst[ndst++].st_name = s - mod->core_kallsyms.strtab; s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name], @@ -4091,7 +4098,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, const Elf_Sym *sym = &kallsyms->symtab[symnum]; *value = kallsyms_symbol_value(sym); - *type = sym->st_size; + *type = kallsyms->typetab[symnum]; strlcpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN); strlcpy(module_name, mod->name, MODULE_NAME_LEN); *exported = is_exported(name, *value, mod); diff --git a/samples/vfs/.gitignore b/samples/vfs/.gitignore new file mode 100644 index 000000000000..0806eb0be62d --- /dev/null +++ b/samples/vfs/.gitignore @@ -0,0 +1,2 @@ +test-fsmount +test-statx diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c index 2d566fbd236b..c9b26335f891 100644 --- a/tools/virtio/ringtest/ptr_ring.c +++ b/tools/virtio/ringtest/ptr_ring.c @@ -18,7 +18,6 @@ #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a)) #define SIZE_MAX (~(size_t)0) #define KMALLOC_MAX_SIZE SIZE_MAX -#define BUG_ON(x) assert(x) typedef pthread_spinlock_t spinlock_t; |
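
Most of the parisc hunks above convert variables that are written only during boot (cache strides, firmware width, PDC inventory data, and so on) from __read_mostly to __ro_after_init, and drop the arch's private override of the macro so such data lands in the generic .data..ro_after_init section and is covered by the kernel's read-only-after-init protection. A minimal sketch of the annotation itself, with a hypothetical variable and initcall name, assuming the generic definition from <linux/cache.h>:

#include <linux/cache.h>        /* __ro_after_init */
#include <linux/init.h>

/*
 * Hypothetical example: a value probed once from firmware during boot.
 * __ro_after_init places it in .data..ro_after_init, which is
 * write-protected once init completes; __read_mostly only influences
 * cache-friendly placement and stays writable forever.
 */
static unsigned long example_cache_stride __ro_after_init;

static int __init example_probe_stride(void)
{
        /* Still writable here: init code runs before the section is sealed. */
        example_cache_stride = 64;
        return 0;
}
arch_initcall(example_probe_stride);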
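
In the virtio_ccw conversion the driver stops allocating the ring itself with alloc_pages_exact() and calling the legacy vring_new_virtqueue()/virtqueue_get_avail()/virtqueue_get_used() helpers; instead vring_create_virtqueue() allocates the ring (and may shrink it when may_reduce is set, i.e. for revision > 0 devices), and the addresses handed to the host are read back with virtqueue_get_desc_addr()/virtqueue_get_avail_addr()/virtqueue_get_used_addr(). A sketch of that call pattern for an imaginary transport; the notify hook, the 4096-byte alignment, and the function names are assumptions, not ccw code:

#include <linux/printk.h>
#include <linux/virtio.h>
#include <linux/virtio_ring.h>

/* Hypothetical notify hook for an imaginary transport. */
static bool example_notify(struct virtqueue *vq)
{
        /* Kick the device here; returning false marks the device broken. */
        return true;
}

static struct virtqueue *example_setup_vq(struct virtio_device *vdev,
                                          unsigned int index, unsigned int num,
                                          void (*callback)(struct virtqueue *),
                                          const char *name)
{
        struct virtqueue *vq;
        dma_addr_t desc;

        vq = vring_create_virtqueue(index, num, 4096 /* assumed alignment */, vdev,
                                    true,   /* weak_barriers */
                                    true,   /* may_reduce_num: core may shrink the ring */
                                    false,  /* no per-buffer context */
                                    example_notify, callback, name);
        if (!vq)
                return NULL;

        num = virtqueue_get_vring_size(vq);     /* may be smaller than requested */
        desc = virtqueue_get_desc_addr(vq);     /* address to register with the host */
        pr_debug("%s: vq %u has %u entries, desc at %pad\n", name, index, num, &desc);

        return vq;
}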
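
The same driver now also points cdev->dev.dma_mask at per-device storage and declares 64-bit DMA support before creating any virtqueue, since the core ring allocation can go through the DMA API. Roughly, assuming a bus whose devices come up without a dma_mask wired up:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Sketch: declare DMA capability early in the online/probe path. */
static int example_enable_dma(struct device *dev, u64 *mask_storage)
{
        dev->dma_mask = mask_storage;   /* assumed: bus leaves this NULL by default */
        return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
}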
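
Several gdbstub hunks add or rewrite /* Fall through */ comments so that intentional case fall-through is recognized by the compiler's implicit-fallthrough warning rather than flagged as a missing break. The shape of the annotation, on made-up cases:

static int example_classify(char cmd)
{
        int handled = 0;

        switch (cmd) {
        case 'D':               /* handled like 'C', plus its own flag */
                handled = 1;
                /* Fall through */
        case 'C':
                handled |= 2;
                break;
        default:
                break;
        }

        return handled;
}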
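
Finally, the kgdb/kdb changes replace strcpy()/strncpy() with strscpy(), which takes the destination size, cannot overrun, always NUL-terminates, and returns a negative value on truncation. A small sketch with invented names; the 200-byte limit only stands in for a CMD_BUFLEN-style bound:

#include <linux/printk.h>
#include <linux/string.h>

static char example_prompt[200];        /* assumption: mirrors a CMD_BUFLEN-style limit */

static void example_set_prompt(const char *src)
{
        /* Bounded, always NUL-terminated; a negative return means truncation. */
        if (strscpy(example_prompt, src, sizeof(example_prompt)) < 0)
                pr_warn_once("prompt truncated to %zu bytes\n",
                             sizeof(example_prompt) - 1);
}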