From 9a558ee3ccb8afcf43c8d9d4d206ab6de4aa30a2 Mon Sep 17 00:00:00 2001
From: Thomas Huth
Date: Mon, 3 Feb 2014 10:42:30 +0100
Subject: KVM: s390: Handle MVPG partial execution interception

When the guest executes the MVPG instruction with DAT disabled, and the
source or destination page is not mapped in the host, the so-called
partial execution interception occurs. We need to handle this event by
setting up a mapping for the corresponding user pages.

Signed-off-by: Thomas Huth
Reviewed-by: Cornelia Huck
Signed-off-by: Christian Borntraeger
---
 arch/s390/kvm/intercept.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 54 insertions(+), 1 deletion(-)

diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 30e1c5eb726a..54313fe6e8d4 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -1,7 +1,7 @@
 /*
  * in-kernel handling for sie intercepts
  *
- * Copyright IBM Corp. 2008, 2009
+ * Copyright IBM Corp. 2008, 2014
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -234,6 +234,58 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
 	return rc2;
 }
 
+/**
+ * Handle MOVE PAGE partial execution interception.
+ *
+ * This interception can only happen for guests with DAT disabled and
+ * addresses that are currently not mapped in the host. Thus we try to
+ * set up the mappings for the corresponding user pages here (or throw
+ * addressing exceptions in case of illegal guest addresses).
+ */
+static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
+{
+	unsigned long hostaddr, srcaddr, dstaddr;
+	psw_t *psw = &vcpu->arch.sie_block->gpsw;
+	struct mm_struct *mm = current->mm;
+	int reg1, reg2, rc;
+
+	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
+	srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]);
+	dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]);
+
+	/* Make sure that the source is paged-in */
+	hostaddr = gmap_fault(srcaddr, vcpu->arch.gmap);
+	if (IS_ERR_VALUE(hostaddr))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	down_read(&mm->mmap_sem);
+	rc = get_user_pages(current, mm, hostaddr, 1, 0, 0, NULL, NULL);
+	up_read(&mm->mmap_sem);
+	if (rc < 0)
+		return rc;
+
+	/* Make sure that the destination is paged-in */
+	hostaddr = gmap_fault(dstaddr, vcpu->arch.gmap);
+	if (IS_ERR_VALUE(hostaddr))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	down_read(&mm->mmap_sem);
+	rc = get_user_pages(current, mm, hostaddr, 1, 1, 0, NULL, NULL);
+	up_read(&mm->mmap_sem);
+	if (rc < 0)
+		return rc;
+
+	psw->addr = __rewind_psw(*psw, 4);
+
+	return 0;
+}
+
+static int handle_partial_execution(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.sie_block->ipa == 0xb254)	/* MVPG */
+		return handle_mvpg_pei(vcpu);
+
+	return -EOPNOTSUPP;
+}
+
 static const intercept_handler_t intercept_funcs[] = {
 	[0x00 >> 2] = handle_noop,
 	[0x04 >> 2] = handle_instruction,
@@ -245,6 +297,7 @@ static const intercept_handler_t intercept_funcs[] = {
 	[0x1C >> 2] = kvm_s390_handle_wait,
 	[0x20 >> 2] = handle_validity,
 	[0x28 >> 2] = handle_stop,
+	[0x38 >> 2] = handle_partial_execution,
 };
 
 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
-- 
cgit v1.2.3
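The PSW rewind at the end of handle_mvpg_pei() steps the guest PSW back
over the 4-byte MVPG opcode, so the instruction is simply re-executed
once both pages are mapped in. A minimal user-space sketch of that
arithmetic, assuming a simplified two-word PSW and a fixed 64-bit
addressing mode (the kernel's __rewind_psw() additionally handles
24/31-bit wrap-around):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's psw_t (assumption). */
struct psw {
	uint64_t mask;
	uint64_t addr;
};

/* Step the instruction address back by ilc bytes so the intercepted
 * instruction is retried; MVPG has an instruction length of 4. */
static struct psw rewind_psw(struct psw psw, unsigned int ilc)
{
	psw.addr -= ilc;	/* 64-bit mode: no wrap-around needed */
	return psw;
}

int main(void)
{
	struct psw psw = { .mask = 0, .addr = 0x20004 };

	psw = rewind_psw(psw, 4);
	printf("re-execute at %#llx\n", (unsigned long long)psw.addr);
	return 0;
}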
From f8232c8cf720074c0eb0804202d424a2b7b4ee76 Mon Sep 17 00:00:00 2001
From: Thomas Huth
Date: Mon, 3 Mar 2014 23:34:42 +0100
Subject: KVM: s390: Add a function for checking the low-address protection

The s390 architecture has a special protection mechanism that can be
used to prevent write access to the vital data in the low-core memory
area. This patch adds a new helper function that checks for such write
accesses and, in case of a protected address, also sets up the
exception data accordingly.

Signed-off-by: Thomas Huth
Reviewed-by: Cornelia Huck
Signed-off-by: Christian Borntraeger
---
 arch/s390/kvm/gaccess.c | 28 ++++++++++++++++++++++++++++
 arch/s390/kvm/gaccess.h |  1 +
 2 files changed, 29 insertions(+)

diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 691fdb776c90..db608c3f9303 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -643,3 +643,31 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
 	}
 	return rc;
 }
+
+/**
+ * kvm_s390_check_low_addr_protection - check for low-address protection
+ * @ga: Guest address
+ *
+ * Checks whether an address is subject to low-address protection and sets
+ * up vcpu->arch.pgm accordingly if necessary.
+ *
+ * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
+ */
+int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga)
+{
+	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
+	psw_t *psw = &vcpu->arch.sie_block->gpsw;
+	struct trans_exc_code_bits *tec_bits;
+
+	if (!is_low_address(ga) || !low_address_protection_enabled(vcpu))
+		return 0;
+
+	memset(pgm, 0, sizeof(*pgm));
+	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
+	tec_bits->fsi = FSI_STORE;
+	tec_bits->as = psw_bits(*psw).as;
+	tec_bits->addr = ga >> PAGE_SHIFT;
+	pgm->code = PGM_PROTECTION;
+
+	return pgm->code;
+}
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 1079c8fc6d0d..68db43e4254f 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -325,5 +325,6 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
 }
 
 int ipte_lock_held(struct kvm_vcpu *vcpu);
+int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);
 
 #endif /* __KVM_S390_GACCESS_H */
-- 
cgit v1.2.3
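The helper's guard, is_low_address(), encodes the architected ranges:
low-address protection covers effective addresses 0-511 and 4096-4607,
the first 512 bytes of each of the two lowcore pages. Those are exactly
the addresses whose bits fall within 0x11ff, so a single mask test
suffices. A stand-alone sketch of the predicate, written from the
architecture definition (the kernel's exact implementation may differ):

#include <assert.h>
#include <stdbool.h>

/* True for effective addresses 0-511 and 4096-4607, the two ranges
 * guarded by low-address protection. */
static bool is_low_address(unsigned long ga)
{
	return (ga & ~0x11ffUL) == 0;
}

int main(void)
{
	assert(is_low_address(0x0000) && is_low_address(0x01ff));
	assert(is_low_address(0x1000) && is_low_address(0x11ff));
	assert(!is_low_address(0x0200) && !is_low_address(0x0fff));
	assert(!is_low_address(0x1200));
	return 0;
}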
From fb34c603655fafdd244227cb6b42d006469735fc Mon Sep 17 00:00:00 2001
From: Thomas Huth
Date: Mon, 9 Sep 2013 17:58:38 +0200
Subject: KVM: s390: Fixes for PFMF

Add a check for low-address protection to the PFMF handler and convert
real addresses to absolute addresses if necessary, as defined in the
Principles of Operation.

Signed-off-by: Thomas Huth
Reviewed-by: Cornelia Huck
Signed-off-by: Christian Borntraeger
---
 arch/s390/kvm/priv.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 27f9051a78f8..a47157bca8a6 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -650,6 +650,11 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
 	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
+		if (kvm_s390_check_low_addr_protection(vcpu, start))
+			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
+	}
+
 	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
 	case 0x00000000:
 		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
@@ -665,10 +670,15 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 	}
 	while (start < end) {
-		unsigned long useraddr;
-
-		useraddr = gmap_translate(start, vcpu->arch.gmap);
-		if (IS_ERR((void *)useraddr))
+		unsigned long useraddr, abs_addr;
+
+		/* Translate guest address to host address */
+		if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0)
+			abs_addr = kvm_s390_real_to_abs(vcpu, start);
+		else
+			abs_addr = start;
+		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr));
+		if (kvm_is_error_hva(useraddr))
 			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
 		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
-- 
cgit v1.2.3
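The real-to-absolute conversion added above implements s390 prefixing:
the first two pages of real storage and the two pages at the CPU's
prefix address are swapped, and every other address maps 1:1. A
stand-alone model of that swap (illustrative only; the kernel's
kvm_s390_real_to_abs() reads the prefix from the SIE block):

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

/* Apply prefixing: real addresses below 8K map into the prefix area,
 * the prefix area maps down to absolute 0, everything else is 1:1. */
static uint64_t real_to_abs(uint64_t real, uint64_t prefix)
{
	if (real < 2 * PAGE_SIZE)
		return real + prefix;
	if (real >= prefix && real < prefix + 2 * PAGE_SIZE)
		return real - prefix;
	return real;
}

int main(void)
{
	uint64_t prefix = 0x40000;

	assert(real_to_abs(0x1000, prefix) == 0x41000);    /* lowcore -> prefix area */
	assert(real_to_abs(0x41000, prefix) == 0x1000);    /* prefix area -> lowcore */
	assert(real_to_abs(0x123000, prefix) == 0x123000); /* identity elsewhere */
	return 0;
}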
From e45efa28e578758d3c5ef0d7d3f88aadb9a61515 Mon Sep 17 00:00:00 2001
From: Thomas Huth
Date: Fri, 7 Mar 2014 12:14:23 +0100
Subject: KVM: s390: Add low-address protection to TEST BLOCK

TEST BLOCK is also subject to low-address protection, so we need to
check the destination address in our handler.

Signed-off-by: Thomas Huth
Reviewed-by: Cornelia Huck
Signed-off-by: Christian Borntraeger
---
 arch/s390/kvm/priv.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index a47157bca8a6..07d0c1025cb9 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -206,6 +206,9 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
 
 	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
 	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+	addr = kvm_s390_logical_to_effective(vcpu, addr);
+	if (kvm_s390_check_low_addr_protection(vcpu, addr))
+		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
 	addr = kvm_s390_real_to_abs(vcpu, addr);
 
 	if (kvm_is_error_gpa(vcpu->kvm, addr))
-- 
cgit v1.2.3
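The new first step matters because TEST BLOCK operands are logical
addresses: before the low-address check, the address has to be reduced
to the PSW's current addressing mode, keeping 24, 31 or 64 significant
bits. A simplified sketch of that truncation (the kernel's
kvm_s390_logical_to_effective() derives the mode from the guest PSW;
here it is passed explicitly):

#include <assert.h>
#include <stdint.h>

enum amode { AMODE_24, AMODE_31, AMODE_64 };

/* Truncate a logical address to the addressing mode in effect. */
static uint64_t logical_to_effective(uint64_t ga, enum amode mode)
{
	switch (mode) {
	case AMODE_24:
		return ga & ((1UL << 24) - 1);
	case AMODE_31:
		return ga & ((1UL << 31) - 1);
	default:
		return ga;	/* AMODE_64: all bits significant */
	}
}

int main(void)
{
	uint64_t ga = 0xdeadbeef12345678UL;

	assert(logical_to_effective(ga, AMODE_24) == 0x345678);
	assert(logical_to_effective(ga, AMODE_31) == 0x12345678);
	assert(logical_to_effective(ga, AMODE_64) == ga);
	return 0;
}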
From 6852d7b69b4949234c3a8ae1f279f6a4c6563662 Mon Sep 17 00:00:00 2001
From: David Hildenbrand
Date: Fri, 14 Mar 2014 10:59:29 +0100
Subject: KVM: s390: introduce kvm_s390_vcpu_{start,stop}

This patch introduces two new functions to set/clear the
CPUSTAT_STOPPED bit and makes use of them at all applicable places.
These functions prepare for the execution of additional code when
starting/stopping a vcpu. The CPUSTAT_STOPPED bit should not be
touched outside of these functions.

Signed-off-by: David Hildenbrand
Reviewed-by: Cornelia Huck
Signed-off-by: Christian Borntraeger
---
 arch/s390/kvm/diag.c       |  2 +-
 arch/s390/kvm/intercept.c  |  3 +--
 arch/s390/kvm/interrupt.c  |  2 +-
 arch/s390/kvm/kvm-s390.c   | 16 ++++++++++++++--
 arch/s390/kvm/kvm-s390.h   |  2 ++
 arch/s390/kvm/trace-s390.h | 21 +++++++++++++++++++++
 6 files changed, 40 insertions(+), 6 deletions(-)

diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 5521ace8b60d..004d385d9519 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -176,7 +176,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
 		return -EOPNOTSUPP;
 	}
 
-	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	kvm_s390_vcpu_stop(vcpu);
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 54313fe6e8d4..99e4b76e3487 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -65,8 +65,7 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 	trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);
 
 	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
-		atomic_set_mask(CPUSTAT_STOPPED,
-				&vcpu->arch.sie_block->cpuflags);
+		kvm_s390_vcpu_stop(vcpu);
 		vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
 		VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
 		rc = -EOPNOTSUPP;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 077e4738ebdc..d9526bb29194 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -413,7 +413,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
 				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+		kvm_s390_vcpu_start(vcpu);
 		break;
 	case KVM_S390_PROGRAM_INT:
 		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index b32c42cbc706..6c972d229ace 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -592,7 +592,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->pp = 0;
 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
 	kvm_clear_async_pf_completion_queue(vcpu);
-	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	kvm_s390_vcpu_stop(vcpu);
 	kvm_s390_clear_local_irqs(vcpu);
 }
 
@@ -1235,7 +1235,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
-	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	kvm_s390_vcpu_start(vcpu);
 
 	switch (kvm_run->exit_reason) {
 	case KVM_EXIT_S390_SIEIC:
@@ -1362,6 +1362,18 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	return kvm_s390_store_status_unloaded(vcpu, addr);
 }
 
+void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
+{
+	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
+	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+}
+
+void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
+{
+	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
+	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+}
+
 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 				     struct kvm_enable_cap *cap)
 {
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 9b5680d1f6cc..c28423a3acc0 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -157,6 +157,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 /* implemented in kvm-s390.c */
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
 void s390_vcpu_block(struct kvm_vcpu *vcpu);
 void s390_vcpu_unblock(struct kvm_vcpu *vcpu);
 void exit_sie(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 13f30f58a2df..34d4f8af3a1d 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -67,6 +67,27 @@ TRACE_EVENT(kvm_s390_destroy_vcpu,
 	    TP_printk("destroy cpu %d", __entry->id)
 	);
 
+/*
+ * Trace point for start and stop of vcpus.
+ */
+TRACE_EVENT(kvm_s390_vcpu_start_stop,
+	    TP_PROTO(unsigned int id, int state),
+	    TP_ARGS(id, state),
+
+	    TP_STRUCT__entry(
+		    __field(unsigned int, id)
+		    __field(int, state)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->id = id;
+		    __entry->state = state;
+		    ),
+
+	    TP_printk("%s cpu %d", __entry->state ? "starting" : "stopping",
+		      __entry->id)
+	);
+
 /*
  * Trace points for injection of interrupts, either per machine or
  * per vcpu.
-- 
cgit v1.2.3
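With all start/stop transitions funnelled through these two functions,
the underlying primitive is nothing more than an atomic
read-modify-write on the cpuflags word. A user-space C11 equivalent of
the atomic_set_mask()/atomic_clear_mask() pattern (the flag value below
is illustrative, not the kernel's actual CPUSTAT_STOPPED definition):

#include <assert.h>
#include <stdatomic.h>

#define CPUSTAT_STOPPED 0x80000000u	/* illustrative value */

static void set_mask(atomic_uint *flags, unsigned int mask)
{
	atomic_fetch_or(flags, mask);	/* like kvm_s390_vcpu_stop() */
}

static void clear_mask(atomic_uint *flags, unsigned int mask)
{
	atomic_fetch_and(flags, ~mask);	/* like kvm_s390_vcpu_start() */
}

int main(void)
{
	atomic_uint cpuflags = 0;

	set_mask(&cpuflags, CPUSTAT_STOPPED);
	assert(atomic_load(&cpuflags) & CPUSTAT_STOPPED);

	clear_mask(&cpuflags, CPUSTAT_STOPPED);
	assert(!(atomic_load(&cpuflags) & CPUSTAT_STOPPED));
	return 0;
}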
From 8ad357551797b1edc184fb9f6a4f80a6fa626459 Mon Sep 17 00:00:00 2001
From: David Hildenbrand
Date: Fri, 14 Mar 2014 11:00:21 +0100
Subject: KVM: s390: enable IBS for single running VCPUs

This patch enables the IBS facility when a single VCPU is running.
The facility is dynamically turned on/off as soon as other VCPUs
enter/leave the stopped state. When this facility is operating, some
instructions can be executed faster for single-cpu guests.

Signed-off-by: David Hildenbrand
Reviewed-by: Dominik Dingel
Reviewed-by: Cornelia Huck
Signed-off-by: Christian Borntraeger
---
 arch/s390/include/asm/kvm_host.h |   2 +
 arch/s390/kvm/kvm-s390.c         | 123 ++++++++++++++++++++++++++++++++++++++-
 arch/s390/kvm/trace-s390.h       |  22 +++++++
 3 files changed, 145 insertions(+), 2 deletions(-)

diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 0d45f6fe734f..f0a1dc5e5d1f 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -72,6 +72,7 @@ struct sca_block {
 #define CPUSTAT_ZARCH      0x00000800
 #define CPUSTAT_MCDS       0x00000100
 #define CPUSTAT_SM         0x00000080
+#define CPUSTAT_IBS        0x00000040
 #define CPUSTAT_G          0x00000008
 #define CPUSTAT_GED        0x00000004
 #define CPUSTAT_J          0x00000002
@@ -411,6 +412,7 @@ struct kvm_arch{
 	int use_cmma;
 	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
 	wait_queue_head_t ipte_wq;
+	spinlock_t start_stop_lock;
 };
 
 #define KVM_HVA_ERR_BAD		(-1UL)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6c972d229ace..0a01744cbdd9 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -458,6 +458,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm->arch.css_support = 0;
 	kvm->arch.use_irqchip = 0;
 
+	spin_lock_init(&kvm->arch.start_stop_lock);
+
 	return 0;
 out_nogmap:
 	debug_unregister(kvm->arch.dbf);
@@ -996,8 +998,15 @@ bool kvm_s390_cmma_enabled(struct kvm *kvm)
 	return true;
 }
 
+static bool ibs_enabled(struct kvm_vcpu *vcpu)
+{
+	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
+}
+
 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 {
+retry:
+	s390_vcpu_unblock(vcpu);
 	/*
 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
@@ -1005,15 +1014,34 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 	 * already finished. We might race against a second unmapper that
 	 * wants to set the blocking bit. Let's just retry the request loop.
 	 */
-	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
+	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
 		int rc;
 		rc = gmap_ipte_notify(vcpu->arch.gmap,
 				      vcpu->arch.sie_block->prefix,
 				      PAGE_SIZE * 2);
 		if (rc)
 			return rc;
-		s390_vcpu_unblock(vcpu);
+		goto retry;
+	}
+
+	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
+		if (!ibs_enabled(vcpu)) {
+			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
+			atomic_set_mask(CPUSTAT_IBS,
+					&vcpu->arch.sie_block->cpuflags);
+		}
+		goto retry;
 	}
+
+	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
+		if (ibs_enabled(vcpu)) {
+			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
+			atomic_clear_mask(CPUSTAT_IBS,
+					  &vcpu->arch.sie_block->cpuflags);
+		}
+		goto retry;
+	}
+
 	return 0;
 }
 
@@ -1362,16 +1390,107 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	return kvm_s390_store_status_unloaded(vcpu, addr);
 }
 
+static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
+{
+	return atomic_read(&(vcpu)->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
+}
+
+static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
+{
+	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
+	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
+	exit_sie_sync(vcpu);
+}
+
+static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
+{
+	unsigned int i;
+	struct kvm_vcpu *vcpu;
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		__disable_ibs_on_vcpu(vcpu);
+	}
+}
+
+static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
+{
+	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
+	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
+	exit_sie_sync(vcpu);
+}
+
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
 {
+	int i, online_vcpus, started_vcpus = 0;
+
+	if (!is_vcpu_stopped(vcpu))
+		return;
+
 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
+	/* Only one cpu at a time may enter/leave the STOPPED state. */
+	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
+	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
+
+	for (i = 0; i < online_vcpus; i++) {
+		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
+			started_vcpus++;
+	}
+
+	if (started_vcpus == 0) {
+		/* we're the only active VCPU -> speed it up */
+		__enable_ibs_on_vcpu(vcpu);
+	} else if (started_vcpus == 1) {
+		/*
+		 * As we are starting a second VCPU, we have to disable
+		 * the IBS facility on all VCPUs to remove potentially
+		 * outstanding ENABLE requests.
+		 */
+		__disable_ibs_on_all_vcpus(vcpu->kvm);
+	}
+
 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	/*
+	 * Another VCPU might have used IBS while we were offline.
+	 * Let's play safe and flush the VCPU at startup.
+	 */
+	vcpu->arch.sie_block->ihcpu = 0xffff;
+	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
+	return;
 }
 
 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
 {
+	int i, online_vcpus, started_vcpus = 0;
+	struct kvm_vcpu *started_vcpu = NULL;
+
+	if (is_vcpu_stopped(vcpu))
+		return;
+
 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
+	/* Only one cpu at a time may enter/leave the STOPPED state. */
+	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
+	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
+
 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	__disable_ibs_on_vcpu(vcpu);
+
+	for (i = 0; i < online_vcpus; i++) {
+		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
+			started_vcpus++;
+			started_vcpu = vcpu->kvm->vcpus[i];
+		}
+	}
+
+	if (started_vcpus == 1) {
+		/*
+		 * As we only have one VCPU left, we want to enable the
+		 * IBS facility for that VCPU to speed it up.
+		 */
+		__enable_ibs_on_vcpu(started_vcpu);
+	}
+
+	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
+	return;
 }
 
 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 34d4f8af3a1d..647e9d6a4818 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -244,6 +244,28 @@ TRACE_EVENT(kvm_s390_enable_css,
 		      __entry->kvm)
 	);
 
+/*
+ * Trace point for enabling and disabling interlocking-and-broadcasting
+ * suppression.
+ */
+TRACE_EVENT(kvm_s390_enable_disable_ibs,
+	    TP_PROTO(unsigned int id, int state),
+	    TP_ARGS(id, state),
+
+	    TP_STRUCT__entry(
+		    __field(unsigned int, id)
+		    __field(int, state)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->id = id;
+		    __entry->state = state;
+		    ),
+
+	    TP_printk("%s ibs on cpu %d",
+		      __entry->state ? "enabling" : "disabling", __entry->id)
+	);
+
 #endif /* _TRACE_KVMS390_H */
-- 
cgit v1.2.3
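Taken together, kvm_s390_vcpu_start() and kvm_s390_vcpu_stop() implement
a small policy: IBS is enabled exactly while a single VCPU is running.
Starting a second VCPU clears it everywhere; stopping the second-to-last
VCPU re-enables it on the survivor. A user-space toy model of just this
policy, with locking and the KVM request machinery deliberately omitted
(all names below are illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_VCPUS 4

struct toy_vm {
	bool stopped[MAX_VCPUS];
	bool ibs[MAX_VCPUS];
};

static int started_vcpus(const struct toy_vm *vm)
{
	int n = 0;

	for (int i = 0; i < MAX_VCPUS; i++)
		if (!vm->stopped[i])
			n++;
	return n;
}

static void vcpu_start(struct toy_vm *vm, int id)
{
	int before = started_vcpus(vm);

	if (before == 0) {
		vm->ibs[id] = true;	/* only runner: speed it up */
	} else if (before == 1) {
		/* a second runner appears: IBS off everywhere */
		for (int i = 0; i < MAX_VCPUS; i++)
			vm->ibs[i] = false;
	}
	vm->stopped[id] = false;
}

static void vcpu_stop(struct toy_vm *vm, int id)
{
	vm->stopped[id] = true;
	vm->ibs[id] = false;

	if (started_vcpus(vm) == 1)	/* one runner left: give it IBS */
		for (int i = 0; i < MAX_VCPUS; i++)
			if (!vm->stopped[i])
				vm->ibs[i] = true;
}

int main(void)
{
	struct toy_vm vm = { .stopped = { true, true, true, true } };

	vcpu_start(&vm, 0);
	assert(vm.ibs[0]);			/* one runner: IBS on */
	vcpu_start(&vm, 1);
	assert(!vm.ibs[0] && !vm.ibs[1]);	/* two runners: IBS off */
	vcpu_stop(&vm, 0);
	assert(vm.ibs[1]);			/* back to one: IBS on */
	puts("IBS policy model OK");
	return 0;
}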