|  |  |  |
|---|---|---|
| author | Sean Christopherson <sean.j.christopherson@intel.com> | 2020-06-08 18:56:07 -0700 |
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2020-07-08 16:21:47 -0400 |
| commit | b2656e4d8b29a25ea26ae92d14694904274e63b0 (patch) | |
| tree | 35f464bc8613cfa34db36afad76e6ea93a283f4a /arch/x86/kvm/vmx | |
| parent | c967118ddb21191178c0e0080fdc41f5d85ca1d1 (diff) | |
KVM: nVMX: Wrap VM-Fail valid path in generic VM-Fail helper
Add nested_vmx_fail() to wrap VM-Fail paths that _may_ result in VM-Fail
Valid, making it clear at the call sites that the Valid flavor isn't
guaranteed.
Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200609015607.6994-1-sean.j.christopherson@intel.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
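
For readers skimming the diff below: the two VM-Fail flavors are architectural. VMfailInvalid sets RFLAGS.CF and carries no error number; VMfailValid sets RFLAGS.ZF and writes the error number into the current VMCS's VM-instruction error field, which is only possible when a current VMCS exists. Here is a minimal sketch in plain C (an editor's illustration with made-up `demo_*` names, not KVM code) of the decision the new helper centralizes:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for KVM's vCPU/VMCS state; names are not KVM's. */
struct demo_vmcs {
	uint32_t vm_instruction_error;
};

struct demo_vcpu {
	struct demo_vmcs *current_vmcs;	/* NULL means no current VMCS */
	bool cf, zf;			/* models RFLAGS.CF / RFLAGS.ZF */
};

/* VMfailInvalid: CF=1, ZF=0, no error number can be recorded. */
static void demo_fail_invalid(struct demo_vcpu *vcpu)
{
	vcpu->cf = true;
	vcpu->zf = false;
}

/* VMfailValid: ZF=1, CF=0, error number goes into the current VMCS. */
static void demo_fail_valid(struct demo_vcpu *vcpu, uint32_t error)
{
	vcpu->cf = false;
	vcpu->zf = true;
	vcpu->current_vmcs->vm_instruction_error = error;
}

/*
 * The decision nested_vmx_fail() centralizes: fall back to VMfailInvalid
 * when there is no current VMCS to hold the error number.
 */
static void demo_fail(struct demo_vcpu *vcpu, uint32_t error)
{
	if (!vcpu->current_vmcs)
		demo_fail_invalid(vcpu);
	else
		demo_fail_valid(vcpu, error);
}

int main(void)
{
	struct demo_vmcs vmcs = { 0 };
	struct demo_vcpu vcpu = { .current_vmcs = NULL };

	demo_fail(&vcpu, 7);	/* no current VMCS -> VMfailInvalid */
	printf("CF=%d ZF=%d\n", vcpu.cf, vcpu.zf);

	vcpu.current_vmcs = &vmcs;
	demo_fail(&vcpu, 7);	/* current VMCS -> VMfailValid, error recorded */
	printf("CF=%d ZF=%d error=%u\n", vcpu.cf, vcpu.zf,
	       vmcs.vm_instruction_error);
	return 0;
}
```

Folding the "is there a current VMCS?" check into one place means no call site can silently promise the Valid flavor; that is the readability point the patch makes explicit.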
Diffstat (limited to 'arch/x86/kvm/vmx')
|  |  |  |
|---|---|---|
| -rw-r--r-- | arch/x86/kvm/vmx/nested.c | 77 |

1 file changed, 36 insertions(+), 41 deletions(-)
```diff
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index aeaac9febca4..7693d41a2446 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -171,15 +171,6 @@ static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
 static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
 				u32 vm_instruction_error)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-
-	/*
-	 * failValid writes the error number to the current VMCS, which
-	 * can't be done if there isn't a current VMCS.
-	 */
-	if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
-		return nested_vmx_failInvalid(vcpu);
-
 	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
 			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
 			    X86_EFLAGS_SF | X86_EFLAGS_OF))
@@ -192,6 +183,20 @@ static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
 	return kvm_skip_emulated_instruction(vcpu);
 }
 
+static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	/*
+	 * failValid writes the error number to the current VMCS, which
+	 * can't be done if there isn't a current VMCS.
+	 */
+	if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
+		return nested_vmx_failInvalid(vcpu);
+
+	return nested_vmx_failValid(vcpu, vm_instruction_error);
+}
+
 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
 {
 	/* TODO: not to reset guest simply here. */
@@ -3493,19 +3498,18 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 * when using the merged vmcs02.
 	 */
 	if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
-		return nested_vmx_failValid(vcpu,
-			VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
+		return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
 
 	if (vmcs12->launch_state == launch)
-		return nested_vmx_failValid(vcpu,
+		return nested_vmx_fail(vcpu,
 			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
 			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
 
 	if (nested_vmx_check_controls(vcpu, vmcs12))
-		return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+		return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 
 	if (nested_vmx_check_host_state(vcpu, vmcs12))
-		return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
+		return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
 
 	/*
 	 * We're finally done with prerequisite checking, and can start with
@@ -3554,7 +3558,7 @@ vmentry_failed:
 	if (status == NVMX_VMENTRY_VMEXIT)
 		return 1;
 	WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
-	return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+	return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 }
 
 /*
@@ -4497,7 +4501,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
 	 * flag and the VM-instruction error field of the VMCS
 	 * accordingly, and skip the emulated instruction.
 	 */
-	(void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+	(void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 
 	/*
 	 * Restore L1's host state to KVM's software model.  We're here
@@ -4797,8 +4801,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 	}
 
 	if (vmx->nested.vmxon)
-		return nested_vmx_failValid(vcpu,
-			VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
+		return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
 
 	if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
 			!= VMXON_NEEDED_FEATURES) {
@@ -4889,12 +4892,10 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 		return r;
 
 	if (!page_address_valid(vcpu, vmptr))
-		return nested_vmx_failValid(vcpu,
-			VMXERR_VMCLEAR_INVALID_ADDRESS);
+		return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
 
 	if (vmptr == vmx->nested.vmxon_ptr)
-		return nested_vmx_failValid(vcpu,
-			VMXERR_VMCLEAR_VMXON_POINTER);
+		return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
 
 	/*
 	 * When Enlightened VMEntry is enabled on the calling CPU we treat
@@ -4964,8 +4965,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 
 	offset = vmcs_field_to_offset(field);
 	if (offset < 0)
-		return nested_vmx_failValid(vcpu,
-			VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+		return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
 
 	if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
 		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
@@ -5068,8 +5068,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 
 	offset = vmcs_field_to_offset(field);
 	if (offset < 0)
-		return nested_vmx_failValid(vcpu,
-			VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+		return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
 
 	/*
 	 * If the vCPU supports "VMWRITE to any supported field in the
@@ -5077,8 +5076,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	 */
 	if (vmcs_field_readonly(field) &&
 	    !nested_cpu_has_vmwrite_any_field(vcpu))
-		return nested_vmx_failValid(vcpu,
-			VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
+		return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
 
 	/*
 	 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
@@ -5153,12 +5151,10 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 		return r;
 
 	if (!page_address_valid(vcpu, vmptr))
-		return nested_vmx_failValid(vcpu,
-			VMXERR_VMPTRLD_INVALID_ADDRESS);
+		return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
 
 	if (vmptr == vmx->nested.vmxon_ptr)
-		return nested_vmx_failValid(vcpu,
-			VMXERR_VMPTRLD_VMXON_POINTER);
+		return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
 
 	/* Forbid normal VMPTRLD if Enlightened version was used */
 	if (vmx->nested.hv_evmcs)
@@ -5175,7 +5171,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 			 * given physical address won't match the required
 			 * VMCS12_REVISION identifier.
 			 */
-			return nested_vmx_failValid(vcpu,
+			return nested_vmx_fail(vcpu,
 				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
 		}
 
@@ -5185,7 +5181,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 		    (new_vmcs12->hdr.shadow_vmcs &&
 		     !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
 			kvm_vcpu_unmap(vcpu, &map, false);
-			return nested_vmx_failValid(vcpu,
+			return nested_vmx_fail(vcpu,
 				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
 		}
 
@@ -5270,8 +5266,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 	types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
 
 	if (type >= 32 || !(types & (1 << type)))
-		return nested_vmx_failValid(vcpu,
-				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+		return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 
 	/* According to the Intel VMX instruction reference, the memory
 	 * operand is read even if it isn't needed (e.g., for type==global)
@@ -5292,7 +5287,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 	switch (type) {
 	case VMX_EPT_EXTENT_CONTEXT:
 		if (!nested_vmx_check_eptp(vcpu, operand.eptp))
-			return nested_vmx_failValid(vcpu,
+			return nested_vmx_fail(vcpu,
 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 
 		roots_to_free = 0;
@@ -5352,7 +5347,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 			VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
 
 	if (type >= 32 || !(types & (1 << type)))
-		return nested_vmx_failValid(vcpu,
+		return nested_vmx_fail(vcpu,
 			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 
 	/* according to the intel vmx instruction reference, the memory
@@ -5366,7 +5361,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 		return vmx_handle_memory_failure(vcpu, r, &e);
 
 	if (operand.vpid >> 16)
-		return nested_vmx_failValid(vcpu,
+		return nested_vmx_fail(vcpu,
 			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 
 	vpid02 = nested_get_vpid02(vcpu);
@@ -5374,14 +5369,14 @@
 	case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
 		if (!operand.vpid ||
 		    is_noncanonical_address(operand.gla, vcpu))
-			return nested_vmx_failValid(vcpu,
+			return nested_vmx_fail(vcpu,
 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 		vpid_sync_vcpu_addr(vpid02, operand.gla);
 		break;
 	case VMX_VPID_EXTENT_SINGLE_CONTEXT:
 	case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
 		if (!operand.vpid)
-			return nested_vmx_failValid(vcpu,
+			return nested_vmx_fail(vcpu,
 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 		vpid_sync_context(vpid02);
 		break;
```
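
As a closing note on why the distinction matters to the guest: L1 observes a failed VMX instruction purely through RFLAGS, and only in the Valid case can it VMREAD the error number from the VM-instruction error field (encoding 0x4400). Below is a hedged guest-side sketch, assuming x86-64 already in VMX operation; `vmread()` and `report_vmlaunch_failure()` are illustrative helpers, not a kernel or kvm-unit-tests API:

```c
/*
 * Guest (L1) view of VM-Fail, per the Intel SDM.  VMLAUNCH only falls
 * through to the next instruction on failure: CF=1 is VMfailInvalid (no
 * error number available), ZF=1 is VMfailValid (error number readable
 * from the current VMCS).  Illustrative sketch, not KVM code.
 */
#include <stdint.h>
#include <stdio.h>

#define VM_INSTRUCTION_ERROR 0x4400UL	/* VMCS field encoding */

static inline uint64_t vmread(uint64_t field)
{
	uint64_t value;

	asm volatile("vmread %1, %0" : "=rm"(value) : "r"(field) : "cc");
	return value;
}

static void report_vmlaunch_failure(void)
{
	uint8_t cf, zf;

	/* On success VMLAUNCH never returns here, so flags imply failure. */
	asm volatile("vmlaunch; setc %0; setz %1"
		     : "=q"(cf), "=q"(zf) : : "cc", "memory");

	if (cf)
		printf("VMfailInvalid: no current VMCS, no error number\n");
	else if (zf)
		printf("VMfailValid: error %llu\n",
		       (unsigned long long)vmread(VM_INSTRUCTION_ERROR));
}
```

This is exactly why nested_vmx_fail() must fall back to VMfailInvalid when current_vmptr is -1ull: there is no VM-instruction error field anywhere for the guest to VMREAD.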
