// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

/*
 * misc.h needs to be first because it knows how to include the other kernel
 * headers in the pre-decompression code in a way that does not break
 * compilation.
 */
#include "misc.h"

#include <asm/pgtable_types.h>
#include <asm/sev.h>
#include <asm/trapnr.h>
#include <asm/trap_pf.h>
#include <asm/msr-index.h>
#include <asm/fpu/xcr.h>
#include <asm/ptrace.h>
#include <asm/svm.h>

#include "error.h"

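/*
 * GHCB page used by the decompression code to communicate with the
 * hypervisor while handling #VC exceptions. boot_ghcb stays NULL until
 * early_setup_sev_es() has mapped the page decrypted.
 */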
struct ghcb boot_ghcb_page __aligned(PAGE_SIZE);
struct ghcb *boot_ghcb;

/*
 * Copy a version of this function here - insn-eval.c can't be used in
 * pre-decompression code.
 */
static bool insn_has_rep_prefix(struct insn *insn)
{
	insn_byte_t p;
	int i;

	insn_get_prefixes(insn);

	for_each_insn_prefix(insn, i, p) {
		if (p == 0xf2 || p == 0xf3)
			return true;
	}

	return false;
}

/*
 * Only a dummy for insn_get_seg_base() - Early boot-code is 64bit only and
 * doesn't use segments.
 */
static unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
{
	return 0UL;
}

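/* Raw RDMSR of the GHCB MSR (MSR_AMD64_SEV_ES_GHCB). */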
static inline u64 sev_es_rd_ghcb_msr(void)
{
	unsigned long low, high;

	asm volatile("rdmsr" : "=a" (low), "=d" (high) :
			"c" (MSR_AMD64_SEV_ES_GHCB));

	return ((high << 32) | low);
}

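/* Raw WRMSR of the GHCB MSR (MSR_AMD64_SEV_ES_GHCB). */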
static inline void sev_es_wr_ghcb_msr(u64 val)
{
	u32 low, high;

	low  = val & 0xffffffffUL;
	high = val >> 32;

	asm volatile("wrmsr" : : "c" (MSR_AMD64_SEV_ES_GHCB),
			"a"(low), "d" (high) : "memory");
}

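/*
 * Fetch MAX_INSN_SIZE bytes from the faulting RIP and decode them in 64-bit
 * mode. A plain memcpy() is sufficient here because the pre-decompression
 * code runs on an identity mapping with no user space.
 */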
static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	int ret;

	memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);

	ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
	if (ret < 0)
		return ES_DECODE_FAILED;

	return ES_OK;
}

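/*
 * Memory access callbacks for the shared #VC handler code. In the
 * pre-decompression stage all memory is directly accessible, so a plain
 * memcpy() is enough.
 */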
static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
				   void *dst, char *buf, size_t size)
{
	memcpy(dst, buf, size);

	return ES_OK;
}

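/* Read counterpart of vc_write_mem() above. */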
static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
				  void *src, char *buf, size_t size)
{
	memcpy(buf, src, size);

	return ES_OK;
}

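/* No port I/O filtering is done at this stage - permit all accesses. */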
static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
{
	return ES_OK;
}

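/*
 * Dummy for the shared code - the decompression stage has no separate
 * kernel/user address spaces.
 */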
static bool fault_in_kernel_space(unsigned long address)
{
	return false;
}

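/*
 * The files included below are shared with the running kernel. __init and
 * __pa() have no meaning in the identity-mapped pre-decompression
 * environment, so neutralize them before pulling the shared code in.
 */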
#undef __init
#undef __pa
#define __init
#define __pa(x) ((unsigned long)(x))

#define __BOOT_COMPRESSED

/* Basic instruction decoding support needed */
#include "../../lib/inat.c"
#include "../../lib/insn.c"

/* Include code for early handlers */
#include "../../kernel/sev-shared.c"

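/*
 * First-use setup of the boot GHCB: negotiate the GHCB protocol version,
 * map the GHCB page decrypted (shared with the hypervisor) and initialize
 * the instruction decoder tables.
 */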
static bool early_setup_sev_es(void)
{
	if (!sev_es_negotiate_protocol())
		sev_es_terminate(GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED);

	if (set_page_decrypted((unsigned long)&boot_ghcb_page))
		return false;

	/* Page is now mapped decrypted, clear it */
	memset(&boot_ghcb_page, 0, sizeof(boot_ghcb_page));

	boot_ghcb = &boot_ghcb_page;

	/* Initialize lookup tables for the instruction decoder */
	inat_init_tables();

	return true;
}

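/*
 * Tear down the boot GHCB before control is handed to the decompressed
 * kernel: re-encrypt the page and unmap it so that any late #VC exception
 * is caught instead of silently corrupting it.
 */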
void sev_es_shutdown_ghcb(void)
{
	if (!boot_ghcb)
		return;

	if (!sev_es_check_cpu_features())
		error("SEV-ES CPU Features missing.");

	/*
	 * GHCB Page must be flushed from the cache and mapped encrypted again.
	 * Otherwise the running kernel will see strange cache effects when
	 * trying to use that page.
	 */
	if (set_page_encrypted((unsigned long)&boot_ghcb_page))
		error("Can't map GHCB page encrypted");

	/*
	 * GHCB page is mapped encrypted again and flushed from the cache.
	 * Mark it non-present now to catch bugs when #VC exceptions trigger
	 * after this point.
	 */
	if (set_page_non_present((unsigned long)&boot_ghcb_page))
		error("Can't unmap GHCB page");
}

bool sev_es_check_ghcb_fault(unsigned long address)
{
	/* Check whether the fault was on the GHCB page */
	return ((address & PAGE_MASK) == (unsigned long)&boot_ghcb_page);
}

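/*
 * #VC exception handler for the decompression stage. Only the intercepts
 * that can occur here (RDTSC/RDTSCP, port I/O and CPUID) are handled; any
 * other exit code results in ES_UNSUPPORTED and termination of the guest.
 */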
void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
{
	struct es_em_ctxt ctxt;
	enum es_result result;

	if (!boot_ghcb && !early_setup_sev_es())
		sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);

	vc_ghcb_invalidate(boot_ghcb);
	result = vc_init_em_ctxt(&ctxt, regs, exit_code);
	if (result != ES_OK)
		goto finish;

	switch (exit_code) {
	case SVM_EXIT_RDTSC:
	case SVM_EXIT_RDTSCP:
		result = vc_handle_rdtsc(boot_ghcb, &ctxt, exit_code);
		break;
	case SVM_EXIT_IOIO:
		result = vc_handle_ioio(boot_ghcb, &ctxt);
		break;
	case SVM_EXIT_CPUID:
		result = vc_handle_cpuid(boot_ghcb, &ctxt);
		break;
	default:
		result = ES_UNSUPPORTED;
		break;
	}

finish:
	if (result == ES_OK)
		vc_finish_insn(&ctxt);
	else if (result != ES_RETRY)
		sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);
}