/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/intel/common/common.h>
#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <device/pci.h>
#include <fsp/api.h>
#include <intelblocks/acpi.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/mp_init.h>
#include <intelblocks/msr.h>
#include <soc/cpu.h>
#include <soc/msr.h>
#include <soc/pci_devs.h>
#include <soc/soc_chip.h>
#include <soc/soc_info.h>
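
/* Report whether the IA untrusted mode bit is set in MSR_BIOS_DONE. */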
bool cpu_soc_is_in_untrusted_mode(void)
{
	msr_t msr;

	msr = rdmsr(MSR_BIOS_DONE);
	return !!(msr.lo & ENABLE_IA_UNTRUSTED);
}
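
/* Signal end of BIOS initialization by setting ENABLE_IA_UNTRUSTED in MSR_BIOS_DONE. */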
void cpu_soc_bios_done(void)
{
	msr_t msr;

	msr = rdmsr(MSR_BIOS_DONE);
	msr.lo |= ENABLE_IA_UNTRUSTED;
	wrmsr(MSR_BIOS_DONE, msr);
}
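
/* Report the S0ix substates supported by this SoC (S0i2.0, S0i2.1 and S0i2.2). */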
uint8_t get_supported_lpm_mask(void)
{
	return LPM_S0i2_0 | LPM_S0i2_1 | LPM_S0i2_2;
}
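
/* Preload the FSP-S component before MP initialization begins. */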
static void soc_fsp_load(void)
{
	fsps_load();
}
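
/* Program IA32_MISC_ENABLE, thermal interrupt and MSR_POWER_CTL settings. */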
static void configure_misc(void)
{
	msr_t msr;
	config_t *conf = (config_t *)config_of_soc();

	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= (1 << 0); /* Fast String enable */
	msr.lo |= (1 << 3); /* TM1/TM2/EMTTM enable */
	wrmsr(IA32_MISC_ENABLE, msr);

	/* Set EIST status */
	cpu_set_eist(conf->eist_enable);

	/* Disable Thermal interrupts */
	msr.lo = 0;
	msr.hi = 0;
	wrmsr(IA32_THERM_INTERRUPT, msr);

	/* Enable package critical interrupt only */
	msr.lo = 1 << 4;
	msr.hi = 0;
	wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr);

	/* Enable PROCHOT and Power Performance Platform Override */
	msr = rdmsr(MSR_POWER_CTL);
	msr.lo |= (1 << 0); /* Enable Bi-directional PROCHOT as an input */
	msr.lo |= (1 << 23); /* Lock it */
	msr.lo |= (1 << 18); /* Power Performance Platform Override */
	wrmsr(MSR_POWER_CTL, msr);
}
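
/* Return the core type (big/small) on hybrid parts; non-hybrid parts report a Core. */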
enum core_type get_soc_cpu_type(void)
{
	if (cpu_is_hybrid_supported())
		return cpu_get_cpu_type();
	else
		return CPUID_CORE_TYPE_INTEL_CORE;
}
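
/* This SoC supports reporting the nominal CPU frequency. */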
bool soc_is_nominal_freq_supported(void)
{
	return true;
}
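
/* Switch the LAPIC to X2APIC mode when the X2APIC_LATE_WORKAROUND option is selected. */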
static void enable_x2apic(void)
{
	if (!CONFIG(X2APIC_LATE_WORKAROUND))
		return;

	enable_lapic_mode(true);
}

/* All CPUs, including the BSP, will run the following function. */
void soc_core_init(struct device *cpu)
{
	/* Clear out pending MCEs */
	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	 * of these banks are core vs package scope. For now every CPU clears
	 * every bank. */
	mca_configure();

	enable_x2apic();

	enable_lapic_tpr();

	/* Configure Enhanced SpeedStep and Thermal Sensors */
	configure_misc();

	enable_pm_timer_emulation();

	/* Enable Direct Cache Access */
	configure_dca_cap();

	/* Set energy policy */
	set_energy_perf_bias(ENERGY_POLICY_NORMAL);

	const config_t *conf = config_of_soc();
	/* Set energy-performance preference */
	if (conf->enable_energy_perf_pref)
		if (check_energy_perf_cap())
			set_energy_perf_pref(conf->energy_perf_pref_value);

	/* Enable Turbo */
	enable_turbo();

	/* Set core type in struct cpu_info */
	set_dev_core_type();

	if (CONFIG(INTEL_TME) && is_tme_supported())
		set_tme_core_activate();

	if (CONFIG(DROP_CPU_FEATURE_PROGRAM_IN_FSP)) {
		/* Disable 3-strike error */
		disable_three_strike_error();

		set_aesni_lock();

		/* Enable VMX */
		set_feature_ctrl_vmx_arg(CONFIG(ENABLE_VMX) && !conf->disable_vmx);

		/* Feature control lock configure */
		set_feature_ctrl_lock();
	}
}
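
/* Called on each CPU during MP init to relocate its SMM handler. */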
static void per_cpu_smm_trigger(void)
{
	/* Relocate the SMM handler. */
	smm_relocate();
}
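
/* BSP setup before AP bringup: preload FSP-S and enable EPP when supported. */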
static void pre_mp_init(void)
{
	soc_fsp_load();

	const config_t *conf = config_of_soc();
	if (conf->enable_energy_perf_pref) {
		if (check_energy_perf_cap())
			enable_energy_perf_pref();
		else
			printk(BIOS_WARNING, "Energy Performance Preference not supported!\n");
	}
}
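
/* Runs on the BSP once all APs are up and SMM relocation is complete. */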
static void post_mp_init(void)
{
	/* Set Max Ratio */
	cpu_set_max_ratio();

	/*
	 * 1. Now that all APs, as well as the BSP, have been relocated, let
	 *    SMIs start flowing.
	 * 2. Skip enabling the power button SMI here and enable it after
	 *    BS_CHIPS_INIT, to avoid a shutdown hang due to certain IPs not
	 *    yet being initialized by FSP-S.
	 */
	global_smi_enable_no_pwrbtn();
}

static const struct mp_ops mp_ops = {
	/*
	 * Skip pre-MP-init MTRR programming: the APs mirror the MTRRs of the
	 * BSP, which are set prior to ramstage.
	 * The real MTRR programming is done after resource allocation.
	 */
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = smm_info,
	.get_microcode_info = get_microcode_info,
	.pre_mp_smm_init = smm_initialize,
	.per_cpu_smm_trigger = per_cpu_smm_trigger,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = post_mp_init,
};
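
/* Bring up all APs with SMM enabled, then program the thermal throttle target. */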
void mp_init_cpus(struct bus *cpu_bus)
{
	if (mp_init_with_smm(cpu_bus, &mp_ops))
		printk(BIOS_ERR, "MP initialization failure.\n");

	/* Thermal throttle activation offset */
	configure_tcc_thermal_target();
}