ASoC: Intel: Remove skylake driver

The avs-driver found in sound/soc/intel/avs is a direct replacement for
the existing skylake-driver. It covers all features supported by the
skylake-driver and more, and aligns with the recommended flows and
requirements based on its Windows driver equivalent.

For the official kernel tree the deprecation began with v6.0. Most
skylake-driver users have moved to the avs- or SOF-driver when AudioDSP
capabilities are available on the platform, or to snd-hda-intel
(sound/pci/hda) when such capabilities are not.
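
A minimal config sketch of the migration paths above (illustrative
only; the exact SOF platform options depend on the kernel tree in use):

  # Platforms with AudioDSP capabilities: avs-driver (or SOF where
  # the given tree supports it)
  # CONFIG_SND_SOC_INTEL_SKYLAKE is not set
  CONFIG_SND_SOC_INTEL_AVS=m
  # Platforms without AudioDSP capabilities: legacy HDA driver
  CONFIG_SND_HDA_INTEL=m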

For the supported trees the deprecation began with v5.4, with v5.15
being the first release where the skylake-driver is disabled entirely.

All machine board drivers that consume the DSP driver have replacements
present within the sound/soc/intel/avs/boards/ directory.

Acked-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Cezary Rojewski <cezary.rojewski@intel.com>
Link: https://patch.msgid.link/20240814083929.1217319-14-cezary.rojewski@intel.com
Signed-off-by: Mark Brown <broonie@kernel.org>
Cezary Rojewski 2024-08-14 10:39:28 +02:00 committed by Mark Brown
parent 6de8dddc56
commit a882f4d750
27 changed files with 2 additions and 14821 deletions

@@ -67,126 +67,6 @@ config SND_SST_ATOM_HIFI2_PLATFORM_ACPI
Baytrail/Cherrytrail. If you want to enable SOF on
Baytrail/Cherrytrail, you need to deselect this option first.
config SND_SOC_INTEL_SKYLAKE
tristate "All Skylake/SST Platforms"
depends on PCI && ACPI
depends on COMMON_CLK
select SND_SOC_INTEL_SKL
select SND_SOC_INTEL_APL
select SND_SOC_INTEL_KBL
select SND_SOC_INTEL_GLK
select SND_SOC_INTEL_CNL
select SND_SOC_INTEL_CFL
help
This is a backwards-compatible option to select all devices
supported by the Intel SST/Skylake driver. This option is no
longer recommended and will be deprecated when the SOF
driver is introduced. Distributions should explicitly
select which platform uses this driver.
config SND_SOC_INTEL_SKL
tristate "Skylake Platforms"
depends on PCI && ACPI
depends on COMMON_CLK
select SND_SOC_INTEL_SKYLAKE_FAMILY
help
If you have an Intel Skylake platform with the DSP enabled
in the BIOS then enable this option by saying Y or m.
config SND_SOC_INTEL_APL
tristate "Broxton/ApolloLake Platforms"
depends on PCI && ACPI
depends on COMMON_CLK
select SND_SOC_INTEL_SKYLAKE_FAMILY
help
If you have an Intel Broxton/ApolloLake platform with the DSP
enabled in the BIOS then enable this option by saying Y or m.
config SND_SOC_INTEL_KBL
tristate "Kabylake Platforms"
depends on PCI && ACPI
depends on COMMON_CLK
select SND_SOC_INTEL_SKYLAKE_FAMILY
help
If you have an Intel Kabylake platform with the DSP
enabled in the BIOS then enable this option by saying Y or m.
config SND_SOC_INTEL_GLK
tristate "GeminiLake Platforms"
depends on PCI && ACPI
depends on COMMON_CLK
select SND_SOC_INTEL_SKYLAKE_FAMILY
help
If you have an Intel GeminiLake platform with the DSP
enabled in the BIOS then enable this option by saying Y or m.
config SND_SOC_INTEL_CNL
tristate "CannonLake/WhiskyLake Platforms"
depends on PCI && ACPI
depends on COMMON_CLK
select SND_SOC_INTEL_SKYLAKE_FAMILY
help
If you have an Intel CNL/WHL platform with the DSP
enabled in the BIOS then enable this option by saying Y or m.
config SND_SOC_INTEL_CFL
tristate "CoffeeLake Platforms"
depends on PCI && ACPI
depends on COMMON_CLK
select SND_SOC_INTEL_SKYLAKE_FAMILY
help
If you have an Intel CoffeeLake platform with the DSP
enabled in the BIOS then enable this option by saying Y or m.
config SND_SOC_INTEL_CML_H
tristate "CometLake-H Platforms"
depends on PCI && ACPI
depends on COMMON_CLK
select SND_SOC_INTEL_SKYLAKE_FAMILY
help
If you have an Intel CometLake-H platform with the DSP
enabled in the BIOS then enable this option by saying Y or m.
config SND_SOC_INTEL_CML_LP
tristate "CometLake-LP Platforms"
depends on PCI && ACPI
depends on COMMON_CLK
select SND_SOC_INTEL_SKYLAKE_FAMILY
help
If you have an Intel CometLake-LP platform with the DSP
enabled in the BIOS then enable this option by saying Y or m.
config SND_SOC_INTEL_SKYLAKE_FAMILY
tristate
select SND_SOC_INTEL_SKYLAKE_COMMON
if SND_SOC_INTEL_SKYLAKE_FAMILY
config SND_SOC_INTEL_SKYLAKE_SSP_CLK
tristate
config SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
bool "HDAudio codec support"
help
If you have Intel Skylake or Kabylake with HDAudio codec
and DMIC present then enable this option by saying Y.
config SND_SOC_INTEL_SKYLAKE_COMMON
tristate
select SND_HDA_EXT_CORE
select SND_HDA_DSP_LOADER
select SND_SOC_TOPOLOGY
select SND_SOC_INTEL_SST
select SND_SOC_HDAC_HDA
select SND_SOC_ACPI_INTEL_MATCH
select SND_INTEL_DSP_CONFIG
help
If you have an Intel Skylake/Broxton/ApolloLake/KabyLake/
GeminiLake or CannonLake platform with the DSP enabled in the BIOS
then enable this option by saying Y or m.
endif ## SND_SOC_INTEL_SKYLAKE_FAMILY
endif ## SND_SOC_INTEL_SST_TOPLEVEL
if SND_SOC_INTEL_SST_TOPLEVEL || SND_SOC_SOF_INTEL_TOPLEVEL

@@ -5,7 +5,6 @@ obj-$(CONFIG_SND_SOC) += common/
# Platform Support
obj-$(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM) += atom/
obj-$(CONFIG_SND_SOC_INTEL_CATPT) += catpt/
obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += skylake/
obj-$(CONFIG_SND_SOC_INTEL_KEEMBAY) += keembay/
obj-$(CONFIG_SND_SOC_INTEL_AVS) += avs/

@@ -300,7 +300,7 @@ config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
endif ## SND_SOC_SOF_GEMINILAKE
if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC || SND_SOC_SOF_HDA_AUDIO_CODEC
if SND_SOC_SOF_HDA_AUDIO_CODEC
config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
tristate "Skylake+ with HDA Codecs"
@@ -316,7 +316,7 @@ config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
endif ## SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC || SND_SOC_SOF_HDA_AUDIO_CODEC
endif ## SND_SOC_SOF_HDA_AUDIO_CODEC
if SND_SOC_SOF_HDA_LINK || SND_SOC_SOF_BAYTRAIL
config SND_SOC_INTEL_SOF_RT5682_MACH

@@ -1,15 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
snd-soc-skl-y := skl.o skl-pcm.o skl-nhlt.o skl-messages.o skl-topology.o \
skl-sst-ipc.o skl-sst-dsp.o cnl-sst-dsp.o skl-sst-cldma.o \
skl-sst.o bxt-sst.o cnl-sst.o skl-sst-utils.o
ifdef CONFIG_DEBUG_FS
snd-soc-skl-y += skl-debug.o
endif
obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += snd-soc-skl.o
#Skylake Clock device support
snd-soc-skl-ssp-clk-y := skl-ssp-clk.o
obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_SSP_CLK) += snd-soc-skl-ssp-clk.o

@@ -1,629 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* bxt-sst.c - DSP library functions for BXT platform
*
* Copyright (C) 2015-16 Intel Corp
* Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
* Jeeja KP <jeeja.kp@intel.com>
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "skl.h"
#define BXT_BASEFW_TIMEOUT 3000
#define BXT_ROM_INIT_TIMEOUT 70
#define BXT_IPC_PURGE_FW 0x01004000
#define BXT_ROM_INIT 0x5
#define BXT_ADSP_SRAM0_BASE 0x80000
/* Firmware status window */
#define BXT_ADSP_FW_STATUS BXT_ADSP_SRAM0_BASE
#define BXT_ADSP_ERROR_CODE (BXT_ADSP_FW_STATUS + 0x4)
#define BXT_ADSP_SRAM1_BASE 0xA0000
#define BXT_INSTANCE_ID 0
#define BXT_BASE_FW_MODULE_ID 0
#define BXT_ADSP_FW_BIN_HDR_OFFSET 0x2000
/* Delay before scheduling D0i3 entry */
#define BXT_D0I3_DELAY 5000
static unsigned int bxt_get_errorcode(struct sst_dsp *ctx)
{
return sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE);
}
static int
bxt_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, int lib_count)
{
struct snd_dma_buffer dmab;
struct skl_dev *skl = ctx->thread_context;
struct firmware stripped_fw;
int ret = 0, i, dma_id, stream_tag;
/* library indices start from 1 to N. 0 represents base FW */
for (i = 1; i < lib_count; i++) {
ret = skl_prepare_lib_load(skl, &skl->lib_info[i], &stripped_fw,
BXT_ADSP_FW_BIN_HDR_OFFSET, i);
if (ret < 0)
goto load_library_failed;
stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40,
stripped_fw.size, &dmab);
if (stream_tag <= 0) {
dev_err(ctx->dev, "Lib prepare DMA err: %x\n",
stream_tag);
ret = stream_tag;
goto load_library_failed;
}
dma_id = stream_tag - 1;
memcpy(dmab.area, stripped_fw.data, stripped_fw.size);
ctx->dsp_ops.trigger(ctx->dev, true, stream_tag);
ret = skl_sst_ipc_load_library(&skl->ipc, dma_id, i, true);
if (ret < 0)
dev_err(ctx->dev, "IPC Load Lib for %s fail: %d\n",
linfo[i].name, ret);
ctx->dsp_ops.trigger(ctx->dev, false, stream_tag);
ctx->dsp_ops.cleanup(ctx->dev, &dmab, stream_tag);
}
return ret;
load_library_failed:
skl_release_library(linfo, lib_count);
return ret;
}
/*
* First boot sequence has some extra steps. Core 0 waits for power
* status on core 1, so power up core 1 also momentarily, keep it in
* reset/stall and then turn it off
*/
static int sst_bxt_prepare_fw(struct sst_dsp *ctx,
const void *fwdata, u32 fwsize)
{
int stream_tag, ret;
stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40, fwsize, &ctx->dmab);
if (stream_tag <= 0) {
dev_err(ctx->dev, "Failed to prepare DMA FW loading err: %x\n",
stream_tag);
return stream_tag;
}
ctx->dsp_ops.stream_tag = stream_tag;
memcpy(ctx->dmab.area, fwdata, fwsize);
/* Step 1: Power up core 0 and core1 */
ret = skl_dsp_core_power_up(ctx, SKL_DSP_CORE0_MASK |
SKL_DSP_CORE_MASK(1));
if (ret < 0) {
dev_err(ctx->dev, "dsp core0/1 power up failed\n");
goto base_fw_load_failed;
}
/* Step 2: Purge FW request */
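/*
 * The DMA stream id (stream_tag - 1) is encoded at bit 9 of the purge
 * request; with a (hypothetical) stream_tag of 1 that field is 0 and
 * the word reduces to SKL_ADSP_REG_HIPCI_BUSY | BXT_IPC_PURGE_FW.
 */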
sst_dsp_shim_write(ctx, SKL_ADSP_REG_HIPCI, SKL_ADSP_REG_HIPCI_BUSY |
(BXT_IPC_PURGE_FW | ((stream_tag - 1) << 9)));
/* Step 3: Unset core0 reset state & unstall/run core0 */
ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
dev_err(ctx->dev, "Start dsp core failed ret: %d\n", ret);
ret = -EIO;
goto base_fw_load_failed;
}
/* Step 4: Wait for DONE Bit */
ret = sst_dsp_register_poll(ctx, SKL_ADSP_REG_HIPCIE,
SKL_ADSP_REG_HIPCIE_DONE,
SKL_ADSP_REG_HIPCIE_DONE,
BXT_INIT_TIMEOUT, "HIPCIE Done");
if (ret < 0) {
dev_err(ctx->dev, "Timeout for Purge Request%d\n", ret);
goto base_fw_load_failed;
}
/* Step 5: power down core1 */
ret = skl_dsp_core_power_down(ctx, SKL_DSP_CORE_MASK(1));
if (ret < 0) {
dev_err(ctx->dev, "dsp core1 power down failed\n");
goto base_fw_load_failed;
}
/* Step 6: Enable Interrupt */
skl_ipc_int_enable(ctx);
skl_ipc_op_int_enable(ctx);
/* Step 7: Wait for ROM init */
ret = sst_dsp_register_poll(ctx, BXT_ADSP_FW_STATUS, SKL_FW_STS_MASK,
SKL_FW_INIT, BXT_ROM_INIT_TIMEOUT, "ROM Load");
if (ret < 0) {
dev_err(ctx->dev, "Timeout for ROM init, ret:%d\n", ret);
goto base_fw_load_failed;
}
return ret;
base_fw_load_failed:
ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, stream_tag);
skl_dsp_core_power_down(ctx, SKL_DSP_CORE_MASK(1));
skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
return ret;
}
static int sst_transfer_fw_host_dma(struct sst_dsp *ctx)
{
int ret;
ctx->dsp_ops.trigger(ctx->dev, true, ctx->dsp_ops.stream_tag);
ret = sst_dsp_register_poll(ctx, BXT_ADSP_FW_STATUS, SKL_FW_STS_MASK,
BXT_ROM_INIT, BXT_BASEFW_TIMEOUT, "Firmware boot");
ctx->dsp_ops.trigger(ctx->dev, false, ctx->dsp_ops.stream_tag);
ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, ctx->dsp_ops.stream_tag);
return ret;
}
static int bxt_load_base_firmware(struct sst_dsp *ctx)
{
struct firmware stripped_fw;
struct skl_dev *skl = ctx->thread_context;
int ret, i;
if (ctx->fw == NULL) {
ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
if (ret < 0) {
dev_err(ctx->dev, "Request firmware failed %d\n", ret);
return ret;
}
}
/* parse uuids on first boot */
if (skl->is_first_boot) {
ret = snd_skl_parse_uuids(ctx, ctx->fw, BXT_ADSP_FW_BIN_HDR_OFFSET, 0);
if (ret < 0)
goto sst_load_base_firmware_failed;
}
stripped_fw.data = ctx->fw->data;
stripped_fw.size = ctx->fw->size;
skl_dsp_strip_extended_manifest(&stripped_fw);
for (i = 0; i < BXT_FW_ROM_INIT_RETRY; i++) {
ret = sst_bxt_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
if (ret == 0)
break;
}
if (ret < 0) {
dev_err(ctx->dev, "Error code=0x%x: FW status=0x%x\n",
sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE),
sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS));
dev_err(ctx->dev, "Core En/ROM load fail:%d\n", ret);
goto sst_load_base_firmware_failed;
}
ret = sst_transfer_fw_host_dma(ctx);
if (ret < 0) {
dev_err(ctx->dev, "Transfer firmware failed %d\n", ret);
dev_info(ctx->dev, "Error code=0x%x: FW status=0x%x\n",
sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE),
sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS));
skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
} else {
dev_dbg(ctx->dev, "Firmware download successful\n");
ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
if (ret == 0) {
dev_err(ctx->dev, "DSP boot fail, FW Ready timeout\n");
skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
ret = -EIO;
} else {
ret = 0;
skl->fw_loaded = true;
}
}
return ret;
sst_load_base_firmware_failed:
release_firmware(ctx->fw);
ctx->fw = NULL;
return ret;
}
/*
* Decide the D0i3 state that can be targeted based on the usecase
* ref counts and DSP state
*
* Decision Matrix: (X = don't care; state = target state)
*
* DSP state != SKL_DSP_RUNNING ; state = no d0i3
*
* DSP state == SKL_DSP_RUNNING , the following matrix applies
* non_d0i3 >0; streaming =X; non_streaming =X; state = no d0i3
* non_d0i3 =X; streaming =0; non_streaming =0; state = no d0i3
* non_d0i3 =0; streaming >0; non_streaming =X; state = streaming d0i3
* non_d0i3 =0; streaming =0; non_streaming =X; state = non-streaming d0i3
*/
static int bxt_d0i3_target_state(struct sst_dsp *ctx)
{
struct skl_dev *skl = ctx->thread_context;
struct skl_d0i3_data *d0i3 = &skl->d0i3;
if (skl->cores.state[SKL_DSP_CORE0_ID] != SKL_DSP_RUNNING)
return SKL_DSP_D0I3_NONE;
if (d0i3->non_d0i3)
return SKL_DSP_D0I3_NONE;
else if (d0i3->streaming)
return SKL_DSP_D0I3_STREAMING;
else if (d0i3->non_streaming)
return SKL_DSP_D0I3_NON_STREAMING;
else
return SKL_DSP_D0I3_NONE;
}
static void bxt_set_dsp_D0i3(struct work_struct *work)
{
int ret;
struct skl_ipc_d0ix_msg msg;
struct skl_dev *skl = container_of(work,
struct skl_dev, d0i3.work.work);
struct sst_dsp *ctx = skl->dsp;
struct skl_d0i3_data *d0i3 = &skl->d0i3;
int target_state;
dev_dbg(ctx->dev, "In %s:\n", __func__);
/* D0i3 entry allowed only if core 0 alone is running */
if (skl_dsp_get_enabled_cores(ctx) != SKL_DSP_CORE0_MASK) {
dev_warn(ctx->dev,
"D0i3 allowed when only core0 running:Exit\n");
return;
}
target_state = bxt_d0i3_target_state(ctx);
if (target_state == SKL_DSP_D0I3_NONE)
return;
msg.instance_id = 0;
msg.module_id = 0;
msg.wake = 1;
msg.streaming = 0;
if (target_state == SKL_DSP_D0I3_STREAMING)
msg.streaming = 1;
ret = skl_ipc_set_d0ix(&skl->ipc, &msg);
if (ret < 0) {
dev_err(ctx->dev, "Failed to set DSP to D0i3 state\n");
return;
}
/* Set Vendor specific register D0I3C.I3 to enable D0i3*/
if (skl->update_d0i3c)
skl->update_d0i3c(skl->dev, true);
d0i3->state = target_state;
skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING_D0I3;
}
static int bxt_schedule_dsp_D0i3(struct sst_dsp *ctx)
{
struct skl_dev *skl = ctx->thread_context;
struct skl_d0i3_data *d0i3 = &skl->d0i3;
/* Schedule D0i3 only if the usecase ref counts are appropriate */
if (bxt_d0i3_target_state(ctx) != SKL_DSP_D0I3_NONE) {
dev_dbg(ctx->dev, "%s: Schedule D0i3\n", __func__);
schedule_delayed_work(&d0i3->work,
msecs_to_jiffies(BXT_D0I3_DELAY));
}
return 0;
}
static int bxt_set_dsp_D0i0(struct sst_dsp *ctx)
{
int ret;
struct skl_ipc_d0ix_msg msg;
struct skl_dev *skl = ctx->thread_context;
dev_dbg(ctx->dev, "In %s:\n", __func__);
/* First Cancel any pending attempt to put DSP to D0i3 */
cancel_delayed_work_sync(&skl->d0i3.work);
/* If DSP is currently in D0i3, bring it to D0i0 */
if (skl->cores.state[SKL_DSP_CORE0_ID] != SKL_DSP_RUNNING_D0I3)
return 0;
dev_dbg(ctx->dev, "Set DSP to D0i0\n");
msg.instance_id = 0;
msg.module_id = 0;
msg.streaming = 0;
msg.wake = 0;
if (skl->d0i3.state == SKL_DSP_D0I3_STREAMING)
msg.streaming = 1;
/* Clear Vendor specific register D0I3C.I3 to disable D0i3*/
if (skl->update_d0i3c)
skl->update_d0i3c(skl->dev, false);
ret = skl_ipc_set_d0ix(&skl->ipc, &msg);
if (ret < 0) {
dev_err(ctx->dev, "Failed to set DSP to D0i0\n");
return ret;
}
skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING;
skl->d0i3.state = SKL_DSP_D0I3_NONE;
return 0;
}
static int bxt_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
struct skl_dev *skl = ctx->thread_context;
int ret;
struct skl_ipc_dxstate_info dx;
unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
if (skl->fw_loaded == false) {
skl->boot_complete = false;
ret = bxt_load_base_firmware(ctx);
if (ret < 0) {
dev_err(ctx->dev, "reload fw failed: %d\n", ret);
return ret;
}
if (skl->lib_count > 1) {
ret = bxt_load_library(ctx, skl->lib_info,
skl->lib_count);
if (ret < 0) {
dev_err(ctx->dev, "reload libs failed: %d\n", ret);
return ret;
}
}
skl->cores.state[core_id] = SKL_DSP_RUNNING;
return ret;
}
/* If core 0 is being turned on, turn on core 1 as well */
if (core_id == SKL_DSP_CORE0_ID)
ret = skl_dsp_core_power_up(ctx, core_mask |
SKL_DSP_CORE_MASK(1));
else
ret = skl_dsp_core_power_up(ctx, core_mask);
if (ret < 0)
goto err;
if (core_id == SKL_DSP_CORE0_ID) {
/*
* Enable interrupt after SPA is set and before
* DSP is unstalled
*/
skl_ipc_int_enable(ctx);
skl_ipc_op_int_enable(ctx);
skl->boot_complete = false;
}
ret = skl_dsp_start_core(ctx, core_mask);
if (ret < 0)
goto err;
if (core_id == SKL_DSP_CORE0_ID) {
ret = wait_event_timeout(skl->boot_wait,
skl->boot_complete,
msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
/* If core 1 was turned on for booting core 0, turn it off */
skl_dsp_core_power_down(ctx, SKL_DSP_CORE_MASK(1));
if (ret == 0) {
dev_err(ctx->dev, "%s: DSP boot timeout\n", __func__);
dev_err(ctx->dev, "Error code=0x%x: FW status=0x%x\n",
sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE),
sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS));
dev_err(ctx->dev, "Failed to set core0 to D0 state\n");
ret = -EIO;
goto err;
}
}
/* Tell FW if an additional core is now on */
if (core_id != SKL_DSP_CORE0_ID) {
dx.core_mask = core_mask;
dx.dx_mask = core_mask;
ret = skl_ipc_set_dx(&skl->ipc, BXT_INSTANCE_ID,
BXT_BASE_FW_MODULE_ID, &dx);
if (ret < 0) {
dev_err(ctx->dev, "IPC set_dx for core %d fail: %d\n",
core_id, ret);
goto err;
}
}
skl->cores.state[core_id] = SKL_DSP_RUNNING;
return 0;
err:
if (core_id == SKL_DSP_CORE0_ID)
core_mask |= SKL_DSP_CORE_MASK(1);
skl_dsp_disable_core(ctx, core_mask);
return ret;
}
static int bxt_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
int ret;
struct skl_ipc_dxstate_info dx;
struct skl_dev *skl = ctx->thread_context;
unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
dx.core_mask = core_mask;
dx.dx_mask = SKL_IPC_D3_MASK;
dev_dbg(ctx->dev, "core mask=%x dx_mask=%x\n",
dx.core_mask, dx.dx_mask);
ret = skl_ipc_set_dx(&skl->ipc, BXT_INSTANCE_ID,
BXT_BASE_FW_MODULE_ID, &dx);
if (ret < 0) {
dev_err(ctx->dev,
"Failed to set DSP to D3:core id = %d;Continue reset\n",
core_id);
/*
* In case of D3 failure, re-download the firmware, so set
* fw_loaded to false.
*/
skl->fw_loaded = false;
}
if (core_id == SKL_DSP_CORE0_ID) {
/* disable Interrupt */
skl_ipc_op_int_disable(ctx);
skl_ipc_int_disable(ctx);
}
ret = skl_dsp_disable_core(ctx, core_mask);
if (ret < 0) {
dev_err(ctx->dev, "Failed to disable core %d\n", ret);
return ret;
}
skl->cores.state[core_id] = SKL_DSP_RESET;
return 0;
}
static const struct skl_dsp_fw_ops bxt_fw_ops = {
.set_state_D0 = bxt_set_dsp_D0,
.set_state_D3 = bxt_set_dsp_D3,
.set_state_D0i3 = bxt_schedule_dsp_D0i3,
.set_state_D0i0 = bxt_set_dsp_D0i0,
.load_fw = bxt_load_base_firmware,
.get_fw_errcode = bxt_get_errorcode,
.load_library = bxt_load_library,
};
static struct sst_ops skl_ops = {
.irq_handler = skl_dsp_sst_interrupt,
.write = sst_shim32_write,
.read = sst_shim32_read,
.free = skl_dsp_free,
};
static struct sst_dsp_device skl_dev = {
.thread = skl_dsp_irq_thread_handler,
.ops = &skl_ops,
};
int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
struct skl_dev **dsp)
{
struct skl_dev *skl;
struct sst_dsp *sst;
int ret;
ret = skl_sst_ctx_init(dev, irq, fw_name, dsp_ops, dsp, &skl_dev);
if (ret < 0) {
dev_err(dev, "%s: no device\n", __func__);
return ret;
}
skl = *dsp;
sst = skl->dsp;
sst->fw_ops = bxt_fw_ops;
sst->addr.lpe = mmio_base;
sst->addr.shim = mmio_base;
sst->addr.sram0_base = BXT_ADSP_SRAM0_BASE;
sst->addr.sram1_base = BXT_ADSP_SRAM1_BASE;
sst->addr.w0_stat_sz = SKL_ADSP_W0_STAT_SZ;
sst->addr.w0_up_sz = SKL_ADSP_W0_UP_SZ;
sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
ret = skl_ipc_init(dev, skl);
if (ret) {
skl_dsp_free(sst);
return ret;
}
/* set the D0i3 check */
skl->ipc.ops.check_dsp_lp_on = skl_ipc_check_D0i0;
skl->boot_complete = false;
init_waitqueue_head(&skl->boot_wait);
INIT_DELAYED_WORK(&skl->d0i3.work, bxt_set_dsp_D0i3);
skl->d0i3.state = SKL_DSP_D0I3_NONE;
return skl_dsp_acquire_irq(sst);
}
EXPORT_SYMBOL_GPL(bxt_sst_dsp_init);
int bxt_sst_init_fw(struct device *dev, struct skl_dev *skl)
{
int ret;
struct sst_dsp *sst = skl->dsp;
ret = sst->fw_ops.load_fw(sst);
if (ret < 0) {
dev_err(dev, "Load base fw failed: %x\n", ret);
return ret;
}
skl_dsp_init_core_state(sst);
if (skl->lib_count > 1) {
ret = sst->fw_ops.load_library(sst, skl->lib_info,
skl->lib_count);
if (ret < 0) {
dev_err(dev, "Load Library failed : %x\n", ret);
return ret;
}
}
skl->is_first_boot = false;
return 0;
}
EXPORT_SYMBOL_GPL(bxt_sst_init_fw);
void bxt_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl)
{
skl_release_library(skl->lib_info, skl->lib_count);
if (skl->dsp->fw)
release_firmware(skl->dsp->fw);
skl_freeup_uuid_list(skl);
skl_ipc_free(&skl->ipc);
skl->dsp->ops->free(skl->dsp);
}
EXPORT_SYMBOL_GPL(bxt_sst_dsp_cleanup);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Broxton IPC driver");

@@ -1,266 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* cnl-sst-dsp.c - CNL SST library generic function
*
* Copyright (C) 2016-17, Intel Corporation.
* Author: Guneshwor Singh <guneshwor.o.singh@intel.com>
*
* Modified from:
* SKL SST library generic function
* Copyright (C) 2014-15, Intel Corporation.
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/device.h>
#include "../common/sst-dsp.h"
#include "../common/sst-ipc.h"
#include "../common/sst-dsp-priv.h"
#include "cnl-sst-dsp.h"
/* various timeout values */
#define CNL_DSP_PU_TO 50
#define CNL_DSP_PD_TO 50
#define CNL_DSP_RESET_TO 50
static int
cnl_dsp_core_set_reset_state(struct sst_dsp *ctx, unsigned int core_mask)
{
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx,
CNL_ADSP_REG_ADSPCS, CNL_ADSPCS_CRST(core_mask),
CNL_ADSPCS_CRST(core_mask));
/* poll with timeout to check if operation successful */
return sst_dsp_register_poll(ctx,
CNL_ADSP_REG_ADSPCS,
CNL_ADSPCS_CRST(core_mask),
CNL_ADSPCS_CRST(core_mask),
CNL_DSP_RESET_TO,
"Set reset");
}
static int
cnl_dsp_core_unset_reset_state(struct sst_dsp *ctx, unsigned int core_mask)
{
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx, CNL_ADSP_REG_ADSPCS,
CNL_ADSPCS_CRST(core_mask), 0);
/* poll with timeout to check if operation successful */
return sst_dsp_register_poll(ctx,
CNL_ADSP_REG_ADSPCS,
CNL_ADSPCS_CRST(core_mask),
0,
CNL_DSP_RESET_TO,
"Unset reset");
}
static bool is_cnl_dsp_core_enable(struct sst_dsp *ctx, unsigned int core_mask)
{
int val;
bool is_enable;
val = sst_dsp_shim_read_unlocked(ctx, CNL_ADSP_REG_ADSPCS);
is_enable = (val & CNL_ADSPCS_CPA(core_mask)) &&
(val & CNL_ADSPCS_SPA(core_mask)) &&
!(val & CNL_ADSPCS_CRST(core_mask)) &&
!(val & CNL_ADSPCS_CSTALL(core_mask));
dev_dbg(ctx->dev, "DSP core(s) enabled? %d: core_mask %#x\n",
is_enable, core_mask);
return is_enable;
}
static int cnl_dsp_reset_core(struct sst_dsp *ctx, unsigned int core_mask)
{
/* stall core */
sst_dsp_shim_update_bits_unlocked(ctx, CNL_ADSP_REG_ADSPCS,
CNL_ADSPCS_CSTALL(core_mask),
CNL_ADSPCS_CSTALL(core_mask));
/* set reset state */
return cnl_dsp_core_set_reset_state(ctx, core_mask);
}
static int cnl_dsp_start_core(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
/* unset reset state */
ret = cnl_dsp_core_unset_reset_state(ctx, core_mask);
if (ret < 0)
return ret;
/* run core */
sst_dsp_shim_update_bits_unlocked(ctx, CNL_ADSP_REG_ADSPCS,
CNL_ADSPCS_CSTALL(core_mask), 0);
if (!is_cnl_dsp_core_enable(ctx, core_mask)) {
cnl_dsp_reset_core(ctx, core_mask);
dev_err(ctx->dev, "DSP core mask %#x enable failed\n",
core_mask);
ret = -EIO;
}
return ret;
}
static int cnl_dsp_core_power_up(struct sst_dsp *ctx, unsigned int core_mask)
{
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx, CNL_ADSP_REG_ADSPCS,
CNL_ADSPCS_SPA(core_mask),
CNL_ADSPCS_SPA(core_mask));
/* poll with timeout to check if operation successful */
return sst_dsp_register_poll(ctx, CNL_ADSP_REG_ADSPCS,
CNL_ADSPCS_CPA(core_mask),
CNL_ADSPCS_CPA(core_mask),
CNL_DSP_PU_TO,
"Power up");
}
static int cnl_dsp_core_power_down(struct sst_dsp *ctx, unsigned int core_mask)
{
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx, CNL_ADSP_REG_ADSPCS,
CNL_ADSPCS_SPA(core_mask), 0);
/* poll with timeout to check if operation successful */
return sst_dsp_register_poll(ctx,
CNL_ADSP_REG_ADSPCS,
CNL_ADSPCS_CPA(core_mask),
0,
CNL_DSP_PD_TO,
"Power down");
}
int cnl_dsp_enable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
/* power up */
ret = cnl_dsp_core_power_up(ctx, core_mask);
if (ret < 0) {
dev_dbg(ctx->dev, "DSP core mask %#x power up failed",
core_mask);
return ret;
}
return cnl_dsp_start_core(ctx, core_mask);
}
int cnl_dsp_disable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
ret = cnl_dsp_reset_core(ctx, core_mask);
if (ret < 0) {
dev_err(ctx->dev, "DSP core mask %#x reset failed\n",
core_mask);
return ret;
}
/* power down core*/
ret = cnl_dsp_core_power_down(ctx, core_mask);
if (ret < 0) {
dev_err(ctx->dev, "DSP core mask %#x power down failed\n",
core_mask);
return ret;
}
if (is_cnl_dsp_core_enable(ctx, core_mask)) {
dev_err(ctx->dev, "DSP core mask %#x disable failed\n",
core_mask);
ret = -EIO;
}
return ret;
}
irqreturn_t cnl_dsp_sst_interrupt(int irq, void *dev_id)
{
struct sst_dsp *ctx = dev_id;
u32 val;
irqreturn_t ret = IRQ_NONE;
spin_lock(&ctx->spinlock);
val = sst_dsp_shim_read_unlocked(ctx, CNL_ADSP_REG_ADSPIS);
ctx->intr_status = val;
if (val == 0xffffffff) {
spin_unlock(&ctx->spinlock);
return IRQ_NONE;
}
if (val & CNL_ADSPIS_IPC) {
cnl_ipc_int_disable(ctx);
ret = IRQ_WAKE_THREAD;
}
spin_unlock(&ctx->spinlock);
return ret;
}
void cnl_dsp_free(struct sst_dsp *dsp)
{
cnl_ipc_int_disable(dsp);
free_irq(dsp->irq, dsp);
cnl_ipc_op_int_disable(dsp);
cnl_dsp_disable_core(dsp, SKL_DSP_CORE0_MASK);
}
EXPORT_SYMBOL_GPL(cnl_dsp_free);
void cnl_ipc_int_enable(struct sst_dsp *ctx)
{
sst_dsp_shim_update_bits(ctx, CNL_ADSP_REG_ADSPIC,
CNL_ADSPIC_IPC, CNL_ADSPIC_IPC);
}
void cnl_ipc_int_disable(struct sst_dsp *ctx)
{
sst_dsp_shim_update_bits_unlocked(ctx, CNL_ADSP_REG_ADSPIC,
CNL_ADSPIC_IPC, 0);
}
void cnl_ipc_op_int_enable(struct sst_dsp *ctx)
{
/* enable IPC DONE interrupt */
sst_dsp_shim_update_bits(ctx, CNL_ADSP_REG_HIPCCTL,
CNL_ADSP_REG_HIPCCTL_DONE,
CNL_ADSP_REG_HIPCCTL_DONE);
/* enable IPC BUSY interrupt */
sst_dsp_shim_update_bits(ctx, CNL_ADSP_REG_HIPCCTL,
CNL_ADSP_REG_HIPCCTL_BUSY,
CNL_ADSP_REG_HIPCCTL_BUSY);
}
void cnl_ipc_op_int_disable(struct sst_dsp *ctx)
{
/* disable IPC DONE interrupt */
sst_dsp_shim_update_bits(ctx, CNL_ADSP_REG_HIPCCTL,
CNL_ADSP_REG_HIPCCTL_DONE, 0);
/* disable IPC BUSY interrupt */
sst_dsp_shim_update_bits(ctx, CNL_ADSP_REG_HIPCCTL,
CNL_ADSP_REG_HIPCCTL_BUSY, 0);
}
bool cnl_ipc_int_status(struct sst_dsp *ctx)
{
return sst_dsp_shim_read_unlocked(ctx, CNL_ADSP_REG_ADSPIS) &
CNL_ADSPIS_IPC;
}
void cnl_ipc_free(struct sst_generic_ipc *ipc)
{
cnl_ipc_op_int_disable(ipc->dsp);
sst_ipc_fini(ipc);
}

@@ -1,103 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Cannonlake SST DSP Support
*
* Copyright (C) 2016-17, Intel Corporation.
*/
#ifndef __CNL_SST_DSP_H__
#define __CNL_SST_DSP_H__
struct sst_dsp;
struct sst_dsp_device;
struct sst_generic_ipc;
/* Intel HD Audio General DSP Registers */
#define CNL_ADSP_GEN_BASE 0x0
#define CNL_ADSP_REG_ADSPCS (CNL_ADSP_GEN_BASE + 0x04)
#define CNL_ADSP_REG_ADSPIC (CNL_ADSP_GEN_BASE + 0x08)
#define CNL_ADSP_REG_ADSPIS (CNL_ADSP_GEN_BASE + 0x0c)
/* Intel HD Audio Inter-Processor Communication Registers */
#define CNL_ADSP_IPC_BASE 0xc0
#define CNL_ADSP_REG_HIPCTDR (CNL_ADSP_IPC_BASE + 0x00)
#define CNL_ADSP_REG_HIPCTDA (CNL_ADSP_IPC_BASE + 0x04)
#define CNL_ADSP_REG_HIPCTDD (CNL_ADSP_IPC_BASE + 0x08)
#define CNL_ADSP_REG_HIPCIDR (CNL_ADSP_IPC_BASE + 0x10)
#define CNL_ADSP_REG_HIPCIDA (CNL_ADSP_IPC_BASE + 0x14)
#define CNL_ADSP_REG_HIPCIDD (CNL_ADSP_IPC_BASE + 0x18)
#define CNL_ADSP_REG_HIPCCTL (CNL_ADSP_IPC_BASE + 0x28)
/* HIPCTDR */
#define CNL_ADSP_REG_HIPCTDR_BUSY BIT(31)
/* HIPCTDA */
#define CNL_ADSP_REG_HIPCTDA_DONE BIT(31)
/* HIPCIDR */
#define CNL_ADSP_REG_HIPCIDR_BUSY BIT(31)
/* HIPCIDA */
#define CNL_ADSP_REG_HIPCIDA_DONE BIT(31)
/* CNL HIPCCTL */
#define CNL_ADSP_REG_HIPCCTL_DONE BIT(1)
#define CNL_ADSP_REG_HIPCCTL_BUSY BIT(0)
/* CNL HIPCT */
#define CNL_ADSP_REG_HIPCT_BUSY BIT(31)
/* Intel HD Audio SRAM Window 1 */
#define CNL_ADSP_SRAM1_BASE 0xa0000
#define CNL_ADSP_MMIO_LEN 0x10000
#define CNL_ADSP_W0_STAT_SZ 0x1000
#define CNL_ADSP_W0_UP_SZ 0x1000
#define CNL_ADSP_W1_SZ 0x1000
#define CNL_FW_STS_MASK 0xf
#define CNL_ADSPIC_IPC 0x1
#define CNL_ADSPIS_IPC 0x1
#define CNL_DSP_CORES 4
#define CNL_DSP_CORES_MASK ((1 << CNL_DSP_CORES) - 1)
/* core reset - asserted high */
#define CNL_ADSPCS_CRST_SHIFT 0
#define CNL_ADSPCS_CRST(x) (x << CNL_ADSPCS_CRST_SHIFT)
/* core run/stall - when set to 1 core is stalled */
#define CNL_ADSPCS_CSTALL_SHIFT 8
#define CNL_ADSPCS_CSTALL(x) (x << CNL_ADSPCS_CSTALL_SHIFT)
/* set power active - when set to 1 turn core on */
#define CNL_ADSPCS_SPA_SHIFT 16
#define CNL_ADSPCS_SPA(x) (x << CNL_ADSPCS_SPA_SHIFT)
/* current power active - power status of cores, set by hardware */
#define CNL_ADSPCS_CPA_SHIFT 24
#define CNL_ADSPCS_CPA(x) (x << CNL_ADSPCS_CPA_SHIFT)
int cnl_dsp_enable_core(struct sst_dsp *ctx, unsigned int core_mask);
int cnl_dsp_disable_core(struct sst_dsp *ctx, unsigned int core_mask);
irqreturn_t cnl_dsp_sst_interrupt(int irq, void *dev_id);
void cnl_dsp_free(struct sst_dsp *dsp);
void cnl_ipc_int_enable(struct sst_dsp *ctx);
void cnl_ipc_int_disable(struct sst_dsp *ctx);
void cnl_ipc_op_int_enable(struct sst_dsp *ctx);
void cnl_ipc_op_int_disable(struct sst_dsp *ctx);
bool cnl_ipc_int_status(struct sst_dsp *ctx);
void cnl_ipc_free(struct sst_generic_ipc *ipc);
int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
struct skl_dev **dsp);
int cnl_sst_init_fw(struct device *dev, struct skl_dev *skl);
void cnl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl);
#endif /*__CNL_SST_DSP_H__*/
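
The CRST/CSTALL/SPA/CPA macros above shift a per-core bitmask into the
four byte lanes of the ADSPCS register. A standalone sketch of how they
compose (illustrative only, not part of the driver):

#include <stdio.h>
#define CNL_ADSPCS_SPA_SHIFT 16
#define CNL_ADSPCS_SPA(x) ((x) << CNL_ADSPCS_SPA_SHIFT)
#define CNL_ADSPCS_CPA_SHIFT 24
#define CNL_ADSPCS_CPA(x) ((x) << CNL_ADSPCS_CPA_SHIFT)
int main(void)
{
/* SPA requests power for core 0 (mask 0x1): bit 16 -> 0x10000 */
printf("SPA: %#x\n", CNL_ADSPCS_SPA(0x1));
/* CPA reports the power state of core 0: bit 24 -> 0x1000000 */
printf("CPA: %#x\n", CNL_ADSPCS_CPA(0x1));
return 0;
}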

@@ -1,508 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* cnl-sst.c - DSP library functions for CNL platform
*
* Copyright (C) 2016-17, Intel Corporation.
*
* Author: Guneshwor Singh <guneshwor.o.singh@intel.com>
*
* Modified from:
* HDA DSP library functions for SKL platform
* Copyright (C) 2014-15, Intel Corporation.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "../common/sst-ipc.h"
#include "cnl-sst-dsp.h"
#include "skl.h"
#define CNL_FW_ROM_INIT 0x1
#define CNL_FW_INIT 0x5
#define CNL_IPC_PURGE 0x01004000
#define CNL_INIT_TIMEOUT 300
#define CNL_BASEFW_TIMEOUT 3000
#define CNL_ADSP_SRAM0_BASE 0x80000
/* Firmware status window */
#define CNL_ADSP_FW_STATUS CNL_ADSP_SRAM0_BASE
#define CNL_ADSP_ERROR_CODE (CNL_ADSP_FW_STATUS + 0x4)
#define CNL_INSTANCE_ID 0
#define CNL_BASE_FW_MODULE_ID 0
#define CNL_ADSP_FW_HDR_OFFSET 0x2000
#define CNL_ROM_CTRL_DMA_ID 0x9
static int cnl_prepare_fw(struct sst_dsp *ctx, const void *fwdata, u32 fwsize)
{
int ret, stream_tag;
stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40, fwsize, &ctx->dmab);
if (stream_tag <= 0) {
dev_err(ctx->dev, "dma prepare failed: 0%#x\n", stream_tag);
return stream_tag;
}
ctx->dsp_ops.stream_tag = stream_tag;
memcpy(ctx->dmab.area, fwdata, fwsize);
ret = skl_dsp_core_power_up(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
dev_err(ctx->dev, "dsp core0 power up failed\n");
ret = -EIO;
goto base_fw_load_failed;
}
/* purge FW request */
sst_dsp_shim_write(ctx, CNL_ADSP_REG_HIPCIDR,
CNL_ADSP_REG_HIPCIDR_BUSY | (CNL_IPC_PURGE |
((stream_tag - 1) << CNL_ROM_CTRL_DMA_ID)));
ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
dev_err(ctx->dev, "Start dsp core failed ret: %d\n", ret);
ret = -EIO;
goto base_fw_load_failed;
}
ret = sst_dsp_register_poll(ctx, CNL_ADSP_REG_HIPCIDA,
CNL_ADSP_REG_HIPCIDA_DONE,
CNL_ADSP_REG_HIPCIDA_DONE,
BXT_INIT_TIMEOUT, "HIPCIDA Done");
if (ret < 0) {
dev_err(ctx->dev, "timeout for purge request: %d\n", ret);
goto base_fw_load_failed;
}
/* enable interrupt */
cnl_ipc_int_enable(ctx);
cnl_ipc_op_int_enable(ctx);
ret = sst_dsp_register_poll(ctx, CNL_ADSP_FW_STATUS, CNL_FW_STS_MASK,
CNL_FW_ROM_INIT, CNL_INIT_TIMEOUT,
"rom load");
if (ret < 0) {
dev_err(ctx->dev, "rom init timeout, ret: %d\n", ret);
goto base_fw_load_failed;
}
return 0;
base_fw_load_failed:
ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, stream_tag);
cnl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
return ret;
}
static int sst_transfer_fw_host_dma(struct sst_dsp *ctx)
{
int ret;
ctx->dsp_ops.trigger(ctx->dev, true, ctx->dsp_ops.stream_tag);
ret = sst_dsp_register_poll(ctx, CNL_ADSP_FW_STATUS, CNL_FW_STS_MASK,
CNL_FW_INIT, CNL_BASEFW_TIMEOUT,
"firmware boot");
ctx->dsp_ops.trigger(ctx->dev, false, ctx->dsp_ops.stream_tag);
ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, ctx->dsp_ops.stream_tag);
return ret;
}
static int cnl_load_base_firmware(struct sst_dsp *ctx)
{
struct firmware stripped_fw;
struct skl_dev *cnl = ctx->thread_context;
int ret, i;
if (!ctx->fw) {
ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
if (ret < 0) {
dev_err(ctx->dev, "request firmware failed: %d\n", ret);
goto cnl_load_base_firmware_failed;
}
}
/* parse uuids if first boot */
if (cnl->is_first_boot) {
ret = snd_skl_parse_uuids(ctx, ctx->fw,
CNL_ADSP_FW_HDR_OFFSET, 0);
if (ret < 0)
goto cnl_load_base_firmware_failed;
}
stripped_fw.data = ctx->fw->data;
stripped_fw.size = ctx->fw->size;
skl_dsp_strip_extended_manifest(&stripped_fw);
for (i = 0; i < BXT_FW_ROM_INIT_RETRY; i++) {
ret = cnl_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
if (!ret)
break;
dev_dbg(ctx->dev, "prepare firmware failed: %d\n", ret);
}
if (ret < 0)
goto cnl_load_base_firmware_failed;
ret = sst_transfer_fw_host_dma(ctx);
if (ret < 0) {
dev_err(ctx->dev, "transfer firmware failed: %d\n", ret);
cnl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
goto cnl_load_base_firmware_failed;
}
ret = wait_event_timeout(cnl->boot_wait, cnl->boot_complete,
msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
if (ret == 0) {
dev_err(ctx->dev, "FW ready timed-out\n");
cnl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
ret = -EIO;
goto cnl_load_base_firmware_failed;
}
cnl->fw_loaded = true;
return 0;
cnl_load_base_firmware_failed:
dev_err(ctx->dev, "firmware load failed: %d\n", ret);
release_firmware(ctx->fw);
ctx->fw = NULL;
return ret;
}
static int cnl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
struct skl_dev *cnl = ctx->thread_context;
unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
struct skl_ipc_dxstate_info dx;
int ret;
if (!cnl->fw_loaded) {
cnl->boot_complete = false;
ret = cnl_load_base_firmware(ctx);
if (ret < 0) {
dev_err(ctx->dev, "fw reload failed: %d\n", ret);
return ret;
}
cnl->cores.state[core_id] = SKL_DSP_RUNNING;
return ret;
}
ret = cnl_dsp_enable_core(ctx, core_mask);
if (ret < 0) {
dev_err(ctx->dev, "enable dsp core %d failed: %d\n",
core_id, ret);
goto err;
}
if (core_id == SKL_DSP_CORE0_ID) {
/* enable interrupt */
cnl_ipc_int_enable(ctx);
cnl_ipc_op_int_enable(ctx);
cnl->boot_complete = false;
ret = wait_event_timeout(cnl->boot_wait, cnl->boot_complete,
msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
if (ret == 0) {
dev_err(ctx->dev,
"dsp boot timeout, status=%#x error=%#x\n",
sst_dsp_shim_read(ctx, CNL_ADSP_FW_STATUS),
sst_dsp_shim_read(ctx, CNL_ADSP_ERROR_CODE));
ret = -ETIMEDOUT;
goto err;
}
} else {
dx.core_mask = core_mask;
dx.dx_mask = core_mask;
ret = skl_ipc_set_dx(&cnl->ipc, CNL_INSTANCE_ID,
CNL_BASE_FW_MODULE_ID, &dx);
if (ret < 0) {
dev_err(ctx->dev, "set_dx failed, core: %d ret: %d\n",
core_id, ret);
goto err;
}
}
cnl->cores.state[core_id] = SKL_DSP_RUNNING;
return 0;
err:
cnl_dsp_disable_core(ctx, core_mask);
return ret;
}
static int cnl_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
struct skl_dev *cnl = ctx->thread_context;
unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
struct skl_ipc_dxstate_info dx;
int ret;
dx.core_mask = core_mask;
dx.dx_mask = SKL_IPC_D3_MASK;
ret = skl_ipc_set_dx(&cnl->ipc, CNL_INSTANCE_ID,
CNL_BASE_FW_MODULE_ID, &dx);
if (ret < 0) {
dev_err(ctx->dev,
"dsp core %d to d3 failed; continue reset\n",
core_id);
cnl->fw_loaded = false;
}
/* disable interrupts if core 0 */
if (core_id == SKL_DSP_CORE0_ID) {
skl_ipc_op_int_disable(ctx);
skl_ipc_int_disable(ctx);
}
ret = cnl_dsp_disable_core(ctx, core_mask);
if (ret < 0) {
dev_err(ctx->dev, "disable dsp core %d failed: %d\n",
core_id, ret);
return ret;
}
cnl->cores.state[core_id] = SKL_DSP_RESET;
return ret;
}
static unsigned int cnl_get_errno(struct sst_dsp *ctx)
{
return sst_dsp_shim_read(ctx, CNL_ADSP_ERROR_CODE);
}
static const struct skl_dsp_fw_ops cnl_fw_ops = {
.set_state_D0 = cnl_set_dsp_D0,
.set_state_D3 = cnl_set_dsp_D3,
.load_fw = cnl_load_base_firmware,
.get_fw_errcode = cnl_get_errno,
};
static struct sst_ops cnl_ops = {
.irq_handler = cnl_dsp_sst_interrupt,
.write = sst_shim32_write,
.read = sst_shim32_read,
.free = cnl_dsp_free,
};
#define CNL_IPC_GLB_NOTIFY_RSP_SHIFT 29
#define CNL_IPC_GLB_NOTIFY_RSP_MASK 0x1
#define CNL_IPC_GLB_NOTIFY_RSP_TYPE(x) (((x) >> CNL_IPC_GLB_NOTIFY_RSP_SHIFT) \
& CNL_IPC_GLB_NOTIFY_RSP_MASK)
static irqreturn_t cnl_dsp_irq_thread_handler(int irq, void *context)
{
struct sst_dsp *dsp = context;
struct skl_dev *cnl = dsp->thread_context;
struct sst_generic_ipc *ipc = &cnl->ipc;
struct skl_ipc_header header = {0};
u32 hipcida, hipctdr, hipctdd;
int ipc_irq = 0;
/* here we handle ipc interrupts only */
if (!(dsp->intr_status & CNL_ADSPIS_IPC))
return IRQ_NONE;
hipcida = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCIDA);
hipctdr = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCTDR);
hipctdd = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCTDD);
/* reply message from dsp */
if (hipcida & CNL_ADSP_REG_HIPCIDA_DONE) {
sst_dsp_shim_update_bits(dsp, CNL_ADSP_REG_HIPCCTL,
CNL_ADSP_REG_HIPCCTL_DONE, 0);
/* clear done bit - tell dsp operation is complete */
sst_dsp_shim_update_bits_forced(dsp, CNL_ADSP_REG_HIPCIDA,
CNL_ADSP_REG_HIPCIDA_DONE, CNL_ADSP_REG_HIPCIDA_DONE);
ipc_irq = 1;
/* unmask done interrupt */
sst_dsp_shim_update_bits(dsp, CNL_ADSP_REG_HIPCCTL,
CNL_ADSP_REG_HIPCCTL_DONE, CNL_ADSP_REG_HIPCCTL_DONE);
}
/* new message from dsp */
if (hipctdr & CNL_ADSP_REG_HIPCTDR_BUSY) {
header.primary = hipctdr;
header.extension = hipctdd;
dev_dbg(dsp->dev, "IPC irq: Firmware respond primary:%x",
header.primary);
dev_dbg(dsp->dev, "IPC irq: Firmware respond extension:%x",
header.extension);
if (CNL_IPC_GLB_NOTIFY_RSP_TYPE(header.primary)) {
/* Handle Immediate reply from DSP Core */
skl_ipc_process_reply(ipc, header);
} else {
dev_dbg(dsp->dev, "IPC irq: Notification from firmware\n");
skl_ipc_process_notification(ipc, header);
}
/* clear busy interrupt */
sst_dsp_shim_update_bits_forced(dsp, CNL_ADSP_REG_HIPCTDR,
CNL_ADSP_REG_HIPCTDR_BUSY, CNL_ADSP_REG_HIPCTDR_BUSY);
/* set done bit to ack dsp */
sst_dsp_shim_update_bits_forced(dsp, CNL_ADSP_REG_HIPCTDA,
CNL_ADSP_REG_HIPCTDA_DONE, CNL_ADSP_REG_HIPCTDA_DONE);
ipc_irq = 1;
}
if (ipc_irq == 0)
return IRQ_NONE;
cnl_ipc_int_enable(dsp);
/* continue to send any remaining messages */
schedule_work(&ipc->kwork);
return IRQ_HANDLED;
}
static struct sst_dsp_device cnl_dev = {
.thread = cnl_dsp_irq_thread_handler,
.ops = &cnl_ops,
};
static void cnl_ipc_tx_msg(struct sst_generic_ipc *ipc, struct ipc_message *msg)
{
struct skl_ipc_header *header = (struct skl_ipc_header *)(&msg->tx.header);
if (msg->tx.size)
sst_dsp_outbox_write(ipc->dsp, msg->tx.data, msg->tx.size);
sst_dsp_shim_write_unlocked(ipc->dsp, CNL_ADSP_REG_HIPCIDD,
header->extension);
sst_dsp_shim_write_unlocked(ipc->dsp, CNL_ADSP_REG_HIPCIDR,
header->primary | CNL_ADSP_REG_HIPCIDR_BUSY);
}
static bool cnl_ipc_is_dsp_busy(struct sst_dsp *dsp)
{
u32 hipcidr;
hipcidr = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCIDR);
return (hipcidr & CNL_ADSP_REG_HIPCIDR_BUSY);
}
static int cnl_ipc_init(struct device *dev, struct skl_dev *cnl)
{
struct sst_generic_ipc *ipc;
int err;
ipc = &cnl->ipc;
ipc->dsp = cnl->dsp;
ipc->dev = dev;
ipc->tx_data_max_size = CNL_ADSP_W1_SZ;
ipc->rx_data_max_size = CNL_ADSP_W0_UP_SZ;
err = sst_ipc_init(ipc);
if (err)
return err;
/*
* overriding tx_msg and is_dsp_busy since
* ipc registers are different for cnl
*/
ipc->ops.tx_msg = cnl_ipc_tx_msg;
ipc->ops.tx_data_copy = skl_ipc_tx_data_copy;
ipc->ops.is_dsp_busy = cnl_ipc_is_dsp_busy;
return 0;
}
int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
struct skl_dev **dsp)
{
struct skl_dev *cnl;
struct sst_dsp *sst;
int ret;
ret = skl_sst_ctx_init(dev, irq, fw_name, dsp_ops, dsp, &cnl_dev);
if (ret < 0) {
dev_err(dev, "%s: no device\n", __func__);
return ret;
}
cnl = *dsp;
sst = cnl->dsp;
sst->fw_ops = cnl_fw_ops;
sst->addr.lpe = mmio_base;
sst->addr.shim = mmio_base;
sst->addr.sram0_base = CNL_ADSP_SRAM0_BASE;
sst->addr.sram1_base = CNL_ADSP_SRAM1_BASE;
sst->addr.w0_stat_sz = CNL_ADSP_W0_STAT_SZ;
sst->addr.w0_up_sz = CNL_ADSP_W0_UP_SZ;
sst_dsp_mailbox_init(sst, (CNL_ADSP_SRAM0_BASE + CNL_ADSP_W0_STAT_SZ),
CNL_ADSP_W0_UP_SZ, CNL_ADSP_SRAM1_BASE,
CNL_ADSP_W1_SZ);
ret = cnl_ipc_init(dev, cnl);
if (ret) {
skl_dsp_free(sst);
return ret;
}
cnl->boot_complete = false;
init_waitqueue_head(&cnl->boot_wait);
return skl_dsp_acquire_irq(sst);
}
EXPORT_SYMBOL_GPL(cnl_sst_dsp_init);
int cnl_sst_init_fw(struct device *dev, struct skl_dev *skl)
{
int ret;
struct sst_dsp *sst = skl->dsp;
ret = skl->dsp->fw_ops.load_fw(sst);
if (ret < 0) {
dev_err(dev, "load base fw failed: %d", ret);
return ret;
}
skl_dsp_init_core_state(sst);
skl->is_first_boot = false;
return 0;
}
EXPORT_SYMBOL_GPL(cnl_sst_init_fw);
void cnl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl)
{
if (skl->dsp->fw)
release_firmware(skl->dsp->fw);
skl_freeup_uuid_list(skl);
cnl_ipc_free(&skl->ipc);
skl->dsp->ops->free(skl->dsp);
}
EXPORT_SYMBOL_GPL(cnl_sst_dsp_cleanup);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Cannonlake IPC driver");

@@ -1,248 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* skl-debug.c - Debugfs for skl driver
*
* Copyright (C) 2016-17 Intel Corp
*/
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <uapi/sound/skl-tplg-interface.h>
#include "skl.h"
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#define MOD_BUF PAGE_SIZE
#define FW_REG_BUF PAGE_SIZE
#define FW_REG_SIZE 0x60
struct skl_debug {
struct skl_dev *skl;
struct device *dev;
struct dentry *fs;
struct dentry *modules;
u8 fw_read_buff[FW_REG_BUF];
};
static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf,
int max_pin, ssize_t size, bool direction)
{
int i;
ssize_t ret = 0;
for (i = 0; i < max_pin; i++) {
ret += scnprintf(buf + size, MOD_BUF - size,
"%s %d\n\tModule %d\n\tInstance %d\n\t"
"In-used %s\n\tType %s\n"
"\tState %d\n\tIndex %d\n",
direction ? "Input Pin:" : "Output Pin:",
i, m_pin[i].id.module_id,
m_pin[i].id.instance_id,
m_pin[i].in_use ? "Used" : "Unused",
m_pin[i].is_dynamic ? "Dynamic" : "Static",
m_pin[i].pin_state, i);
size += ret;
}
return ret;
}
static ssize_t skl_print_fmt(struct skl_module_fmt *fmt, char *buf,
ssize_t size, bool direction)
{
return scnprintf(buf + size, MOD_BUF - size,
"%s\n\tCh %d\n\tFreq %d\n\tBit depth %d\n\t"
"Valid bit depth %d\n\tCh config %#x\n\tInterleaving %d\n\t"
"Sample Type %d\n\tCh Map %#x\n",
direction ? "Input Format:" : "Output Format:",
fmt->channels, fmt->s_freq, fmt->bit_depth,
fmt->valid_bit_depth, fmt->ch_cfg,
fmt->interleaving_style, fmt->sample_type,
fmt->ch_map);
}
static ssize_t module_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct skl_module_cfg *mconfig = file->private_data;
struct skl_module *module = mconfig->module;
struct skl_module_res *res = &module->resources[mconfig->res_idx];
char *buf;
ssize_t ret;
buf = kzalloc(MOD_BUF, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = scnprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n"
"\tInstance id %d\n\tPvt_id %d\n", mconfig->guid,
mconfig->id.module_id, mconfig->id.instance_id,
mconfig->id.pvt_id);
ret += scnprintf(buf + ret, MOD_BUF - ret,
"Resources:\n\tCPC %#x\n\tIBS %#x\n\tOBS %#x\t\n",
res->cpc, res->ibs, res->obs);
ret += scnprintf(buf + ret, MOD_BUF - ret,
"Module data:\n\tCore %d\n\tIn queue %d\n\t"
"Out queue %d\n\tType %s\n",
mconfig->core_id, mconfig->max_in_queue,
mconfig->max_out_queue,
mconfig->is_loadable ? "loadable" : "inbuilt");
ret += skl_print_fmt(mconfig->in_fmt, buf, ret, true);
ret += skl_print_fmt(mconfig->out_fmt, buf, ret, false);
ret += scnprintf(buf + ret, MOD_BUF - ret,
"Fixup:\n\tParams %#x\n\tConverter %#x\n",
mconfig->params_fixup, mconfig->converter);
ret += scnprintf(buf + ret, MOD_BUF - ret,
"Module Gateway:\n\tType %#x\n\tVbus %#x\n\tHW conn %#x\n\tSlot %#x\n",
mconfig->dev_type, mconfig->vbus_id,
mconfig->hw_conn_type, mconfig->time_slot);
ret += scnprintf(buf + ret, MOD_BUF - ret,
"Pipeline:\n\tID %d\n\tPriority %d\n\tConn Type %d\n\t"
"Pages %#x\n", mconfig->pipe->ppl_id,
mconfig->pipe->pipe_priority, mconfig->pipe->conn_type,
mconfig->pipe->memory_pages);
ret += scnprintf(buf + ret, MOD_BUF - ret,
"\tParams:\n\t\tHost DMA %d\n\t\tLink DMA %d\n",
mconfig->pipe->p_params->host_dma_id,
mconfig->pipe->p_params->link_dma_id);
ret += scnprintf(buf + ret, MOD_BUF - ret,
"\tPCM params:\n\t\tCh %d\n\t\tFreq %d\n\t\tFormat %d\n",
mconfig->pipe->p_params->ch,
mconfig->pipe->p_params->s_freq,
mconfig->pipe->p_params->s_fmt);
ret += scnprintf(buf + ret, MOD_BUF - ret,
"\tLink %#x\n\tStream %#x\n",
mconfig->pipe->p_params->linktype,
mconfig->pipe->p_params->stream);
ret += scnprintf(buf + ret, MOD_BUF - ret,
"\tState %d\n\tPassthru %s\n",
mconfig->pipe->state,
mconfig->pipe->passthru ? "true" : "false");
ret += skl_print_pins(mconfig->m_in_pin, buf,
mconfig->max_in_queue, ret, true);
ret += skl_print_pins(mconfig->m_out_pin, buf,
mconfig->max_out_queue, ret, false);
ret += scnprintf(buf + ret, MOD_BUF - ret,
"Other:\n\tDomain %d\n\tHomogeneous Input %s\n\t"
"Homogeneous Output %s\n\tIn Queue Mask %d\n\t"
"Out Queue Mask %d\n\tDMA ID %d\n\tMem Pages %d\n\t"
"Module Type %d\n\tModule State %d\n",
mconfig->domain,
mconfig->homogenous_inputs ? "true" : "false",
mconfig->homogenous_outputs ? "true" : "false",
mconfig->in_queue_mask, mconfig->out_queue_mask,
mconfig->dma_id, mconfig->mem_pages, mconfig->m_state,
mconfig->m_type);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
kfree(buf);
return ret;
}
static const struct file_operations mcfg_fops = {
.open = simple_open,
.read = module_read,
.llseek = default_llseek,
};
void skl_debug_init_module(struct skl_debug *d,
struct snd_soc_dapm_widget *w,
struct skl_module_cfg *mconfig)
{
debugfs_create_file(w->name, 0444, d->modules, mconfig,
&mcfg_fops);
}
static ssize_t fw_softreg_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct skl_debug *d = file->private_data;
struct sst_dsp *sst = d->skl->dsp;
size_t w0_stat_sz = sst->addr.w0_stat_sz;
void __iomem *in_base = sst->mailbox.in_base;
void __iomem *fw_reg_addr;
unsigned int offset;
char *tmp;
ssize_t ret = 0;
tmp = kzalloc(FW_REG_BUF, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
fw_reg_addr = in_base - w0_stat_sz;
memset(d->fw_read_buff, 0, FW_REG_BUF);
if (w0_stat_sz > 0)
__ioread32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
for (offset = 0; offset < FW_REG_SIZE; offset += 16) {
ret += scnprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
hex_dump_to_buffer(d->fw_read_buff + offset, 16, 16, 4,
tmp + ret, FW_REG_BUF - ret, 0);
ret += strlen(tmp + ret);
/* print newline for each offset */
if (FW_REG_BUF - ret > 0)
tmp[ret++] = '\n';
}
ret = simple_read_from_buffer(user_buf, count, ppos, tmp, ret);
kfree(tmp);
return ret;
}
static const struct file_operations soft_regs_ctrl_fops = {
.open = simple_open,
.read = fw_softreg_read,
.llseek = default_llseek,
};
struct skl_debug *skl_debugfs_init(struct skl_dev *skl)
{
struct skl_debug *d;
d = devm_kzalloc(&skl->pci->dev, sizeof(*d), GFP_KERNEL);
if (!d)
return NULL;
/* create the debugfs dir with platform component's debugfs as parent */
d->fs = debugfs_create_dir("dsp", skl->component->debugfs_root);
d->skl = skl;
d->dev = &skl->pci->dev;
/* now create the module dir */
d->modules = debugfs_create_dir("modules", d->fs);
debugfs_create_file("fw_soft_regs_rd", 0444, d->fs, d,
&soft_regs_ctrl_fops);
return d;
}
void skl_debugfs_exit(struct skl_dev *skl)
{
struct skl_debug *d = skl->debugfs;
debugfs_remove_recursive(d->fs);
d = NULL;
}

@@ -1,87 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* skl-i2s.h - i2s blob mapping
*
* Copyright (C) 2017 Intel Corp
* Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#ifndef __SOUND_SOC_SKL_I2S_H
#define __SOUND_SOC_SKL_I2S_H
#define SKL_I2S_MAX_TIME_SLOTS 8
#define SKL_MCLK_DIV_CLK_SRC_MASK GENMASK(17, 16)
#define SKL_MNDSS_DIV_CLK_SRC_MASK GENMASK(21, 20)
#define SKL_SHIFT(x) (ffs(x) - 1)
#define SKL_MCLK_DIV_RATIO_MASK GENMASK(11, 0)
#define is_legacy_blob(x) (x.signature != 0xEE)
#define ext_to_legacy_blob(i2s_config_blob_ext) \
((struct skl_i2s_config_blob_legacy *) i2s_config_blob_ext)
#define get_clk_src(mclk, mask) \
((mclk.mdivctrl & mask) >> SKL_SHIFT(mask))
struct skl_i2s_config {
u32 ssc0;
u32 ssc1;
u32 sscto;
u32 sspsp;
u32 sstsa;
u32 ssrsa;
u32 ssc2;
u32 sspsp2;
u32 ssc3;
u32 ssioc;
} __packed;
struct skl_i2s_config_mclk {
u32 mdivctrl;
u32 mdivr;
};
struct skl_i2s_config_mclk_ext {
u32 mdivctrl;
u32 mdivr_count;
u32 mdivr[];
} __packed;
struct skl_i2s_config_blob_signature {
u32 minor_ver : 8;
u32 major_ver : 8;
u32 resvdz : 8;
u32 signature : 8;
} __packed;
struct skl_i2s_config_blob_header {
struct skl_i2s_config_blob_signature sig;
u32 size;
};
/**
* struct skl_i2s_config_blob_legacy - Structure defines I2S Gateway
* configuration legacy blob
*
* @gtw_attr: Gateway attribute for the I2S Gateway
* @tdm_ts_group: TDM slot mapping against channels in the Gateway.
* @i2s_cfg: I2S HW registers
* @mclk: MCLK clock source and divider values
*/
struct skl_i2s_config_blob_legacy {
u32 gtw_attr;
u32 tdm_ts_group[SKL_I2S_MAX_TIME_SLOTS];
struct skl_i2s_config i2s_cfg;
struct skl_i2s_config_mclk mclk;
};
struct skl_i2s_config_blob_ext {
u32 gtw_attr;
struct skl_i2s_config_blob_header hdr;
u32 tdm_ts_group[SKL_I2S_MAX_TIME_SLOTS];
struct skl_i2s_config i2s_cfg;
struct skl_i2s_config_mclk_ext mclk;
} __packed;
#endif /* __SOUND_SOC_SKL_I2S_H */

File diff suppressed because it is too large.

@@ -1,269 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* skl-nhlt.c - Intel SKL Platform NHLT parsing
*
* Copyright (C) 2015 Intel Corp
* Author: Sanjiv Kumar <sanjiv.kumar@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/pci.h>
#include <sound/intel-nhlt.h>
#include "skl.h"
#include "skl-i2s.h"
static void skl_nhlt_trim_space(char *trim)
{
char *s = trim;
int cnt;
int i;
cnt = 0;
for (i = 0; s[i]; i++) {
if (!isspace(s[i]))
s[cnt++] = s[i];
}
s[cnt] = '\0';
}
int skl_nhlt_update_topology_bin(struct skl_dev *skl)
{
struct nhlt_acpi_table *nhlt = (struct nhlt_acpi_table *)skl->nhlt;
struct hdac_bus *bus = skl_to_bus(skl);
struct device *dev = bus->dev;
dev_dbg(dev, "oem_id %.6s, oem_table_id %.8s oem_revision %d\n",
nhlt->header.oem_id, nhlt->header.oem_table_id,
nhlt->header.oem_revision);
snprintf(skl->tplg_name, sizeof(skl->tplg_name), "%x-%.6s-%.8s-%d%s",
skl->pci_id, nhlt->header.oem_id, nhlt->header.oem_table_id,
nhlt->header.oem_revision, "-tplg.bin");
skl_nhlt_trim_space(skl->tplg_name);
return 0;
}
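/*
 * Example with hypothetical values: pci_id 0x9d70, oem_id "INTEL ",
 * oem_table_id "SKLNHLT" and oem_revision 1 yield the topology name
 * "9d70-INTEL-SKLNHLT-1-tplg.bin" once skl_nhlt_trim_space() has
 * removed the padding spaces.
 */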
static ssize_t platform_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pci = to_pci_dev(dev);
struct hdac_bus *bus = pci_get_drvdata(pci);
struct skl_dev *skl = bus_to_skl(bus);
struct nhlt_acpi_table *nhlt = (struct nhlt_acpi_table *)skl->nhlt;
char platform_id[32];
sprintf(platform_id, "%x-%.6s-%.8s-%d", skl->pci_id,
nhlt->header.oem_id, nhlt->header.oem_table_id,
nhlt->header.oem_revision);
skl_nhlt_trim_space(platform_id);
return sysfs_emit(buf, "%s\n", platform_id);
}
static DEVICE_ATTR_RO(platform_id);
int skl_nhlt_create_sysfs(struct skl_dev *skl)
{
struct device *dev = &skl->pci->dev;
if (sysfs_create_file(&dev->kobj, &dev_attr_platform_id.attr))
dev_warn(dev, "Error creating sysfs entry\n");
return 0;
}
void skl_nhlt_remove_sysfs(struct skl_dev *skl)
{
struct device *dev = &skl->pci->dev;
if (skl->nhlt)
sysfs_remove_file(&dev->kobj, &dev_attr_platform_id.attr);
}
/*
* Queries NHLT for all the fmt configuration for a particular endpoint and
* stores all possible rates supported in a rate table for the corresponding
* sclk/sclkfs.
*/
static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks,
struct nhlt_fmt *fmt, u8 id)
{
struct skl_i2s_config_blob_ext *i2s_config_ext;
struct skl_i2s_config_blob_legacy *i2s_config;
struct skl_clk_parent_src *parent;
struct skl_ssp_clk *sclk, *sclkfs;
struct nhlt_fmt_cfg *fmt_cfg;
struct wav_fmt_ext *wav_fmt;
unsigned long rate;
int rate_index = 0;
u16 channels, bps;
u8 clk_src;
int i, j;
u32 fs;
sclk = &ssp_clks[SKL_SCLK_OFS];
sclkfs = &ssp_clks[SKL_SCLKFS_OFS];
if (fmt->fmt_count == 0)
return;
fmt_cfg = (struct nhlt_fmt_cfg *)fmt->fmt_config;
for (i = 0; i < fmt->fmt_count; i++) {
struct nhlt_fmt_cfg *saved_fmt_cfg = fmt_cfg;
bool present = false;
wav_fmt = &saved_fmt_cfg->fmt_ext;
channels = wav_fmt->fmt.channels;
bps = wav_fmt->fmt.bits_per_sample;
fs = wav_fmt->fmt.samples_per_sec;
/*
* In case of TDM configuration on a ssp, there can
* be more than one blob in which channel masks are
* different for each usecase for a specific rate and bps.
* But the sclk rate will be generated for the total
* number of channels used for that endpoint.
*
* So for the given fs and bps, choose blob which has
* the superset of all channels for that endpoint and
* derive the rate.
*/
for (j = i; j < fmt->fmt_count; j++) {
struct nhlt_fmt_cfg *tmp_fmt_cfg = fmt_cfg;
wav_fmt = &tmp_fmt_cfg->fmt_ext;
if ((fs == wav_fmt->fmt.samples_per_sec) &&
(bps == wav_fmt->fmt.bits_per_sample)) {
channels = max_t(u16, channels,
wav_fmt->fmt.channels);
saved_fmt_cfg = tmp_fmt_cfg;
}
/* Move to the next nhlt_fmt_cfg */
tmp_fmt_cfg = (struct nhlt_fmt_cfg *)(tmp_fmt_cfg->config.caps +
tmp_fmt_cfg->config.size);
}
rate = channels * bps * fs;
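/* e.g. (hypothetical) 2 channels * 16 bits * 48000 Hz = 1536000 Hz sclk */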
/* check if the rate is added already to the given SSP's sclk */
for (j = 0; (j < SKL_MAX_CLK_RATES) &&
(sclk[id].rate_cfg[j].rate != 0); j++) {
if (sclk[id].rate_cfg[j].rate == rate) {
present = true;
break;
}
}
/* Fill rate and parent for sclk/sclkfs */
if (!present) {
struct nhlt_fmt_cfg *first_fmt_cfg;
first_fmt_cfg = (struct nhlt_fmt_cfg *)fmt->fmt_config;
i2s_config_ext = (struct skl_i2s_config_blob_ext *)
first_fmt_cfg->config.caps;
/* MCLK Divider Source Select */
if (is_legacy_blob(i2s_config_ext->hdr.sig)) {
i2s_config = ext_to_legacy_blob(i2s_config_ext);
clk_src = get_clk_src(i2s_config->mclk,
SKL_MNDSS_DIV_CLK_SRC_MASK);
} else {
clk_src = get_clk_src(i2s_config_ext->mclk,
SKL_MNDSS_DIV_CLK_SRC_MASK);
}
parent = skl_get_parent_clk(clk_src);
/* Move to the next nhlt_fmt_cfg */
fmt_cfg = (struct nhlt_fmt_cfg *)(fmt_cfg->config.caps +
fmt_cfg->config.size);
/*
* Do not copy the config data if there is no parent
* clock available for this clock source select
*/
if (!parent)
continue;
sclk[id].rate_cfg[rate_index].rate = rate;
sclk[id].rate_cfg[rate_index].config = saved_fmt_cfg;
sclkfs[id].rate_cfg[rate_index].rate = rate;
sclkfs[id].rate_cfg[rate_index].config = saved_fmt_cfg;
sclk[id].parent_name = parent->name;
sclkfs[id].parent_name = parent->name;
rate_index++;
}
}
}
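
For a concrete feel for the rate derivation above, note that the sclk rate is simply channels * bits-per-sample * sample-rate. A minimal, self-contained sketch (user-space C; the blob values are assumed for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int channels = 2, bps = 16, fs = 48000; /* assumed I2S blob values */
	unsigned long rate = channels * bps * fs;	 /* same formula as skl_get_ssp_clks() */

	printf("sclk = %lu Hz\n", rate);		 /* 1536000 Hz for 2ch/16bit/48kHz */
	return 0;
}
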
static void skl_get_mclk(struct skl_dev *skl, struct skl_ssp_clk *mclk,
struct nhlt_fmt *fmt, u8 id)
{
struct skl_i2s_config_blob_ext *i2s_config_ext;
struct skl_i2s_config_blob_legacy *i2s_config;
struct nhlt_fmt_cfg *fmt_cfg;
struct skl_clk_parent_src *parent;
u32 clkdiv, div_ratio;
u8 clk_src;
fmt_cfg = (struct nhlt_fmt_cfg *)fmt->fmt_config;
i2s_config_ext = (struct skl_i2s_config_blob_ext *)fmt_cfg->config.caps;
/* MCLK Divider Source Select and divider */
if (is_legacy_blob(i2s_config_ext->hdr.sig)) {
i2s_config = ext_to_legacy_blob(i2s_config_ext);
clk_src = get_clk_src(i2s_config->mclk,
SKL_MCLK_DIV_CLK_SRC_MASK);
clkdiv = i2s_config->mclk.mdivr &
SKL_MCLK_DIV_RATIO_MASK;
} else {
clk_src = get_clk_src(i2s_config_ext->mclk,
SKL_MCLK_DIV_CLK_SRC_MASK);
clkdiv = i2s_config_ext->mclk.mdivr[0] &
SKL_MCLK_DIV_RATIO_MASK;
}
/* bypass divider */
div_ratio = 1;
if (clkdiv != SKL_MCLK_DIV_RATIO_MASK)
/* Divider is 2 + clkdiv */
div_ratio = clkdiv + 2;
/* Calculate MCLK rate from source using div value */
parent = skl_get_parent_clk(clk_src);
if (!parent)
return;
mclk[id].rate_cfg[0].rate = parent->rate/div_ratio;
mclk[id].rate_cfg[0].config = fmt_cfg;
mclk[id].parent_name = parent->name;
}
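
The mclk derivation above reduces to parent_rate / div_ratio, where div_ratio is 1 (bypass) when the divider field reads as all ones and clkdiv + 2 otherwise. A small sketch with assumed values (the 24 MHz parent rate and the mask width are illustrative only):

#include <stdio.h>

#define MCLK_DIV_RATIO_MASK 0xff	/* stand-in for SKL_MCLK_DIV_RATIO_MASK */

int main(void)
{
	unsigned long parent_rate = 24000000;	/* assumed 24 MHz parent clock */
	unsigned int clkdiv = 0;		/* mdivr field from the NHLT blob */
	unsigned int div_ratio = 1;		/* bypass divider by default */

	if (clkdiv != MCLK_DIV_RATIO_MASK)
		div_ratio = clkdiv + 2;		/* the "2 + clkdiv" rule from skl_get_mclk() */

	printf("mclk = %lu Hz\n", parent_rate / div_ratio);	/* 12 MHz here */
	return 0;
}
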
void skl_get_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks)
{
struct nhlt_acpi_table *nhlt = (struct nhlt_acpi_table *)skl->nhlt;
struct nhlt_endpoint *epnt;
struct nhlt_fmt *fmt;
int i;
u8 id;
epnt = (struct nhlt_endpoint *)nhlt->desc;
for (i = 0; i < nhlt->endpoint_count; i++) {
if (epnt->linktype == NHLT_LINK_SSP) {
id = epnt->virtual_bus_id;
fmt = (struct nhlt_fmt *)(epnt->config.caps
+ epnt->config.size);
skl_get_ssp_clks(skl, ssp_clks, fmt, id);
skl_get_mclk(skl, ssp_clks, fmt, id);
}
epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
}
}

File diff suppressed because it is too large

sound/soc/intel/skylake/skl-ssp-clk.c

@@ -1,428 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2015-17 Intel Corporation
/*
* skl-ssp-clk.c - ASoC skylake ssp clock driver
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <sound/intel-nhlt.h>
#include "skl.h"
#include "skl-ssp-clk.h"
#include "skl-topology.h"
#define to_skl_clk(_hw) container_of(_hw, struct skl_clk, hw)
struct skl_clk_parent {
struct clk_hw *hw;
struct clk_lookup *lookup;
};
struct skl_clk {
struct clk_hw hw;
struct clk_lookup *lookup;
unsigned long rate;
struct skl_clk_pdata *pdata;
u32 id;
};
struct skl_clk_data {
struct skl_clk_parent parent[SKL_MAX_CLK_SRC];
struct skl_clk *clk[SKL_MAX_CLK_CNT];
u8 avail_clk_cnt;
};
static int skl_get_clk_type(u32 index)
{
switch (index) {
case 0 ... (SKL_SCLK_OFS - 1):
return SKL_MCLK;
case SKL_SCLK_OFS ... (SKL_SCLKFS_OFS - 1):
return SKL_SCLK;
case SKL_SCLKFS_OFS ... (SKL_MAX_CLK_CNT - 1):
return SKL_SCLK_FS;
default:
return -EINVAL;
}
}
static int skl_get_vbus_id(u32 index, u8 clk_type)
{
switch (clk_type) {
case SKL_MCLK:
return index;
case SKL_SCLK:
return index - SKL_SCLK_OFS;
case SKL_SCLK_FS:
return index - SKL_SCLKFS_OFS;
default:
return -EINVAL;
}
}
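
These two helpers encode the flat clock-table layout defined in skl-ssp-clk.h below: indices 0..5 are the per-SSP mclks, 6..11 the sclks and 12..17 the sclkfs clocks. A quick sanity check of the mapping (offsets copied from the header):

#include <assert.h>

#define SCLK_OFS	6	/* SKL_SCLK_OFS == SKL_MAX_SSP */
#define SCLKFS_OFS	12	/* SKL_SCLKFS_OFS */

int main(void)
{
	int index = 8;	/* an sclk entry in the flat table */

	assert(index >= SCLK_OFS && index < SCLKFS_OFS);	/* -> SKL_SCLK */
	assert(index - SCLK_OFS == 2);				/* -> vbus_id 2, i.e. SSP2 */
	return 0;
}
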
static void skl_fill_clk_ipc(struct skl_clk_rate_cfg_table *rcfg, u8 clk_type)
{
struct nhlt_fmt_cfg *fmt_cfg;
union skl_clk_ctrl_ipc *ipc;
struct wav_fmt *wfmt;
if (!rcfg)
return;
ipc = &rcfg->dma_ctl_ipc;
if (clk_type == SKL_SCLK_FS) {
fmt_cfg = (struct nhlt_fmt_cfg *)rcfg->config;
wfmt = &fmt_cfg->fmt_ext.fmt;
/* Remove TLV Header size */
ipc->sclk_fs.hdr.size = sizeof(struct skl_dmactrl_sclkfs_cfg) -
sizeof(struct skl_tlv_hdr);
ipc->sclk_fs.sampling_frequency = wfmt->samples_per_sec;
ipc->sclk_fs.bit_depth = wfmt->bits_per_sample;
ipc->sclk_fs.valid_bit_depth =
fmt_cfg->fmt_ext.sample.valid_bits_per_sample;
ipc->sclk_fs.number_of_channels = wfmt->channels;
} else {
ipc->mclk.hdr.type = DMA_CLK_CONTROLS;
/* Remove TLV Header size */
ipc->mclk.hdr.size = sizeof(struct skl_dmactrl_mclk_cfg) -
sizeof(struct skl_tlv_hdr);
}
}
/* Sends dma control IPC to turn the clock ON/OFF */
static int skl_send_clk_dma_control(struct skl_dev *skl,
struct skl_clk_rate_cfg_table *rcfg,
u32 vbus_id, u8 clk_type,
bool enable)
{
struct nhlt_specific_cfg *sp_cfg;
u32 i2s_config_size, node_id = 0;
struct nhlt_fmt_cfg *fmt_cfg;
union skl_clk_ctrl_ipc *ipc;
void *i2s_config = NULL;
u8 *data, size;
int ret;
if (!rcfg)
return -EIO;
ipc = &rcfg->dma_ctl_ipc;
fmt_cfg = (struct nhlt_fmt_cfg *)rcfg->config;
sp_cfg = &fmt_cfg->config;
if (clk_type == SKL_SCLK_FS) {
ipc->sclk_fs.hdr.type =
enable ? DMA_TRANSMITION_START : DMA_TRANSMITION_STOP;
data = (u8 *)&ipc->sclk_fs;
size = sizeof(struct skl_dmactrl_sclkfs_cfg);
} else {
/* 1 to enable mclk, 0 to enable sclk */
if (clk_type == SKL_SCLK)
ipc->mclk.mclk = 0;
else
ipc->mclk.mclk = 1;
ipc->mclk.keep_running = enable;
ipc->mclk.warm_up_over = enable;
ipc->mclk.clk_stop_over = !enable;
data = (u8 *)&ipc->mclk;
size = sizeof(struct skl_dmactrl_mclk_cfg);
}
i2s_config_size = sp_cfg->size + size;
i2s_config = kzalloc(i2s_config_size, GFP_KERNEL);
if (!i2s_config)
return -ENOMEM;
/* copy blob */
memcpy(i2s_config, sp_cfg->caps, sp_cfg->size);
/* copy additional dma controls information */
memcpy(i2s_config + sp_cfg->size, data, size);
node_id = ((SKL_DMA_I2S_LINK_INPUT_CLASS << 8) | (vbus_id << 4));
ret = skl_dsp_set_dma_control(skl, (u32 *)i2s_config,
i2s_config_size, node_id);
kfree(i2s_config);
return ret;
}
static struct skl_clk_rate_cfg_table *skl_get_rate_cfg(
struct skl_clk_rate_cfg_table *rcfg,
unsigned long rate)
{
int i;
for (i = 0; (i < SKL_MAX_CLK_RATES) && rcfg[i].rate; i++) {
if (rcfg[i].rate == rate)
return &rcfg[i];
}
return NULL;
}
static int skl_clk_change_status(struct skl_clk *clkdev,
bool enable)
{
struct skl_clk_rate_cfg_table *rcfg;
int vbus_id, clk_type;
clk_type = skl_get_clk_type(clkdev->id);
if (clk_type < 0)
return clk_type;
vbus_id = skl_get_vbus_id(clkdev->id, clk_type);
if (vbus_id < 0)
return vbus_id;
rcfg = skl_get_rate_cfg(clkdev->pdata->ssp_clks[clkdev->id].rate_cfg,
clkdev->rate);
if (!rcfg)
return -EINVAL;
return skl_send_clk_dma_control(clkdev->pdata->pvt_data, rcfg,
vbus_id, clk_type, enable);
}
static int skl_clk_prepare(struct clk_hw *hw)
{
struct skl_clk *clkdev = to_skl_clk(hw);
return skl_clk_change_status(clkdev, true);
}
static void skl_clk_unprepare(struct clk_hw *hw)
{
struct skl_clk *clkdev = to_skl_clk(hw);
skl_clk_change_status(clkdev, false);
}
static int skl_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct skl_clk *clkdev = to_skl_clk(hw);
struct skl_clk_rate_cfg_table *rcfg;
int clk_type;
if (!rate)
return -EINVAL;
rcfg = skl_get_rate_cfg(clkdev->pdata->ssp_clks[clkdev->id].rate_cfg,
rate);
if (!rcfg)
return -EINVAL;
clk_type = skl_get_clk_type(clkdev->id);
if (clk_type < 0)
return clk_type;
skl_fill_clk_ipc(rcfg, clk_type);
clkdev->rate = rate;
return 0;
}
static unsigned long skl_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct skl_clk *clkdev = to_skl_clk(hw);
if (clkdev->rate)
return clkdev->rate;
return 0;
}
/* Not supported by the clk driver. Implemented to satisfy the clk framework */
static long skl_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
return rate;
}
/*
* prepare/unprepare are used instead of enable/disable as IPC will be sent
* in non-atomic context.
*/
static const struct clk_ops skl_clk_ops = {
.prepare = skl_clk_prepare,
.unprepare = skl_clk_unprepare,
.set_rate = skl_clk_set_rate,
.round_rate = skl_clk_round_rate,
.recalc_rate = skl_clk_recalc_rate,
};
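
For context, a machine driver consumes these clocks through the generic clk API; clk_prepare_enable() lands in skl_clk_prepare() above, which issues the DMA-control IPC from sleepable context. A hedged consumer sketch; the lookup name "ssp1_sclk" is an assumption (the actual names come from the platform data handed to register_skl_clk() below):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* hypothetical consumer; "ssp1_sclk" is an assumed clkdev lookup name */
static int example_enable_ssp1_sclk(struct device *dev)
{
	struct clk *sclk;
	int ret;

	sclk = devm_clk_get(dev, "ssp1_sclk");
	if (IS_ERR(sclk))
		return PTR_ERR(sclk);

	ret = clk_set_rate(sclk, 1536000);	/* must match an NHLT-derived rate */
	if (ret)
		return ret;

	return clk_prepare_enable(sclk);	/* ends up in skl_clk_prepare() */
}
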
static void unregister_parent_src_clk(struct skl_clk_parent *pclk,
unsigned int id)
{
while (id--) {
clkdev_drop(pclk[id].lookup);
clk_hw_unregister_fixed_rate(pclk[id].hw);
}
}
static void unregister_src_clk(struct skl_clk_data *dclk)
{
while (dclk->avail_clk_cnt--)
clkdev_drop(dclk->clk[dclk->avail_clk_cnt]->lookup);
}
static int skl_register_parent_clks(struct device *dev,
struct skl_clk_parent *parent,
struct skl_clk_parent_src *pclk)
{
int i, ret;
for (i = 0; i < SKL_MAX_CLK_SRC; i++) {
/* Register Parent clock */
parent[i].hw = clk_hw_register_fixed_rate(dev, pclk[i].name,
pclk[i].parent_name, 0, pclk[i].rate);
if (IS_ERR(parent[i].hw)) {
ret = PTR_ERR(parent[i].hw);
goto err;
}
parent[i].lookup = clkdev_hw_create(parent[i].hw, pclk[i].name,
NULL);
if (!parent[i].lookup) {
clk_hw_unregister_fixed_rate(parent[i].hw);
ret = -ENOMEM;
goto err;
}
}
return 0;
err:
unregister_parent_src_clk(parent, i);
return ret;
}
/* Assign fmt_config to clk_data */
static struct skl_clk *register_skl_clk(struct device *dev,
struct skl_ssp_clk *clk,
struct skl_clk_pdata *clk_pdata, int id)
{
struct clk_init_data init;
struct skl_clk *clkdev;
int ret;
clkdev = devm_kzalloc(dev, sizeof(*clkdev), GFP_KERNEL);
if (!clkdev)
return ERR_PTR(-ENOMEM);
init.name = clk->name;
init.ops = &skl_clk_ops;
init.flags = CLK_SET_RATE_GATE;
init.parent_names = &clk->parent_name;
init.num_parents = 1;
clkdev->hw.init = &init;
clkdev->pdata = clk_pdata;
clkdev->id = id;
ret = devm_clk_hw_register(dev, &clkdev->hw);
if (ret) {
clkdev = ERR_PTR(ret);
return clkdev;
}
clkdev->lookup = clkdev_hw_create(&clkdev->hw, init.name, NULL);
if (!clkdev->lookup)
clkdev = ERR_PTR(-ENOMEM);
return clkdev;
}
static int skl_clk_dev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device *parent_dev = dev->parent;
struct skl_clk_parent_src *parent_clks;
struct skl_clk_pdata *clk_pdata;
struct skl_clk_data *data;
struct skl_ssp_clk *clks;
int ret, i;
clk_pdata = dev_get_platdata(&pdev->dev);
parent_clks = clk_pdata->parent_clks;
clks = clk_pdata->ssp_clks;
if (!parent_clks || !clks)
return -EIO;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
/* Register Parent clock */
ret = skl_register_parent_clks(parent_dev, data->parent, parent_clks);
if (ret < 0)
return ret;
for (i = 0; i < clk_pdata->num_clks; i++) {
/*
* Only register valid clocks
* i.e. for which nhlt entry is present.
*/
if (clks[i].rate_cfg[0].rate == 0)
continue;
data->clk[data->avail_clk_cnt] = register_skl_clk(dev,
&clks[i], clk_pdata, i);
if (IS_ERR(data->clk[data->avail_clk_cnt])) {
ret = PTR_ERR(data->clk[data->avail_clk_cnt]);
goto err_unreg_skl_clk;
}
data->avail_clk_cnt++;
}
platform_set_drvdata(pdev, data);
return 0;
err_unreg_skl_clk:
unregister_src_clk(data);
unregister_parent_src_clk(data->parent, SKL_MAX_CLK_SRC);
return ret;
}
static void skl_clk_dev_remove(struct platform_device *pdev)
{
struct skl_clk_data *data;
data = platform_get_drvdata(pdev);
unregister_src_clk(data);
unregister_parent_src_clk(data->parent, SKL_MAX_CLK_SRC);
}
static struct platform_driver skl_clk_driver = {
.driver = {
.name = "skl-ssp-clk",
},
.probe = skl_clk_dev_probe,
.remove_new = skl_clk_dev_remove,
};
module_platform_driver(skl_clk_driver);
MODULE_DESCRIPTION("Skylake clock driver");
MODULE_AUTHOR("Jaikrishna Nemallapudi <jaikrishnax.nemallapudi@intel.com>");
MODULE_AUTHOR("Subhransu S. Prusty <subhransu.s.prusty@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:skl-ssp-clk");

sound/soc/intel/skylake/skl-ssp-clk.h

@@ -1,108 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* skl-ssp-clk.h - Skylake ssp clock information and ipc structure
*
* Copyright (C) 2017 Intel Corp
* Author: Jaikrishna Nemallapudi <jaikrishnax.nemallapudi@intel.com>
* Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
*/
#ifndef SOUND_SOC_SKL_SSP_CLK_H
#define SOUND_SOC_SKL_SSP_CLK_H
#define SKL_MAX_SSP 6
/* xtal/cardinal/pll, parent of ssp clocks and mclk */
#define SKL_MAX_CLK_SRC 3
#define SKL_MAX_SSP_CLK_TYPES 3 /* mclk, sclk, sclkfs */
#define SKL_MAX_CLK_CNT (SKL_MAX_SSP * SKL_MAX_SSP_CLK_TYPES)
/* Max number of configurations supported for each clock */
#define SKL_MAX_CLK_RATES 10
#define SKL_SCLK_OFS SKL_MAX_SSP
#define SKL_SCLKFS_OFS (SKL_SCLK_OFS + SKL_MAX_SSP)
enum skl_clk_type {
SKL_MCLK,
SKL_SCLK,
SKL_SCLK_FS,
};
enum skl_clk_src_type {
SKL_XTAL,
SKL_CARDINAL,
SKL_PLL,
};
struct skl_clk_parent_src {
u8 clk_id;
const char *name;
unsigned long rate;
const char *parent_name;
};
struct skl_tlv_hdr {
u32 type;
u32 size;
};
struct skl_dmactrl_mclk_cfg {
struct skl_tlv_hdr hdr;
/* DMA Clk TLV params */
u32 clk_warm_up:16;
u32 mclk:1;
u32 warm_up_over:1;
u32 rsvd0:14;
u32 clk_stop_delay:16;
u32 keep_running:1;
u32 clk_stop_over:1;
u32 rsvd1:14;
};
struct skl_dmactrl_sclkfs_cfg {
struct skl_tlv_hdr hdr;
/* DMA SClk&FS TLV params */
u32 sampling_frequency;
u32 bit_depth;
u32 channel_map;
u32 channel_config;
u32 interleaving_style;
u32 number_of_channels : 8;
u32 valid_bit_depth : 8;
u32 sample_type : 8;
u32 reserved : 8;
};
union skl_clk_ctrl_ipc {
struct skl_dmactrl_mclk_cfg mclk;
struct skl_dmactrl_sclkfs_cfg sclk_fs;
};
struct skl_clk_rate_cfg_table {
unsigned long rate;
union skl_clk_ctrl_ipc dma_ctl_ipc;
void *config;
};
/*
* rate for mclk will be in rates[0]. For sclk and sclkfs, rates[] store
* all possible clocks ssp can generate for that platform.
*/
struct skl_ssp_clk {
const char *name;
const char *parent_name;
struct skl_clk_rate_cfg_table rate_cfg[SKL_MAX_CLK_RATES];
};
struct skl_clk_pdata {
struct skl_clk_parent_src *parent_clks;
int num_clks;
struct skl_ssp_clk *ssp_clks;
void *pvt_data;
};
#endif /* SOUND_SOC_SKL_SSP_CLK_H */

sound/soc/intel/skylake/skl-sst-cldma.c

@@ -1,373 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* skl-sst-cldma.c - Code Loader DMA handler
*
* Copyright (C) 2015, Intel Corporation.
* Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <sound/hda_register.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
static void skl_cldma_int_enable(struct sst_dsp *ctx)
{
sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
SKL_ADSPIC_CL_DMA, SKL_ADSPIC_CL_DMA);
}
void skl_cldma_int_disable(struct sst_dsp *ctx)
{
sst_dsp_shim_update_bits_unlocked(ctx,
SKL_ADSP_REG_ADSPIC, SKL_ADSPIC_CL_DMA, 0);
}
static void skl_cldma_stream_run(struct sst_dsp *ctx, bool enable)
{
unsigned char val;
int timeout;
sst_dsp_shim_update_bits_unlocked(ctx,
SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(enable));
udelay(3);
timeout = 300;
do {
/* wait for the hardware to report the requested state of the Run bit */
val = sst_dsp_shim_read(ctx, SKL_ADSP_REG_CL_SD_CTL) &
CL_SD_CTL_RUN_MASK;
if (enable && val)
break;
else if (!enable && !val)
break;
udelay(3);
} while (--timeout);
if (timeout == 0)
dev_err(ctx->dev, "Failed to set Run bit=%d enable=%d\n", val, enable);
}
static void skl_cldma_stream_clear(struct sst_dsp *ctx)
{
/* make sure Run bit is cleared before setting stream register */
skl_cldma_stream_run(ctx, 0);
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(0));
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(0));
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(0));
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(0));
sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL, CL_SD_BDLPLBA(0));
sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU, 0);
sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, 0);
sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, 0);
}
/* Code loader helper APIs */
static void skl_cldma_setup_bdle(struct sst_dsp *ctx,
struct snd_dma_buffer *dmab_data,
__le32 **bdlp, int size, int with_ioc)
{
__le32 *bdl = *bdlp;
int remaining = ctx->cl_dev.bufsize;
int offset = 0;
ctx->cl_dev.frags = 0;
while (remaining > 0) {
phys_addr_t addr;
int chunk;
addr = snd_sgbuf_get_addr(dmab_data, offset);
bdl[0] = cpu_to_le32(lower_32_bits(addr));
bdl[1] = cpu_to_le32(upper_32_bits(addr));
chunk = snd_sgbuf_get_chunk_size(dmab_data, offset, size);
bdl[2] = cpu_to_le32(chunk);
remaining -= chunk;
bdl[3] = (remaining > 0) ? 0 : cpu_to_le32(0x01);
bdl += 4;
offset += chunk;
ctx->cl_dev.frags++;
}
}
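
Each BDL entry written by the loop above is four little-endian 32-bit words: buffer address low/high, chunk length, and an interrupt-on-completion flag in bit 0 of the last word. The same layout expressed as a struct, purely for clarity (the driver writes raw __le32 words instead):

/* sketch of one buffer descriptor list entry, as filled in skl_cldma_setup_bdle() */
struct bdl_entry {
	__le32 addr_lo;	/* lower_32_bits() of the buffer address */
	__le32 addr_hi;	/* upper_32_bits() of the buffer address */
	__le32 size;	/* chunk size in bytes */
	__le32 ioc;	/* 0x01 on the last fragment: interrupt on completion */
};
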
/*
* Setup the controller:
* configure the registers with the dma buffer address and
* enable interrupts.
* Note: stream 1 is used for the transfer.
*/
static void skl_cldma_setup_controller(struct sst_dsp *ctx,
struct snd_dma_buffer *dmab_bdl, unsigned int max_size,
u32 count)
{
skl_cldma_stream_clear(ctx);
sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL,
CL_SD_BDLPLBA(dmab_bdl->addr));
sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU,
CL_SD_BDLPUBA(dmab_bdl->addr));
sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, max_size);
sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, count - 1);
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(1));
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(1));
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(1));
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(FW_CL_STREAM_NUMBER));
}
static void skl_cldma_setup_spb(struct sst_dsp *ctx,
unsigned int size, bool enable)
{
if (enable)
sst_dsp_shim_update_bits_unlocked(ctx,
SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
CL_SPBFIFO_SPBFCCTL_SPIBE(1));
sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, size);
}
static void skl_cldma_cleanup_spb(struct sst_dsp *ctx)
{
sst_dsp_shim_update_bits_unlocked(ctx,
SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
CL_SPBFIFO_SPBFCCTL_SPIBE(0));
sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, 0);
}
static void skl_cldma_cleanup(struct sst_dsp *ctx)
{
skl_cldma_cleanup_spb(ctx);
skl_cldma_stream_clear(ctx);
ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_bdl);
}
int skl_cldma_wait_interruptible(struct sst_dsp *ctx)
{
int ret = 0;
if (!wait_event_timeout(ctx->cl_dev.wait_queue,
ctx->cl_dev.wait_condition,
msecs_to_jiffies(SKL_WAIT_TIMEOUT))) {
dev_err(ctx->dev, "%s: Wait timeout\n", __func__);
ret = -EIO;
goto cleanup;
}
dev_dbg(ctx->dev, "%s: Event wake\n", __func__);
if (ctx->cl_dev.wake_status != SKL_CL_DMA_BUF_COMPLETE) {
dev_err(ctx->dev, "%s: DMA Error\n", __func__);
ret = -EIO;
}
cleanup:
ctx->cl_dev.wake_status = SKL_CL_DMA_STATUS_NONE;
return ret;
}
static void skl_cldma_stop(struct sst_dsp *ctx)
{
skl_cldma_stream_run(ctx, false);
}
static void skl_cldma_fill_buffer(struct sst_dsp *ctx, unsigned int size,
const void *curr_pos, bool intr_enable, bool trigger)
{
dev_dbg(ctx->dev, "Size: %x, intr_enable: %d\n", size, intr_enable);
dev_dbg(ctx->dev, "buf_pos_index:%d, trigger:%d\n",
ctx->cl_dev.dma_buffer_offset, trigger);
dev_dbg(ctx->dev, "spib position: %d\n", ctx->cl_dev.curr_spib_pos);
/*
* Check if the size exceeds the buffer boundary. If it does,
* copy up to the buffer size first, then copy the remainder
* from the start of the ring buffer.
*/
if (ctx->cl_dev.dma_buffer_offset + size > ctx->cl_dev.bufsize) {
unsigned int size_b = ctx->cl_dev.bufsize -
ctx->cl_dev.dma_buffer_offset;
memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
curr_pos, size_b);
size -= size_b;
curr_pos += size_b;
ctx->cl_dev.dma_buffer_offset = 0;
}
memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
curr_pos, size);
if (ctx->cl_dev.curr_spib_pos == ctx->cl_dev.bufsize)
ctx->cl_dev.dma_buffer_offset = 0;
else
ctx->cl_dev.dma_buffer_offset = ctx->cl_dev.curr_spib_pos;
ctx->cl_dev.wait_condition = false;
if (intr_enable)
skl_cldma_int_enable(ctx);
ctx->cl_dev.ops.cl_setup_spb(ctx, ctx->cl_dev.curr_spib_pos, trigger);
if (trigger)
ctx->cl_dev.ops.cl_trigger(ctx, true);
}
/*
* The CL dma doesn't have any way to update the transfer status until a BDL
* buffer is fully transferred, so copying is handled in two ways:
*
* 1. Interrupt on buffer done, used when the size to be transferred is
* larger than the ring buffer.
* 2. Polling on a fw register, used when the data left to transfer fits
* within the ring buffer. The caller takes care of polling the required
* status register to identify the transfer status.
*
* If the wait flag is set, wait for the BDL interrupt and copy the next
* chunk until bytes_left reaches 0.
* If the wait flag is not set, don't wait for the BDL interrupt; after
* copying the first chunk, return the number of bytes left to be copied.
*/
static int
skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin,
u32 total_size, bool wait)
{
int ret;
bool start = true;
unsigned int excess_bytes;
u32 size;
unsigned int bytes_left = total_size;
const void *curr_pos = bin;
if (!total_size)
return -EINVAL;
dev_dbg(ctx->dev, "%s: Total binary size: %u\n", __func__, bytes_left);
while (bytes_left) {
if (bytes_left > ctx->cl_dev.bufsize) {
/*
* the DMA transfers only up to the write pointer
* as updated in SPIB
*/
if (ctx->cl_dev.curr_spib_pos == 0)
ctx->cl_dev.curr_spib_pos = ctx->cl_dev.bufsize;
size = ctx->cl_dev.bufsize;
skl_cldma_fill_buffer(ctx, size, curr_pos, true, start);
if (wait) {
start = false;
ret = skl_cldma_wait_interruptible(ctx);
if (ret < 0) {
skl_cldma_stop(ctx);
return ret;
}
}
} else {
skl_cldma_int_disable(ctx);
if ((ctx->cl_dev.curr_spib_pos + bytes_left)
<= ctx->cl_dev.bufsize) {
ctx->cl_dev.curr_spib_pos += bytes_left;
} else {
excess_bytes = bytes_left -
(ctx->cl_dev.bufsize -
ctx->cl_dev.curr_spib_pos);
ctx->cl_dev.curr_spib_pos = excess_bytes;
}
size = bytes_left;
skl_cldma_fill_buffer(ctx, size,
curr_pos, false, start);
}
bytes_left -= size;
curr_pos = curr_pos + size;
if (!wait)
return bytes_left;
}
return bytes_left;
}
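
The SPIB bookkeeping in the tail branch is easiest to see with numbers. A user-space sketch, assuming a 16 KiB ring for illustration (the real ring is 32 * PAGE_SIZE) with the write pointer at 12 KiB and 8 KiB left to copy:

#include <stdio.h>

int main(void)
{
	unsigned int bufsize = 16384;	/* assumed ring size */
	unsigned int spib = 12288;	/* current software position in buffer */
	unsigned int bytes_left = 8192;

	if (spib + bytes_left <= bufsize)
		spib += bytes_left;
	else	/* wraps: only the excess past the end remains in front */
		spib = bytes_left - (bufsize - spib);

	printf("new spib = %u\n", spib);	/* 4096, i.e. 4 KiB past the wrap */
	return 0;
}
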
void skl_cldma_process_intr(struct sst_dsp *ctx)
{
u8 cl_dma_intr_status;
cl_dma_intr_status =
sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_CL_SD_STS);
if (!(cl_dma_intr_status & SKL_CL_DMA_SD_INT_COMPLETE))
ctx->cl_dev.wake_status = SKL_CL_DMA_ERR;
else
ctx->cl_dev.wake_status = SKL_CL_DMA_BUF_COMPLETE;
ctx->cl_dev.wait_condition = true;
wake_up(&ctx->cl_dev.wait_queue);
}
int skl_cldma_prepare(struct sst_dsp *ctx)
{
int ret;
__le32 *bdl;
ctx->cl_dev.bufsize = SKL_MAX_BUFFER_SIZE;
/* Allocate cl ops */
ctx->cl_dev.ops.cl_setup_bdle = skl_cldma_setup_bdle;
ctx->cl_dev.ops.cl_setup_controller = skl_cldma_setup_controller;
ctx->cl_dev.ops.cl_setup_spb = skl_cldma_setup_spb;
ctx->cl_dev.ops.cl_cleanup_spb = skl_cldma_cleanup_spb;
ctx->cl_dev.ops.cl_trigger = skl_cldma_stream_run;
ctx->cl_dev.ops.cl_cleanup_controller = skl_cldma_cleanup;
ctx->cl_dev.ops.cl_copy_to_dmabuf = skl_cldma_copy_to_buf;
ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop;
/* Allocate buffer */
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, ctx->dev, ctx->cl_dev.bufsize,
&ctx->cl_dev.dmab_data);
if (ret < 0) {
dev_err(ctx->dev, "Alloc buffer for base fw failed: %x\n", ret);
return ret;
}
/* Setup Code loader BDL */
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, ctx->dev, BDL_SIZE, &ctx->cl_dev.dmab_bdl);
if (ret < 0) {
dev_err(ctx->dev, "Alloc buffer for blde failed: %x\n", ret);
ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
return ret;
}
bdl = (__le32 *)ctx->cl_dev.dmab_bdl.area;
/* Allocate BDLs */
ctx->cl_dev.ops.cl_setup_bdle(ctx, &ctx->cl_dev.dmab_data,
&bdl, ctx->cl_dev.bufsize, 1);
ctx->cl_dev.ops.cl_setup_controller(ctx, &ctx->cl_dev.dmab_bdl,
ctx->cl_dev.bufsize, ctx->cl_dev.frags);
ctx->cl_dev.curr_spib_pos = 0;
ctx->cl_dev.dma_buffer_offset = 0;
init_waitqueue_head(&ctx->cl_dev.wait_queue);
return ret;
}

sound/soc/intel/skylake/skl-sst-cldma.h

@@ -1,243 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Intel Code Loader DMA support
*
* Copyright (C) 2015, Intel Corporation.
*/
#ifndef SKL_SST_CLDMA_H_
#define SKL_SST_CLDMA_H_
#define FW_CL_STREAM_NUMBER 0x1
#define DMA_ADDRESS_128_BITS_ALIGNMENT 7
#define BDL_ALIGN(x) (x >> DMA_ADDRESS_128_BITS_ALIGNMENT)
#define SKL_ADSPIC_CL_DMA 0x2
#define SKL_ADSPIS_CL_DMA 0x2
#define SKL_CL_DMA_SD_INT_DESC_ERR 0x10 /* Descriptor error interrupt */
#define SKL_CL_DMA_SD_INT_FIFO_ERR 0x08 /* FIFO error interrupt */
#define SKL_CL_DMA_SD_INT_COMPLETE 0x04 /* Buffer completion interrupt */
/* Intel HD Audio Code Loader DMA Registers */
#define HDA_ADSP_LOADER_BASE 0x80
/* Stream Registers */
#define SKL_ADSP_REG_CL_SD_CTL (HDA_ADSP_LOADER_BASE + 0x00)
#define SKL_ADSP_REG_CL_SD_STS (HDA_ADSP_LOADER_BASE + 0x03)
#define SKL_ADSP_REG_CL_SD_LPIB (HDA_ADSP_LOADER_BASE + 0x04)
#define SKL_ADSP_REG_CL_SD_CBL (HDA_ADSP_LOADER_BASE + 0x08)
#define SKL_ADSP_REG_CL_SD_LVI (HDA_ADSP_LOADER_BASE + 0x0c)
#define SKL_ADSP_REG_CL_SD_FIFOW (HDA_ADSP_LOADER_BASE + 0x0e)
#define SKL_ADSP_REG_CL_SD_FIFOSIZE (HDA_ADSP_LOADER_BASE + 0x10)
#define SKL_ADSP_REG_CL_SD_FORMAT (HDA_ADSP_LOADER_BASE + 0x12)
#define SKL_ADSP_REG_CL_SD_FIFOL (HDA_ADSP_LOADER_BASE + 0x14)
#define SKL_ADSP_REG_CL_SD_BDLPL (HDA_ADSP_LOADER_BASE + 0x18)
#define SKL_ADSP_REG_CL_SD_BDLPU (HDA_ADSP_LOADER_BASE + 0x1c)
/* CL: Software Position Based FIFO Capability Registers */
#define SKL_ADSP_REG_CL_SPBFIFO (HDA_ADSP_LOADER_BASE + 0x20)
#define SKL_ADSP_REG_CL_SPBFIFO_SPBFCH (SKL_ADSP_REG_CL_SPBFIFO + 0x0)
#define SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL (SKL_ADSP_REG_CL_SPBFIFO + 0x4)
#define SKL_ADSP_REG_CL_SPBFIFO_SPIB (SKL_ADSP_REG_CL_SPBFIFO + 0x8)
#define SKL_ADSP_REG_CL_SPBFIFO_MAXFIFOS (SKL_ADSP_REG_CL_SPBFIFO + 0xc)
/* CL: Stream Descriptor x Control */
/* Stream Reset */
#define CL_SD_CTL_SRST_SHIFT 0
#define CL_SD_CTL_SRST_MASK (1 << CL_SD_CTL_SRST_SHIFT)
#define CL_SD_CTL_SRST(x) \
((x << CL_SD_CTL_SRST_SHIFT) & CL_SD_CTL_SRST_MASK)
/* Stream Run */
#define CL_SD_CTL_RUN_SHIFT 1
#define CL_SD_CTL_RUN_MASK (1 << CL_SD_CTL_RUN_SHIFT)
#define CL_SD_CTL_RUN(x) \
((x << CL_SD_CTL_RUN_SHIFT) & CL_SD_CTL_RUN_MASK)
/* Interrupt On Completion Enable */
#define CL_SD_CTL_IOCE_SHIFT 2
#define CL_SD_CTL_IOCE_MASK (1 << CL_SD_CTL_IOCE_SHIFT)
#define CL_SD_CTL_IOCE(x) \
((x << CL_SD_CTL_IOCE_SHIFT) & CL_SD_CTL_IOCE_MASK)
/* FIFO Error Interrupt Enable */
#define CL_SD_CTL_FEIE_SHIFT 3
#define CL_SD_CTL_FEIE_MASK (1 << CL_SD_CTL_FEIE_SHIFT)
#define CL_SD_CTL_FEIE(x) \
((x << CL_SD_CTL_FEIE_SHIFT) & CL_SD_CTL_FEIE_MASK)
/* Descriptor Error Interrupt Enable */
#define CL_SD_CTL_DEIE_SHIFT 4
#define CL_SD_CTL_DEIE_MASK (1 << CL_SD_CTL_DEIE_SHIFT)
#define CL_SD_CTL_DEIE(x) \
((x << CL_SD_CTL_DEIE_SHIFT) & CL_SD_CTL_DEIE_MASK)
/* FIFO Limit Change */
#define CL_SD_CTL_FIFOLC_SHIFT 5
#define CL_SD_CTL_FIFOLC_MASK (1 << CL_SD_CTL_FIFOLC_SHIFT)
#define CL_SD_CTL_FIFOLC(x) \
((x << CL_SD_CTL_FIFOLC_SHIFT) & CL_SD_CTL_FIFOLC_MASK)
/* Stripe Control */
#define CL_SD_CTL_STRIPE_SHIFT 16
#define CL_SD_CTL_STRIPE_MASK (0x3 << CL_SD_CTL_STRIPE_SHIFT)
#define CL_SD_CTL_STRIPE(x) \
((x << CL_SD_CTL_STRIPE_SHIFT) & CL_SD_CTL_STRIPE_MASK)
/* Traffic Priority */
#define CL_SD_CTL_TP_SHIFT 18
#define CL_SD_CTL_TP_MASK (1 << CL_SD_CTL_TP_SHIFT)
#define CL_SD_CTL_TP(x) \
((x << CL_SD_CTL_TP_SHIFT) & CL_SD_CTL_TP_MASK)
/* Bidirectional Direction Control */
#define CL_SD_CTL_DIR_SHIFT 19
#define CL_SD_CTL_DIR_MASK (1 << CL_SD_CTL_DIR_SHIFT)
#define CL_SD_CTL_DIR(x) \
((x << CL_SD_CTL_DIR_SHIFT) & CL_SD_CTL_DIR_MASK)
/* Stream Number */
#define CL_SD_CTL_STRM_SHIFT 20
#define CL_SD_CTL_STRM_MASK (0xf << CL_SD_CTL_STRM_SHIFT)
#define CL_SD_CTL_STRM(x) \
((x << CL_SD_CTL_STRM_SHIFT) & CL_SD_CTL_STRM_MASK)
/* CL: Stream Descriptor x Status */
/* Buffer Completion Interrupt Status */
#define CL_SD_STS_BCIS(x) CL_SD_CTL_IOCE(x)
/* FIFO Error */
#define CL_SD_STS_FIFOE(x) CL_SD_CTL_FEIE(x)
/* Descriptor Error */
#define CL_SD_STS_DESE(x) CL_SD_CTL_DEIE(x)
/* FIFO Ready */
#define CL_SD_STS_FIFORDY(x) CL_SD_CTL_FIFOLC(x)
/* CL: Stream Descriptor x Last Valid Index */
#define CL_SD_LVI_SHIFT 0
#define CL_SD_LVI_MASK (0xff << CL_SD_LVI_SHIFT)
#define CL_SD_LVI(x) ((x << CL_SD_LVI_SHIFT) & CL_SD_LVI_MASK)
/* CL: Stream Descriptor x FIFO Eviction Watermark */
#define CL_SD_FIFOW_SHIFT 0
#define CL_SD_FIFOW_MASK (0x7 << CL_SD_FIFOW_SHIFT)
#define CL_SD_FIFOW(x) \
((x << CL_SD_FIFOW_SHIFT) & CL_SD_FIFOW_MASK)
/* CL: Stream Descriptor x Buffer Descriptor List Pointer Lower Base Address */
/* Protect Bits */
#define CL_SD_BDLPLBA_PROT_SHIFT 0
#define CL_SD_BDLPLBA_PROT_MASK (1 << CL_SD_BDLPLBA_PROT_SHIFT)
#define CL_SD_BDLPLBA_PROT(x) \
((x << CL_SD_BDLPLBA_PROT_SHIFT) & CL_SD_BDLPLBA_PROT_MASK)
/* Buffer Descriptor List Lower Base Address */
#define CL_SD_BDLPLBA_SHIFT 7
#define CL_SD_BDLPLBA_MASK (0x1ffffff << CL_SD_BDLPLBA_SHIFT)
#define CL_SD_BDLPLBA(x) \
((BDL_ALIGN(lower_32_bits(x)) << CL_SD_BDLPLBA_SHIFT) & CL_SD_BDLPLBA_MASK)
/* Buffer Descriptor List Upper Base Address */
#define CL_SD_BDLPUBA_SHIFT 0
#define CL_SD_BDLPUBA_MASK (0xffffffff << CL_SD_BDLPUBA_SHIFT)
#define CL_SD_BDLPUBA(x) \
((upper_32_bits(x) << CL_SD_BDLPUBA_SHIFT) & CL_SD_BDLPUBA_MASK)
/*
* Code Loader - Software Position Based FIFO
* Capability Registers x Software Position Based FIFO Header
*/
/* Next Capability Pointer */
#define CL_SPBFIFO_SPBFCH_PTR_SHIFT 0
#define CL_SPBFIFO_SPBFCH_PTR_MASK (0xff << CL_SPBFIFO_SPBFCH_PTR_SHIFT)
#define CL_SPBFIFO_SPBFCH_PTR(x) \
((x << CL_SPBFIFO_SPBFCH_PTR_SHIFT) & CL_SPBFIFO_SPBFCH_PTR_MASK)
/* Capability Identifier */
#define CL_SPBFIFO_SPBFCH_ID_SHIFT 16
#define CL_SPBFIFO_SPBFCH_ID_MASK (0xfff << CL_SPBFIFO_SPBFCH_ID_SHIFT)
#define CL_SPBFIFO_SPBFCH_ID(x) \
((x << CL_SPBFIFO_SPBFCH_ID_SHIFT) & CL_SPBFIFO_SPBFCH_ID_MASK)
/* Capability Version */
#define CL_SPBFIFO_SPBFCH_VER_SHIFT 28
#define CL_SPBFIFO_SPBFCH_VER_MASK (0xf << CL_SPBFIFO_SPBFCH_VER_SHIFT)
#define CL_SPBFIFO_SPBFCH_VER(x) \
((x << CL_SPBFIFO_SPBFCH_VER_SHIFT) & CL_SPBFIFO_SPBFCH_VER_MASK)
/* Software Position in Buffer Enable */
#define CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT 0
#define CL_SPBFIFO_SPBFCCTL_SPIBE_MASK (1 << CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT)
#define CL_SPBFIFO_SPBFCCTL_SPIBE(x) \
((x << CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT) & CL_SPBFIFO_SPBFCCTL_SPIBE_MASK)
/* SST IPC SKL defines */
#define SKL_WAIT_TIMEOUT 500 /* 500 msec */
#define SKL_MAX_BUFFER_SIZE (32 * PAGE_SIZE)
enum skl_cl_dma_wake_states {
SKL_CL_DMA_STATUS_NONE = 0,
SKL_CL_DMA_BUF_COMPLETE,
SKL_CL_DMA_ERR, /* TODO: Expand the error states */
};
struct sst_dsp;
struct skl_cl_dev_ops {
void (*cl_setup_bdle)(struct sst_dsp *ctx,
struct snd_dma_buffer *dmab_data,
__le32 **bdlp, int size, int with_ioc);
void (*cl_setup_controller)(struct sst_dsp *ctx,
struct snd_dma_buffer *dmab_bdl,
unsigned int max_size, u32 page_count);
void (*cl_setup_spb)(struct sst_dsp *ctx,
unsigned int size, bool enable);
void (*cl_cleanup_spb)(struct sst_dsp *ctx);
void (*cl_trigger)(struct sst_dsp *ctx, bool enable);
void (*cl_cleanup_controller)(struct sst_dsp *ctx);
int (*cl_copy_to_dmabuf)(struct sst_dsp *ctx,
const void *bin, u32 size, bool wait);
void (*cl_stop_dma)(struct sst_dsp *ctx);
};
/**
* struct skl_cl_dev - holds information for code loader dma transfer
*
* @dmab_data: buffer pointer
* @dmab_bdl: buffer descriptor list
* @bufsize: ring buffer size
* @frags: Last valid buffer descriptor index in the BDL
* @curr_spib_pos: Current position in ring buffer
* @dma_buffer_offset: dma buffer offset
* @ops: operations supported on CL dma
* @wait_queue: wait queue to wake on for the wake event
* @wake_status: DMA wake status
* @wait_condition: condition to wait on wait queue
*/
struct skl_cl_dev {
struct snd_dma_buffer dmab_data;
struct snd_dma_buffer dmab_bdl;
unsigned int bufsize;
unsigned int frags;
unsigned int curr_spib_pos;
unsigned int dma_buffer_offset;
struct skl_cl_dev_ops ops;
wait_queue_head_t wait_queue;
int wake_status;
bool wait_condition;
};
#endif /* SKL_SST_CLDMA_H_ */

sound/soc/intel/skylake/skl-sst-dsp.c

@@ -1,462 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* skl-sst-dsp.c - SKL SST library generic function
*
* Copyright (C) 2014-15, Intel Corporation.
* Author:Rafal Redzimski <rafal.f.redzimski@intel.com>
* Jeeja KP <jeeja.kp@intel.com>
*/
#include <sound/pcm.h>
#include "../common/sst-dsp.h"
#include "../common/sst-ipc.h"
#include "../common/sst-dsp-priv.h"
#include "skl.h"
/* various timeout values */
#define SKL_DSP_PU_TO 50
#define SKL_DSP_PD_TO 50
#define SKL_DSP_RESET_TO 50
void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state)
{
mutex_lock(&ctx->mutex);
ctx->sst_state = state;
mutex_unlock(&ctx->mutex);
}
/*
* Initialize core power state and usage count. To be called after
* successful first boot, hence core 0 will be running and the other
* cores will be in reset.
*/
void skl_dsp_init_core_state(struct sst_dsp *ctx)
{
struct skl_dev *skl = ctx->thread_context;
int i;
skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING;
skl->cores.usage_count[SKL_DSP_CORE0_ID] = 1;
for (i = SKL_DSP_CORE0_ID + 1; i < skl->cores.count; i++) {
skl->cores.state[i] = SKL_DSP_RESET;
skl->cores.usage_count[i] = 0;
}
}
/* Get the mask for all enabled cores */
unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx)
{
struct skl_dev *skl = ctx->thread_context;
unsigned int core_mask, en_cores_mask;
u32 val;
core_mask = SKL_DSP_CORES_MASK(skl->cores.count);
val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);
/* Cores having CPA bit set */
en_cores_mask = (val & SKL_ADSPCS_CPA_MASK(core_mask)) >>
SKL_ADSPCS_CPA_SHIFT;
/* And cores having CRST bit cleared */
en_cores_mask &= (~val & SKL_ADSPCS_CRST_MASK(core_mask)) >>
SKL_ADSPCS_CRST_SHIFT;
/* And cores having CSTALL bit cleared */
en_cores_mask &= (~val & SKL_ADSPCS_CSTALL_MASK(core_mask)) >>
SKL_ADSPCS_CSTALL_SHIFT;
en_cores_mask &= core_mask;
dev_dbg(ctx->dev, "DSP enabled cores mask = %x\n", en_cores_mask);
return en_cores_mask;
}
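
The mask arithmetic here mirrors the ADSPCS register layout from skl-sst-dsp.h below (CRST at bit 0, CSTALL at bit 8, SPA at bit 16, CPA at bit 24, one bit per core). A condensed user-space check of the "core 0 enabled" condition:

#include <assert.h>

#define CRST(cm)	((cm) << 0)
#define CSTALL(cm)	((cm) << 8)
#define SPA(cm)		((cm) << 16)
#define CPA(cm)		((cm) << 24)

int main(void)
{
	unsigned int adspcs = SPA(1) | CPA(1);	/* core 0 powered, out of reset, unstalled */
	unsigned int mask = 1;			/* SKL_DSP_CORE0_MASK */

	assert(adspcs & CPA(mask));		/* power actually on */
	assert(!(adspcs & CRST(mask)));		/* not held in reset */
	assert(!(adspcs & CSTALL(mask)));	/* not stalled */
	return 0;
}
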
static int
skl_dsp_core_set_reset_state(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx,
SKL_ADSP_REG_ADSPCS, SKL_ADSPCS_CRST_MASK(core_mask),
SKL_ADSPCS_CRST_MASK(core_mask));
/* poll with timeout to check if operation successful */
ret = sst_dsp_register_poll(ctx,
SKL_ADSP_REG_ADSPCS,
SKL_ADSPCS_CRST_MASK(core_mask),
SKL_ADSPCS_CRST_MASK(core_mask),
SKL_DSP_RESET_TO,
"Set reset");
if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
SKL_ADSPCS_CRST_MASK(core_mask)) !=
SKL_ADSPCS_CRST_MASK(core_mask)) {
dev_err(ctx->dev, "Set reset state failed: core_mask %x\n",
core_mask);
ret = -EIO;
}
return ret;
}
int skl_dsp_core_unset_reset_state(
struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
dev_dbg(ctx->dev, "In %s\n", __func__);
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
SKL_ADSPCS_CRST_MASK(core_mask), 0);
/* poll with timeout to check if operation successful */
ret = sst_dsp_register_poll(ctx,
SKL_ADSP_REG_ADSPCS,
SKL_ADSPCS_CRST_MASK(core_mask),
0,
SKL_DSP_RESET_TO,
"Unset reset");
if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
SKL_ADSPCS_CRST_MASK(core_mask)) != 0) {
dev_err(ctx->dev, "Unset reset state failed: core_mask %x\n",
core_mask);
ret = -EIO;
}
return ret;
}
static bool
is_skl_dsp_core_enable(struct sst_dsp *ctx, unsigned int core_mask)
{
int val;
bool is_enable;
val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);
is_enable = ((val & SKL_ADSPCS_CPA_MASK(core_mask)) &&
(val & SKL_ADSPCS_SPA_MASK(core_mask)) &&
!(val & SKL_ADSPCS_CRST_MASK(core_mask)) &&
!(val & SKL_ADSPCS_CSTALL_MASK(core_mask)));
dev_dbg(ctx->dev, "DSP core(s) enabled? %d : core_mask %x\n",
is_enable, core_mask);
return is_enable;
}
static int skl_dsp_reset_core(struct sst_dsp *ctx, unsigned int core_mask)
{
/* stall core */
sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
SKL_ADSPCS_CSTALL_MASK(core_mask),
SKL_ADSPCS_CSTALL_MASK(core_mask));
/* set reset state */
return skl_dsp_core_set_reset_state(ctx, core_mask);
}
int skl_dsp_start_core(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
/* unset reset state */
ret = skl_dsp_core_unset_reset_state(ctx, core_mask);
if (ret < 0)
return ret;
/* run core */
dev_dbg(ctx->dev, "unstall/run core: core_mask = %x\n", core_mask);
sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
SKL_ADSPCS_CSTALL_MASK(core_mask), 0);
if (!is_skl_dsp_core_enable(ctx, core_mask)) {
skl_dsp_reset_core(ctx, core_mask);
dev_err(ctx->dev, "DSP start core failed: core_mask %x\n",
core_mask);
ret = -EIO;
}
return ret;
}
int skl_dsp_core_power_up(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
SKL_ADSPCS_SPA_MASK(core_mask),
SKL_ADSPCS_SPA_MASK(core_mask));
/* poll with timeout to check if operation successful */
ret = sst_dsp_register_poll(ctx,
SKL_ADSP_REG_ADSPCS,
SKL_ADSPCS_CPA_MASK(core_mask),
SKL_ADSPCS_CPA_MASK(core_mask),
SKL_DSP_PU_TO,
"Power up");
if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
SKL_ADSPCS_CPA_MASK(core_mask)) !=
SKL_ADSPCS_CPA_MASK(core_mask)) {
dev_err(ctx->dev, "DSP core power up failed: core_mask %x\n",
core_mask);
ret = -EIO;
}
return ret;
}
int skl_dsp_core_power_down(struct sst_dsp *ctx, unsigned int core_mask)
{
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
SKL_ADSPCS_SPA_MASK(core_mask), 0);
/* poll with timeout to check if operation successful */
return sst_dsp_register_poll(ctx,
SKL_ADSP_REG_ADSPCS,
SKL_ADSPCS_CPA_MASK(core_mask),
0,
SKL_DSP_PD_TO,
"Power down");
}
int skl_dsp_enable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
/* power up */
ret = skl_dsp_core_power_up(ctx, core_mask);
if (ret < 0) {
dev_err(ctx->dev, "dsp core power up failed: core_mask %x\n",
core_mask);
return ret;
}
return skl_dsp_start_core(ctx, core_mask);
}
int skl_dsp_disable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
ret = skl_dsp_reset_core(ctx, core_mask);
if (ret < 0) {
dev_err(ctx->dev, "dsp core reset failed: core_mask %x\n",
core_mask);
return ret;
}
/* power down core*/
ret = skl_dsp_core_power_down(ctx, core_mask);
if (ret < 0) {
dev_err(ctx->dev, "dsp core power down fail mask %x: %d\n",
core_mask, ret);
return ret;
}
if (is_skl_dsp_core_enable(ctx, core_mask)) {
dev_err(ctx->dev, "dsp core disable fail mask %x: %d\n",
core_mask, ret);
ret = -EIO;
}
return ret;
}
int skl_dsp_boot(struct sst_dsp *ctx)
{
int ret;
if (is_skl_dsp_core_enable(ctx, SKL_DSP_CORE0_MASK)) {
ret = skl_dsp_reset_core(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
dev_err(ctx->dev, "dsp core0 reset fail: %d\n", ret);
return ret;
}
ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
dev_err(ctx->dev, "dsp core0 start fail: %d\n", ret);
return ret;
}
} else {
ret = skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
dev_err(ctx->dev, "dsp core0 disable fail: %d\n", ret);
return ret;
}
ret = skl_dsp_enable_core(ctx, SKL_DSP_CORE0_MASK);
}
return ret;
}
irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id)
{
struct sst_dsp *ctx = dev_id;
u32 val;
irqreturn_t result = IRQ_NONE;
spin_lock(&ctx->spinlock);
val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPIS);
ctx->intr_status = val;
if (val == 0xffffffff) {
spin_unlock(&ctx->spinlock);
return IRQ_NONE;
}
if (val & SKL_ADSPIS_IPC) {
skl_ipc_int_disable(ctx);
result = IRQ_WAKE_THREAD;
}
if (val & SKL_ADSPIS_CL_DMA) {
skl_cldma_int_disable(ctx);
result = IRQ_WAKE_THREAD;
}
spin_unlock(&ctx->spinlock);
return result;
}
/*
* skl_dsp_get_core/skl_dsp_put_core will be called inside DAPM context
* within the dapm mutex. Hence no separate lock is used.
*/
int skl_dsp_get_core(struct sst_dsp *ctx, unsigned int core_id)
{
struct skl_dev *skl = ctx->thread_context;
int ret = 0;
if (core_id >= skl->cores.count) {
dev_err(ctx->dev, "invalid core id: %d\n", core_id);
return -EINVAL;
}
skl->cores.usage_count[core_id]++;
if (skl->cores.state[core_id] == SKL_DSP_RESET) {
ret = ctx->fw_ops.set_state_D0(ctx, core_id);
if (ret < 0) {
dev_err(ctx->dev, "unable to get core%d\n", core_id);
goto out;
}
}
out:
dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n",
core_id, skl->cores.state[core_id],
skl->cores.usage_count[core_id]);
return ret;
}
EXPORT_SYMBOL_GPL(skl_dsp_get_core);
int skl_dsp_put_core(struct sst_dsp *ctx, unsigned int core_id)
{
struct skl_dev *skl = ctx->thread_context;
int ret = 0;
if (core_id >= skl->cores.count) {
dev_err(ctx->dev, "invalid core id: %d\n", core_id);
return -EINVAL;
}
if ((--skl->cores.usage_count[core_id] == 0) &&
(skl->cores.state[core_id] != SKL_DSP_RESET)) {
ret = ctx->fw_ops.set_state_D3(ctx, core_id);
if (ret < 0) {
dev_err(ctx->dev, "unable to put core %d: %d\n",
core_id, ret);
skl->cores.usage_count[core_id]++;
}
}
dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n",
core_id, skl->cores.state[core_id],
skl->cores.usage_count[core_id]);
return ret;
}
EXPORT_SYMBOL_GPL(skl_dsp_put_core);
int skl_dsp_wake(struct sst_dsp *ctx)
{
return skl_dsp_get_core(ctx, SKL_DSP_CORE0_ID);
}
EXPORT_SYMBOL_GPL(skl_dsp_wake);
int skl_dsp_sleep(struct sst_dsp *ctx)
{
return skl_dsp_put_core(ctx, SKL_DSP_CORE0_ID);
}
EXPORT_SYMBOL_GPL(skl_dsp_sleep);
struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
struct sst_dsp_device *sst_dev, int irq)
{
int ret;
struct sst_dsp *sst;
sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
if (sst == NULL)
return NULL;
spin_lock_init(&sst->spinlock);
mutex_init(&sst->mutex);
sst->dev = dev;
sst->sst_dev = sst_dev;
sst->irq = irq;
sst->ops = sst_dev->ops;
sst->thread_context = sst_dev->thread_context;
/* Initialise SST Audio DSP */
if (sst->ops->init) {
ret = sst->ops->init(sst);
if (ret < 0)
return NULL;
}
return sst;
}
int skl_dsp_acquire_irq(struct sst_dsp *sst)
{
struct sst_dsp_device *sst_dev = sst->sst_dev;
int ret;
/* Register the ISR */
ret = request_threaded_irq(sst->irq, sst->ops->irq_handler,
sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
if (ret)
dev_err(sst->dev, "unable to grab threaded IRQ %d, disabling device\n",
sst->irq);
return ret;
}
void skl_dsp_free(struct sst_dsp *dsp)
{
skl_ipc_int_disable(dsp);
free_irq(dsp->irq, dsp);
skl_ipc_op_int_disable(dsp);
skl_dsp_disable_core(dsp, SKL_DSP_CORE0_MASK);
}
EXPORT_SYMBOL_GPL(skl_dsp_free);
bool is_skl_dsp_running(struct sst_dsp *ctx)
{
return (ctx->sst_state == SKL_DSP_RUNNING);
}
EXPORT_SYMBOL_GPL(is_skl_dsp_running);

sound/soc/intel/skylake/skl-sst-dsp.h

@@ -1,256 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Skylake SST DSP Support
*
* Copyright (C) 2014-15, Intel Corporation.
*/
#ifndef __SKL_SST_DSP_H__
#define __SKL_SST_DSP_H__
#include <linux/interrupt.h>
#include <linux/uuid.h>
#include <linux/firmware.h>
#include <sound/memalloc.h>
#include "skl-sst-cldma.h"
struct sst_dsp;
struct sst_dsp_device;
struct skl_lib_info;
struct skl_dev;
/* Intel HD Audio General DSP Registers */
#define SKL_ADSP_GEN_BASE 0x0
#define SKL_ADSP_REG_ADSPCS (SKL_ADSP_GEN_BASE + 0x04)
#define SKL_ADSP_REG_ADSPIC (SKL_ADSP_GEN_BASE + 0x08)
#define SKL_ADSP_REG_ADSPIS (SKL_ADSP_GEN_BASE + 0x0C)
#define SKL_ADSP_REG_ADSPIC2 (SKL_ADSP_GEN_BASE + 0x10)
#define SKL_ADSP_REG_ADSPIS2 (SKL_ADSP_GEN_BASE + 0x14)
/* Intel HD Audio Inter-Processor Communication Registers */
#define SKL_ADSP_IPC_BASE 0x40
#define SKL_ADSP_REG_HIPCT (SKL_ADSP_IPC_BASE + 0x00)
#define SKL_ADSP_REG_HIPCTE (SKL_ADSP_IPC_BASE + 0x04)
#define SKL_ADSP_REG_HIPCI (SKL_ADSP_IPC_BASE + 0x08)
#define SKL_ADSP_REG_HIPCIE (SKL_ADSP_IPC_BASE + 0x0C)
#define SKL_ADSP_REG_HIPCCTL (SKL_ADSP_IPC_BASE + 0x10)
/* HIPCI */
#define SKL_ADSP_REG_HIPCI_BUSY BIT(31)
/* HIPCIE */
#define SKL_ADSP_REG_HIPCIE_DONE BIT(30)
/* HIPCCTL */
#define SKL_ADSP_REG_HIPCCTL_DONE BIT(1)
#define SKL_ADSP_REG_HIPCCTL_BUSY BIT(0)
/* HIPCT */
#define SKL_ADSP_REG_HIPCT_BUSY BIT(31)
/* FW base IDs */
#define SKL_INSTANCE_ID 0
#define SKL_BASE_FW_MODULE_ID 0
/* Intel HD Audio SRAM Window 1 */
#define SKL_ADSP_SRAM1_BASE 0xA000
#define SKL_ADSP_MMIO_LEN 0x10000
#define SKL_ADSP_W0_STAT_SZ 0x1000
#define SKL_ADSP_W0_UP_SZ 0x1000
#define SKL_ADSP_W1_SZ 0x1000
#define SKL_FW_STS_MASK 0xf
#define SKL_FW_INIT 0x1
#define SKL_FW_RFW_START 0xf
#define BXT_FW_ROM_INIT_RETRY 3
#define BXT_INIT_TIMEOUT 300
#define SKL_ADSPIC_IPC 1
#define SKL_ADSPIS_IPC 1
/* Core ID of core0 */
#define SKL_DSP_CORE0_ID 0
/* Mask for a given core index, c = 0.. number of supported cores - 1 */
#define SKL_DSP_CORE_MASK(c) BIT(c)
/*
* Core 0 mask = SKL_DSP_CORE_MASK(0); Defined separately
* since Core0 is primary core and it is used often
*/
#define SKL_DSP_CORE0_MASK BIT(0)
/*
* Mask for a given number of cores
* nc = number of supported cores
*/
#define SKL_DSP_CORES_MASK(nc) GENMASK((nc - 1), 0)
/* ADSPCS - Audio DSP Control & Status */
/*
* Core Reset - asserted high
* CRST Mask for a given core mask pattern, cm
*/
#define SKL_ADSPCS_CRST_SHIFT 0
#define SKL_ADSPCS_CRST_MASK(cm) ((cm) << SKL_ADSPCS_CRST_SHIFT)
/*
* Core run/stall - when set to '1' core is stalled
* CSTALL Mask for a given core mask pattern, cm
*/
#define SKL_ADSPCS_CSTALL_SHIFT 8
#define SKL_ADSPCS_CSTALL_MASK(cm) ((cm) << SKL_ADSPCS_CSTALL_SHIFT)
/*
* Set Power Active - when set to '1' turn cores on
* SPA Mask for a given core mask pattern, cm
*/
#define SKL_ADSPCS_SPA_SHIFT 16
#define SKL_ADSPCS_SPA_MASK(cm) ((cm) << SKL_ADSPCS_SPA_SHIFT)
/*
* Current Power Active - power status of cores, set by hardware
* CPA Mask for a given core mask pattern, cm
*/
#define SKL_ADSPCS_CPA_SHIFT 24
#define SKL_ADSPCS_CPA_MASK(cm) ((cm) << SKL_ADSPCS_CPA_SHIFT)
/* DSP Core state */
enum skl_dsp_states {
SKL_DSP_RUNNING = 1,
/* Running in D0i3 state; can be in streaming or non-streaming D0i3 */
SKL_DSP_RUNNING_D0I3,
SKL_DSP_RESET,
};
/* D0i3 substates */
enum skl_dsp_d0i3_states {
SKL_DSP_D0I3_NONE = -1, /* No D0i3 */
SKL_DSP_D0I3_NON_STREAMING = 0,
SKL_DSP_D0I3_STREAMING = 1,
};
struct skl_dsp_fw_ops {
int (*load_fw)(struct sst_dsp *ctx);
/* FW module parser/loader */
int (*load_library)(struct sst_dsp *ctx,
struct skl_lib_info *linfo, int lib_count);
int (*parse_fw)(struct sst_dsp *ctx);
int (*set_state_D0)(struct sst_dsp *ctx, unsigned int core_id);
int (*set_state_D3)(struct sst_dsp *ctx, unsigned int core_id);
int (*set_state_D0i3)(struct sst_dsp *ctx);
int (*set_state_D0i0)(struct sst_dsp *ctx);
unsigned int (*get_fw_errcode)(struct sst_dsp *ctx);
int (*load_mod)(struct sst_dsp *ctx, u16 mod_id, u8 *mod_name);
int (*unload_mod)(struct sst_dsp *ctx, u16 mod_id);
};
struct skl_dsp_loader_ops {
int stream_tag;
int (*alloc_dma_buf)(struct device *dev,
struct snd_dma_buffer *dmab, size_t size);
int (*free_dma_buf)(struct device *dev,
struct snd_dma_buffer *dmab);
int (*prepare)(struct device *dev, unsigned int format,
unsigned int byte_size,
struct snd_dma_buffer *bufp);
int (*trigger)(struct device *dev, bool start, int stream_tag);
int (*cleanup)(struct device *dev, struct snd_dma_buffer *dmab,
int stream_tag);
};
#define MAX_INSTANCE_BUFF 2
struct uuid_module {
guid_t uuid;
int id;
int is_loadable;
int max_instance;
u64 pvt_id[MAX_INSTANCE_BUFF];
int *instance_id;
struct list_head list;
};
struct skl_load_module_info {
u16 mod_id;
const struct firmware *fw;
};
struct skl_module_table {
struct skl_load_module_info *mod_info;
unsigned int usage_cnt;
struct list_head list;
};
void skl_cldma_process_intr(struct sst_dsp *ctx);
void skl_cldma_int_disable(struct sst_dsp *ctx);
int skl_cldma_prepare(struct sst_dsp *ctx);
int skl_cldma_wait_interruptible(struct sst_dsp *ctx);
void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state);
struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
struct sst_dsp_device *sst_dev, int irq);
int skl_dsp_acquire_irq(struct sst_dsp *sst);
bool is_skl_dsp_running(struct sst_dsp *ctx);
unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx);
void skl_dsp_init_core_state(struct sst_dsp *ctx);
int skl_dsp_enable_core(struct sst_dsp *ctx, unsigned int core_mask);
int skl_dsp_disable_core(struct sst_dsp *ctx, unsigned int core_mask);
int skl_dsp_core_power_up(struct sst_dsp *ctx, unsigned int core_mask);
int skl_dsp_core_power_down(struct sst_dsp *ctx, unsigned int core_mask);
int skl_dsp_core_unset_reset_state(struct sst_dsp *ctx,
unsigned int core_mask);
int skl_dsp_start_core(struct sst_dsp *ctx, unsigned int core_mask);
irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id);
int skl_dsp_wake(struct sst_dsp *ctx);
int skl_dsp_sleep(struct sst_dsp *ctx);
void skl_dsp_free(struct sst_dsp *dsp);
int skl_dsp_get_core(struct sst_dsp *ctx, unsigned int core_id);
int skl_dsp_put_core(struct sst_dsp *ctx, unsigned int core_id);
int skl_dsp_boot(struct sst_dsp *ctx);
int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
struct skl_dev **dsp);
int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
struct skl_dev **dsp);
int skl_sst_init_fw(struct device *dev, struct skl_dev *skl);
int bxt_sst_init_fw(struct device *dev, struct skl_dev *skl);
void skl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl);
void bxt_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl);
int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
unsigned int offset, int index);
int skl_get_pvt_id(struct skl_dev *skl, guid_t *uuid_mod, int instance_id);
int skl_put_pvt_id(struct skl_dev *skl, guid_t *uuid_mod, int *pvt_id);
int skl_get_pvt_instance_id_map(struct skl_dev *skl,
int module_id, int instance_id);
void skl_freeup_uuid_list(struct skl_dev *skl);
int skl_dsp_strip_extended_manifest(struct firmware *fw);
void skl_dsp_set_astate_cfg(struct skl_dev *skl, u32 cnt, void *data);
int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name,
struct skl_dsp_loader_ops dsp_ops, struct skl_dev **dsp,
struct sst_dsp_device *skl_dev);
int skl_prepare_lib_load(struct skl_dev *skl, struct skl_lib_info *linfo,
struct firmware *stripped_fw,
unsigned int hdr_offset, int index);
void skl_release_library(struct skl_lib_info *linfo, int lib_count);
#endif /*__SKL_SST_DSP_H__*/

File diff suppressed because it is too large

sound/soc/intel/skylake/skl-sst-ipc.h

@@ -1,169 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Intel SKL IPC Support
*
* Copyright (C) 2014-15, Intel Corporation.
*/
#ifndef __SKL_IPC_H
#define __SKL_IPC_H
#include <linux/irqreturn.h>
#include "../common/sst-ipc.h"
#include "skl-sst-dsp.h"
struct sst_dsp;
struct sst_generic_ipc;
enum skl_ipc_pipeline_state {
PPL_INVALID_STATE = 0,
PPL_UNINITIALIZED = 1,
PPL_RESET = 2,
PPL_PAUSED = 3,
PPL_RUNNING = 4,
PPL_ERROR_STOP = 5,
PPL_SAVED = 6,
PPL_RESTORED = 7
};
struct skl_ipc_dxstate_info {
u32 core_mask;
u32 dx_mask;
};
struct skl_ipc_header {
u32 primary;
u32 extension;
};
struct skl_dsp_cores {
unsigned int count;
enum skl_dsp_states *state;
int *usage_count;
};
/**
* struct skl_d0i3_data - skl D0i3 counters data struct
*
* @streaming: Count of usecases that can attempt streaming D0i3
* @non_streaming: Count of usecases that can attempt non-streaming D0i3
* @non_d0i3: Count of usecases that cannot attempt D0i3
* @state: current state
* @work: D0i3 worker thread
*/
struct skl_d0i3_data {
int streaming;
int non_streaming;
int non_d0i3;
enum skl_dsp_d0i3_states state;
struct delayed_work work;
};
#define SKL_LIB_NAME_LENGTH 128
#define SKL_MAX_LIB 16
struct skl_lib_info {
char name[SKL_LIB_NAME_LENGTH];
const struct firmware *fw;
};
struct skl_ipc_init_instance_msg {
u32 module_id;
u32 instance_id;
u16 param_data_size;
u8 ppl_instance_id;
u8 core_id;
u8 domain;
};
struct skl_ipc_bind_unbind_msg {
u32 module_id;
u32 instance_id;
u32 dst_module_id;
u32 dst_instance_id;
u8 src_queue;
u8 dst_queue;
bool bind;
};
struct skl_ipc_large_config_msg {
u32 module_id;
u32 instance_id;
u32 large_param_id;
u32 param_data_size;
};
struct skl_ipc_d0ix_msg {
u32 module_id;
u32 instance_id;
u8 streaming;
u8 wake;
};
#define SKL_IPC_BOOT_MSECS 3000
#define SKL_IPC_D3_MASK 0
#define SKL_IPC_D0_MASK 3
irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context);
int skl_ipc_create_pipeline(struct sst_generic_ipc *ipc,
u16 ppl_mem_size, u8 ppl_type, u8 instance_id, u8 lp_mode);
int skl_ipc_delete_pipeline(struct sst_generic_ipc *ipc, u8 instance_id);
int skl_ipc_set_pipeline_state(struct sst_generic_ipc *ipc,
u8 instance_id, enum skl_ipc_pipeline_state state);
int skl_ipc_save_pipeline(struct sst_generic_ipc *ipc,
u8 instance_id, int dma_id);
int skl_ipc_restore_pipeline(struct sst_generic_ipc *ipc, u8 instance_id);
int skl_ipc_init_instance(struct sst_generic_ipc *ipc,
struct skl_ipc_init_instance_msg *msg, void *param_data);
int skl_ipc_bind_unbind(struct sst_generic_ipc *ipc,
struct skl_ipc_bind_unbind_msg *msg);
int skl_ipc_load_modules(struct sst_generic_ipc *ipc,
u8 module_cnt, void *data);
int skl_ipc_unload_modules(struct sst_generic_ipc *ipc,
u8 module_cnt, void *data);
int skl_ipc_set_dx(struct sst_generic_ipc *ipc,
u8 instance_id, u16 module_id, struct skl_ipc_dxstate_info *dx);
int skl_ipc_set_large_config(struct sst_generic_ipc *ipc,
struct skl_ipc_large_config_msg *msg, u32 *param);
int skl_ipc_get_large_config(struct sst_generic_ipc *ipc,
struct skl_ipc_large_config_msg *msg,
u32 **payload, size_t *bytes);
int skl_sst_ipc_load_library(struct sst_generic_ipc *ipc,
u8 dma_id, u8 table_id, bool wait);
int skl_ipc_set_d0ix(struct sst_generic_ipc *ipc,
struct skl_ipc_d0ix_msg *msg);
int skl_ipc_check_D0i0(struct sst_dsp *dsp, bool state);
void skl_ipc_int_enable(struct sst_dsp *ctx);
void skl_ipc_op_int_enable(struct sst_dsp *ctx);
void skl_ipc_op_int_disable(struct sst_dsp *ctx);
void skl_ipc_int_disable(struct sst_dsp *ctx);
bool skl_ipc_int_status(struct sst_dsp *ctx);
void skl_ipc_free(struct sst_generic_ipc *ipc);
int skl_ipc_init(struct device *dev, struct skl_dev *skl);
void skl_clear_module_cnt(struct sst_dsp *ctx);
void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
struct skl_ipc_header header);
int skl_ipc_process_notification(struct sst_generic_ipc *ipc,
struct skl_ipc_header header);
void skl_ipc_tx_data_copy(struct ipc_message *msg, char *tx_data,
size_t tx_size);
#endif /* __SKL_IPC_H */
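
The pipeline-state enum and the IPC helpers above combine into the usual bring-up sequence: create the pipeline, move it to PPL_RUNNING, and delete it on teardown or error. A hedged sketch; the size, type and instance values are placeholders, not values the driver necessarily uses:

#include "skl-sst-ipc.h"

/* hypothetical pipeline bring-up using the helpers declared above */
static int example_start_pipeline(struct sst_generic_ipc *ipc)
{
	u8 instance_id = 0;	/* placeholder */
	int ret;

	ret = skl_ipc_create_pipeline(ipc, 256 /* ppl_mem_size */, 0 /* ppl_type */,
				      instance_id, 0 /* lp_mode */);
	if (ret < 0)
		return ret;

	ret = skl_ipc_set_pipeline_state(ipc, instance_id, PPL_RUNNING);
	if (ret < 0)
		skl_ipc_delete_pipeline(ipc, instance_id);

	return ret;
}
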

sound/soc/intel/skylake/skl-sst-utils.c

@@ -1,425 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* skl-sst-utils.c - SKL sst utils functions
*
* Copyright (C) 2016 Intel Corp
*/
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "skl.h"
#define DEFAULT_HASH_SHA256_LEN 32
/* FW Extended Manifest Header id = $AE1 */
#define SKL_EXT_MANIFEST_HEADER_MAGIC 0x31454124
union seg_flags {
u32 ul;
struct {
u32 contents : 1;
u32 alloc : 1;
u32 load : 1;
u32 read_only : 1;
u32 code : 1;
u32 data : 1;
u32 _rsvd0 : 2;
u32 type : 4;
u32 _rsvd1 : 4;
u32 length : 16;
} r;
} __packed;
struct segment_desc {
union seg_flags flags;
u32 v_base_addr;
u32 file_offset;
};
struct module_type {
u32 load_type : 4;
u32 auto_start : 1;
u32 domain_ll : 1;
u32 domain_dp : 1;
u32 rsvd : 25;
} __packed;
struct adsp_module_entry {
u32 struct_id;
u8 name[8];
u8 uuid[16];
struct module_type type;
u8 hash1[DEFAULT_HASH_SHA256_LEN];
u32 entry_point;
u16 cfg_offset;
u16 cfg_count;
u32 affinity_mask;
u16 instance_max_count;
u16 instance_bss_size;
struct segment_desc segments[3];
} __packed;
struct adsp_fw_hdr {
u32 id;
u32 len;
u8 name[8];
u32 preload_page_count;
u32 fw_image_flags;
u32 feature_mask;
u16 major;
u16 minor;
u16 hotfix;
u16 build;
u32 num_modules;
u32 hw_buf_base;
u32 hw_buf_length;
u32 load_offset;
} __packed;
struct skl_ext_manifest_hdr {
u32 id;
u32 len;
u16 version_major;
u16 version_minor;
u32 entries;
};
static int skl_get_pvtid_map(struct uuid_module *module, int instance_id)
{
int pvt_id;
for (pvt_id = 0; pvt_id < module->max_instance; pvt_id++) {
if (module->instance_id[pvt_id] == instance_id)
return pvt_id;
}
return -EINVAL;
}
int skl_get_pvt_instance_id_map(struct skl_dev *skl,
int module_id, int instance_id)
{
struct uuid_module *module;
list_for_each_entry(module, &skl->uuid_list, list) {
if (module->id == module_id)
return skl_get_pvtid_map(module, instance_id);
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(skl_get_pvt_instance_id_map);
static inline int skl_getid_32(struct uuid_module *module, u64 *val,
int word1_mask, int word2_mask)
{
int index, max_inst, pvt_id;
u32 mask_val;
max_inst = module->max_instance;
mask_val = (u32)(*val >> word1_mask);
if (mask_val != 0xffffffff) {
index = ffz(mask_val);
pvt_id = index + word1_mask + word2_mask;
if (pvt_id <= (max_inst - 1)) {
*val |= 1ULL << (index + word1_mask);
return pvt_id;
}
}
return -EINVAL;
}
static inline int skl_pvtid_128(struct uuid_module *module)
{
int j, i, word1_mask, word2_mask = 0, pvt_id;
for (j = 0; j < MAX_INSTANCE_BUFF; j++) {
word1_mask = 0;
for (i = 0; i < 2; i++) {
pvt_id = skl_getid_32(module, &module->pvt_id[j],
word1_mask, word2_mask);
if (pvt_id >= 0)
return pvt_id;
word1_mask += 32;
if ((word1_mask + word2_mask) >= module->max_instance)
return -EINVAL;
}
word2_mask += 64;
if (word2_mask >= module->max_instance)
return -EINVAL;
}
return -EINVAL;
}
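/*
 * A minimal user-space sketch of the allocator above, assuming the
 * kernel's ffz() semantics (index of the first zero bit). Two u64 words
 * give 128 instance slots per module type; skl_getid_32() scans them
 * 32 bits at a time and the first clear bit becomes the private id.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_INSTANCE_BUFF 2

static int ffz32(uint32_t v)	/* first zero bit, open-coded ffz() */
{
	int i;

	for (i = 0; i < 32; i++)
		if (!(v & (1u << i)))
			return i;
	return -1;
}

static int alloc_pvt_id(uint64_t pvt_id[MAX_INSTANCE_BUFF], int max_instance)
{
	int j, i, word1_mask, word2_mask = 0;

	for (j = 0; j < MAX_INSTANCE_BUFF; j++) {
		word1_mask = 0;
		for (i = 0; i < 2; i++) {
			uint32_t mask_val = (uint32_t)(pvt_id[j] >> word1_mask);

			if (mask_val != 0xffffffff) {
				int index = ffz32(mask_val);
				int id = index + word1_mask + word2_mask;

				if (id <= max_instance - 1) {
					pvt_id[j] |= 1ULL << (index + word1_mask);
					return id;
				}
			}
			word1_mask += 32;
			if (word1_mask + word2_mask >= max_instance)
				return -1;
		}
		word2_mask += 64;
		if (word2_mask >= max_instance)
			return -1;
	}
	return -1;
}

int main(void)
{
	uint64_t map[MAX_INSTANCE_BUFF] = { 0 };
	int i;

	for (i = 0; i < 5; i++)
		printf("allocated pvt_id %d\n", alloc_pvt_id(map, 128));
	return 0;
}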
/**
* skl_get_pvt_id: generate a private id for use as module id
*
* @skl: driver context
* @uuid_mod: module's uuid
* @instance_id: module's instance id
*
* This allocates a slot in the module type's 128-bit instance bitmap and
* returns it as a private id, so that every instance of a module type is unique
*/
int skl_get_pvt_id(struct skl_dev *skl, guid_t *uuid_mod, int instance_id)
{
struct uuid_module *module;
int pvt_id;
list_for_each_entry(module, &skl->uuid_list, list) {
if (guid_equal(uuid_mod, &module->uuid)) {
pvt_id = skl_pvtid_128(module);
if (pvt_id >= 0) {
module->instance_id[pvt_id] = instance_id;
return pvt_id;
}
}
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(skl_get_pvt_id);
/**
* skl_put_pvt_id: free up the private id allocated
*
* @skl: driver context
* @uuid_mod: module's uuid
* @pvt_id: module pvt id
*
* This frees a private id previously allocated by skl_get_pvt_id()
*/
int skl_put_pvt_id(struct skl_dev *skl, guid_t *uuid_mod, int *pvt_id)
{
int i;
struct uuid_module *module;
list_for_each_entry(module, &skl->uuid_list, list) {
if (guid_equal(uuid_mod, &module->uuid)) {
i = *pvt_id / 64;
module->pvt_id[i] &= ~(1ULL << (*pvt_id % 64));
*pvt_id = -1;
return 0;
}
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(skl_put_pvt_id);
/*
* Parse the firmware binary to get the UUID, module id
* and loadable flags
*/
int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
unsigned int offset, int index)
{
struct adsp_fw_hdr *adsp_hdr;
struct adsp_module_entry *mod_entry;
int i, num_entry, size;
const char *buf;
struct skl_dev *skl = ctx->thread_context;
struct uuid_module *module;
struct firmware stripped_fw;
unsigned int safe_file;
int ret;
/* Get the FW pointer to derive ADSP header */
stripped_fw.data = fw->data;
stripped_fw.size = fw->size;
skl_dsp_strip_extended_manifest(&stripped_fw);
buf = stripped_fw.data;
/* check if we have enough space in file to move to header */
safe_file = sizeof(*adsp_hdr) + offset;
if (stripped_fw.size <= safe_file) {
dev_err(ctx->dev, "Small fw file size, No space for hdr\n");
return -EINVAL;
}
adsp_hdr = (struct adsp_fw_hdr *)(buf + offset);
/* check 1st module entry is in file */
safe_file += adsp_hdr->len + sizeof(*mod_entry);
if (stripped_fw.size <= safe_file) {
dev_err(ctx->dev, "Small fw file size, No module entry\n");
return -EINVAL;
}
mod_entry = (struct adsp_module_entry *)(buf + offset + adsp_hdr->len);
num_entry = adsp_hdr->num_modules;
/* check all entries are in file */
safe_file += num_entry * sizeof(*mod_entry);
if (stripped_fw.size <= safe_file) {
dev_err(ctx->dev, "Small fw file size, No modules\n");
return -EINVAL;
}
/*
* Read the UUID(GUID) from FW Manifest.
*
* The 16 byte UUID format is: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
* Populate the UUID table to store module_id and loadable flags
* for the module.
*/
for (i = 0; i < num_entry; i++, mod_entry++) {
module = kzalloc(sizeof(*module), GFP_KERNEL);
if (!module) {
ret = -ENOMEM;
goto free_uuid_list;
}
import_guid(&module->uuid, mod_entry->uuid);
module->id = (i | (index << 12));
module->is_loadable = mod_entry->type.load_type;
module->max_instance = mod_entry->instance_max_count;
size = sizeof(int) * mod_entry->instance_max_count;
module->instance_id = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
if (!module->instance_id) {
ret = -ENOMEM;
kfree(module);
goto free_uuid_list;
}
list_add_tail(&module->list, &skl->uuid_list);
dev_dbg(ctx->dev,
"Adding uuid :%pUL mod id: %d Loadable: %d\n",
&module->uuid, module->id, module->is_loadable);
}
return 0;
free_uuid_list:
skl_freeup_uuid_list(skl);
return ret;
}
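/*
 * A short runnable sketch of the id packing used above: module->id keeps
 * the manifest entry index in the low 12 bits and the library index in
 * the bits above, so ids stay unique across the base firmware (index 0)
 * and any loadable libraries.
 */
#include <stdio.h>

int main(void)
{
	int index = 2, i = 5;			/* library 2, manifest entry 5 */
	int mod_id = i | (index << 12);

	printf("mod_id=0x%x entry=%d lib=%d\n",
	       mod_id, mod_id & 0xfff, mod_id >> 12);
	return 0;
}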
void skl_freeup_uuid_list(struct skl_dev *skl)
{
struct uuid_module *uuid, *_uuid;
list_for_each_entry_safe(uuid, _uuid, &skl->uuid_list, list) {
list_del(&uuid->list);
kfree(uuid);
}
}
/*
 * Some firmware binaries carry an extended manifest. It must be
 * stripped before the image is loaded and used.
 */
int skl_dsp_strip_extended_manifest(struct firmware *fw)
{
struct skl_ext_manifest_hdr *hdr;
/* check that the fw file is large enough to hold the header */
if (fw->size < sizeof(*hdr)) {
pr_err("%s: Firmware file small, no hdr\n", __func__);
return -EINVAL;
}
hdr = (struct skl_ext_manifest_hdr *)fw->data;
if (hdr->id == SKL_EXT_MANIFEST_HEADER_MAGIC) {
fw->size -= hdr->len;
fw->data += hdr->len;
}
return 0;
}
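/*
 * A minimal user-space sketch of the stripping above. The extra check
 * that hdr.len fits inside the image is hardening not present in the
 * original function; everything else mirrors it.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXT_MANIFEST_MAGIC 0x31454124	/* "$AE1" in little-endian */

struct ext_hdr_copy {			/* struct skl_ext_manifest_hdr */
	uint32_t id;
	uint32_t len;
	uint16_t version_major;
	uint16_t version_minor;
	uint32_t entries;
};

static const uint8_t *strip_ext_manifest(const uint8_t *data, size_t *size)
{
	struct ext_hdr_copy hdr;

	if (*size < sizeof(hdr))
		return data;
	memcpy(&hdr, data, sizeof(hdr));
	if (hdr.id == EXT_MANIFEST_MAGIC && hdr.len <= *size) {
		*size -= hdr.len;
		return data + hdr.len;	/* skip the extended manifest */
	}
	return data;
}

int main(void)
{
	uint8_t img[64] = { 0x24, 0x41, 0x45, 0x31,	/* "$AE1" */
			    32, 0, 0, 0 };		/* len = 32 */
	size_t size = sizeof(img);
	const uint8_t *fw = strip_ext_manifest(img, &size);

	printf("stripped %td bytes, %zu left\n", fw - img, size);
	return 0;
}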
int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name,
struct skl_dsp_loader_ops dsp_ops, struct skl_dev **dsp,
struct sst_dsp_device *skl_dev)
{
struct skl_dev *skl = *dsp;
struct sst_dsp *sst;
skl->dev = dev;
skl_dev->thread_context = skl;
INIT_LIST_HEAD(&skl->uuid_list);
skl->dsp = skl_dsp_ctx_init(dev, skl_dev, irq);
if (!skl->dsp) {
dev_err(skl->dev, "%s: no device\n", __func__);
return -ENODEV;
}
sst = skl->dsp;
sst->fw_name = fw_name;
sst->dsp_ops = dsp_ops;
init_waitqueue_head(&skl->mod_load_wait);
INIT_LIST_HEAD(&sst->module_list);
skl->is_first_boot = true;
return 0;
}
int skl_prepare_lib_load(struct skl_dev *skl, struct skl_lib_info *linfo,
struct firmware *stripped_fw,
unsigned int hdr_offset, int index)
{
int ret;
struct sst_dsp *dsp = skl->dsp;
if (linfo->fw == NULL) {
ret = request_firmware(&linfo->fw, linfo->name,
skl->dev);
if (ret < 0) {
dev_err(skl->dev, "Request lib %s failed:%d\n",
linfo->name, ret);
return ret;
}
}
if (skl->is_first_boot) {
ret = snd_skl_parse_uuids(dsp, linfo->fw, hdr_offset, index);
if (ret < 0)
return ret;
}
stripped_fw->data = linfo->fw->data;
stripped_fw->size = linfo->fw->size;
skl_dsp_strip_extended_manifest(stripped_fw);
return 0;
}
void skl_release_library(struct skl_lib_info *linfo, int lib_count)
{
int i;
/* library indices start from 1 to N. 0 represents base FW */
for (i = 1; i < lib_count; i++) {
if (linfo[i].fw) {
release_firmware(linfo[i].fw);
linfo[i].fw = NULL;
}
}
}


@@ -1,599 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* skl-sst.c - HDA DSP library functions for SKL platform
*
* Copyright (C) 2014-15, Intel Corporation.
* Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
* Jeeja KP <jeeja.kp@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/uuid.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "../common/sst-ipc.h"
#include "skl.h"
#define SKL_BASEFW_TIMEOUT 300
#define SKL_INIT_TIMEOUT 1000
/* Intel HD Audio SRAM Window 0 */
#define SKL_ADSP_SRAM0_BASE 0x8000
/* Firmware status window */
#define SKL_ADSP_FW_STATUS SKL_ADSP_SRAM0_BASE
#define SKL_ADSP_ERROR_CODE (SKL_ADSP_FW_STATUS + 0x4)
#define SKL_NUM_MODULES 1
static bool skl_check_fw_status(struct sst_dsp *ctx, u32 status)
{
u32 cur_sts;
cur_sts = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS) & SKL_FW_STS_MASK;
return (cur_sts == status);
}
static int skl_transfer_firmware(struct sst_dsp *ctx,
const void *basefw, u32 base_fw_size)
{
int ret = 0;
ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, basefw, base_fw_size,
true);
if (ret < 0)
return ret;
ret = sst_dsp_register_poll(ctx,
SKL_ADSP_FW_STATUS,
SKL_FW_STS_MASK,
SKL_FW_RFW_START,
SKL_BASEFW_TIMEOUT,
"Firmware boot");
ctx->cl_dev.ops.cl_stop_dma(ctx);
return ret;
}
#define SKL_ADSP_FW_BIN_HDR_OFFSET 0x284
static int skl_load_base_firmware(struct sst_dsp *ctx)
{
int ret = 0, i;
struct skl_dev *skl = ctx->thread_context;
struct firmware stripped_fw;
u32 reg;
skl->boot_complete = false;
init_waitqueue_head(&skl->boot_wait);
if (ctx->fw == NULL) {
ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
if (ret < 0) {
dev_err(ctx->dev, "Request firmware failed %d\n", ret);
return -EIO;
}
}
/* parse UUIDs on first boot */
if (skl->is_first_boot) {
ret = snd_skl_parse_uuids(ctx, ctx->fw, SKL_ADSP_FW_BIN_HDR_OFFSET, 0);
if (ret < 0) {
dev_err(ctx->dev, "UUID parsing err: %d\n", ret);
release_firmware(ctx->fw);
skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
return ret;
}
}
/* check for extended manifest */
stripped_fw.data = ctx->fw->data;
stripped_fw.size = ctx->fw->size;
skl_dsp_strip_extended_manifest(&stripped_fw);
ret = skl_dsp_boot(ctx);
if (ret < 0) {
dev_err(ctx->dev, "Boot dsp core failed ret: %d\n", ret);
goto skl_load_base_firmware_failed;
}
ret = skl_cldma_prepare(ctx);
if (ret < 0) {
dev_err(ctx->dev, "CL dma prepare failed : %d\n", ret);
goto skl_load_base_firmware_failed;
}
/* enable Interrupt */
skl_ipc_int_enable(ctx);
skl_ipc_op_int_enable(ctx);
/* check ROM Status */
for (i = SKL_INIT_TIMEOUT; i > 0; --i) {
if (skl_check_fw_status(ctx, SKL_FW_INIT)) {
dev_dbg(ctx->dev,
"ROM loaded, we can continue with FW loading\n");
break;
}
mdelay(1);
}
if (!i) {
reg = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS);
dev_err(ctx->dev,
"Timeout waiting for ROM init done, reg:0x%x\n", reg);
ret = -EIO;
goto transfer_firmware_failed;
}
ret = skl_transfer_firmware(ctx, stripped_fw.data, stripped_fw.size);
if (ret < 0) {
dev_err(ctx->dev, "Transfer firmware failed%d\n", ret);
goto transfer_firmware_failed;
} else {
ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
if (ret == 0) {
dev_err(ctx->dev, "DSP boot failed, FW Ready timed-out\n");
ret = -EIO;
goto transfer_firmware_failed;
}
dev_dbg(ctx->dev, "Download firmware successful%d\n", ret);
skl->fw_loaded = true;
}
return 0;
transfer_firmware_failed:
ctx->cl_dev.ops.cl_cleanup_controller(ctx);
skl_load_base_firmware_failed:
skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
release_firmware(ctx->fw);
ctx->fw = NULL;
return ret;
}
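/*
 * A runnable sketch of the ROM-status wait above: poll a status
 * predicate once per millisecond up to a fixed budget, then report a
 * timeout. Uses POSIX usleep(); the predicate is a stand-in for reading
 * SKL_ADSP_FW_STATUS.
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int polls;

static bool rom_init_done(void)		/* stand-in for skl_check_fw_status() */
{
	return ++polls >= 4;		/* pretend ROM comes up on poll 4 */
}

static int wait_rom_init(int timeout_ms)
{
	int i;

	for (i = timeout_ms; i > 0; --i) {
		if (rom_init_done())
			return 0;
		usleep(1000);		/* mdelay(1) in the driver */
	}
	return -1;
}

int main(void)
{
	printf("wait_rom_init: %s\n",
	       wait_rom_init(1000) ? "timed out" : "done");
	return 0;
}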
static int skl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
int ret;
struct skl_ipc_dxstate_info dx;
struct skl_dev *skl = ctx->thread_context;
unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
/* If core0 is being turned on, we need to load the FW */
if (core_id == SKL_DSP_CORE0_ID) {
ret = skl_load_base_firmware(ctx);
if (ret < 0) {
dev_err(ctx->dev, "unable to load firmware\n");
return ret;
}
/* load libs as they are also lost on D3 */
if (skl->lib_count > 1) {
ret = ctx->fw_ops.load_library(ctx, skl->lib_info,
skl->lib_count);
if (ret < 0) {
dev_err(ctx->dev, "reload libs failed: %d\n",
ret);
return ret;
}
}
}
/*
* If any core other than core 0 is being moved to D0, enable the
* core and send the set dx IPC for the core.
*/
if (core_id != SKL_DSP_CORE0_ID) {
ret = skl_dsp_enable_core(ctx, core_mask);
if (ret < 0)
return ret;
dx.core_mask = core_mask;
dx.dx_mask = core_mask;
ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID,
SKL_BASE_FW_MODULE_ID, &dx);
if (ret < 0) {
dev_err(ctx->dev, "Failed to set DSP to D0, core id: %d\n",
core_id);
skl_dsp_disable_core(ctx, core_mask);
return ret;
}
}
skl->cores.state[core_id] = SKL_DSP_RUNNING;
return 0;
}
static int skl_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
int ret;
struct skl_ipc_dxstate_info dx;
struct skl_dev *skl = ctx->thread_context;
unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
dx.core_mask = core_mask;
dx.dx_mask = SKL_IPC_D3_MASK;
ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID, SKL_BASE_FW_MODULE_ID, &dx);
if (ret < 0)
dev_err(ctx->dev, "set Dx core %d fail: %d\n", core_id, ret);
if (core_id == SKL_DSP_CORE0_ID) {
/* disable Interrupt */
ctx->cl_dev.ops.cl_cleanup_controller(ctx);
skl_cldma_int_disable(ctx);
skl_ipc_op_int_disable(ctx);
skl_ipc_int_disable(ctx);
}
ret = skl_dsp_disable_core(ctx, core_mask);
if (ret < 0)
return ret;
skl->cores.state[core_id] = SKL_DSP_RESET;
return ret;
}
static unsigned int skl_get_errorcode(struct sst_dsp *ctx)
{
return sst_dsp_shim_read(ctx, SKL_ADSP_ERROR_CODE);
}
/*
* since get/set_module are called from DAPM context,
* we don't need lock for usage count
*/
static int skl_get_module(struct sst_dsp *ctx, u16 mod_id)
{
struct skl_module_table *module;
list_for_each_entry(module, &ctx->module_list, list) {
if (module->mod_info->mod_id == mod_id)
return ++module->usage_cnt;
}
return -EINVAL;
}
static int skl_put_module(struct sst_dsp *ctx, u16 mod_id)
{
struct skl_module_table *module;
list_for_each_entry(module, &ctx->module_list, list) {
if (module->mod_info->mod_id == mod_id)
return --module->usage_cnt;
}
return -EINVAL;
}
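/*
 * A tiny runnable sketch of the usage-count rule implemented by
 * skl_get_module()/skl_put_module(): a module is only transferred on the
 * first get (count 0 -> 1) and only unloaded when the last put drops the
 * count back to zero.
 */
#include <stdio.h>

struct mod { int id; int usage_cnt; };

static int get_mod(struct mod *m) { return ++m->usage_cnt; }
static int put_mod(struct mod *m) { return --m->usage_cnt; }

int main(void)
{
	struct mod m = { .id = 3, .usage_cnt = 0 };

	if (m.usage_cnt == 0)
		printf("load module %d\n", m.id);   /* skl_transfer_module() */
	get_mod(&m);
	get_mod(&m);			/* second user: no reload needed */
	put_mod(&m);
	if (put_mod(&m) == 0)
		printf("unload module %d\n", m.id); /* skl_ipc_unload_modules() */
	return 0;
}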
static struct skl_module_table *skl_fill_module_table(struct sst_dsp *ctx,
char *mod_name, int mod_id)
{
const struct firmware *fw;
struct skl_module_table *skl_module;
unsigned int size;
int ret;
ret = request_firmware(&fw, mod_name, ctx->dev);
if (ret < 0) {
dev_err(ctx->dev, "Request Module %s failed :%d\n",
mod_name, ret);
return NULL;
}
skl_module = devm_kzalloc(ctx->dev, sizeof(*skl_module), GFP_KERNEL);
if (skl_module == NULL) {
release_firmware(fw);
return NULL;
}
size = sizeof(*skl_module->mod_info);
skl_module->mod_info = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
if (skl_module->mod_info == NULL) {
release_firmware(fw);
return NULL;
}
skl_module->mod_info->mod_id = mod_id;
skl_module->mod_info->fw = fw;
list_add(&skl_module->list, &ctx->module_list);
return skl_module;
}
/* get a module from its unique ID */
static struct skl_module_table *skl_module_get_from_id(
struct sst_dsp *ctx, u16 mod_id)
{
struct skl_module_table *module;
if (list_empty(&ctx->module_list)) {
dev_err(ctx->dev, "Module list is empty\n");
return NULL;
}
list_for_each_entry(module, &ctx->module_list, list) {
if (module->mod_info->mod_id == mod_id)
return module;
}
return NULL;
}
static int skl_transfer_module(struct sst_dsp *ctx, const void *data,
u32 size, u16 mod_id, u8 table_id, bool is_module)
{
int ret, bytes_left, curr_pos;
struct skl_dev *skl = ctx->thread_context;
skl->mod_load_complete = false;
bytes_left = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, data, size, false);
if (bytes_left < 0)
return bytes_left;
/* check is_module flag to load module or library */
if (is_module)
ret = skl_ipc_load_modules(&skl->ipc, SKL_NUM_MODULES, &mod_id);
else
ret = skl_sst_ipc_load_library(&skl->ipc, 0, table_id, false);
if (ret < 0) {
dev_err(ctx->dev, "Failed to Load %s with err %d\n",
is_module ? "module" : "lib", ret);
goto out;
}
/*
* if bytes_left > 0 then wait for BDL complete interrupt and
* copy the next chunk till bytes_left is 0. if bytes_left is
* zero, then wait for load module IPC reply
*/
while (bytes_left > 0) {
curr_pos = size - bytes_left;
ret = skl_cldma_wait_interruptible(ctx);
if (ret < 0)
goto out;
bytes_left = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx,
data + curr_pos,
bytes_left, false);
}
ret = wait_event_timeout(skl->mod_load_wait, skl->mod_load_complete,
msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
if (ret == 0 || !skl->mod_load_status) {
dev_err(ctx->dev, "Module Load failed\n");
ret = -EIO;
}
out:
ctx->cl_dev.ops.cl_stop_dma(ctx);
return ret;
}
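/*
 * A runnable sketch of the chunked-copy loop above: copy at most one DMA
 * buffer per pass and keep going while bytes remain. In the driver each
 * extra pass first waits for the BDL-complete interrupt
 * (skl_cldma_wait_interruptible()); here that wait is just a comment.
 */
#include <stdio.h>

static int copy_chunk(int left, int buf_sz)	/* cl_copy_to_dmabuf() stand-in */
{
	int copied = left < buf_sz ? left : buf_sz;

	printf("copied %d bytes\n", copied);
	return left - copied;			/* bytes still to transfer */
}

int main(void)
{
	int bytes_left = 10000;

	bytes_left = copy_chunk(bytes_left, 4096);
	while (bytes_left > 0) {
		/* driver: wait for BDL interrupt here */
		bytes_left = copy_chunk(bytes_left, 4096);
	}
	printf("transfer complete, wait for load IPC reply\n");
	return 0;
}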
static int
skl_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, int lib_count)
{
struct skl_dev *skl = ctx->thread_context;
struct firmware stripped_fw;
int ret, i;
/* library indices start from 1 to N. 0 represents base FW */
for (i = 1; i < lib_count; i++) {
ret = skl_prepare_lib_load(skl, &skl->lib_info[i], &stripped_fw,
SKL_ADSP_FW_BIN_HDR_OFFSET, i);
if (ret < 0)
goto load_library_failed;
ret = skl_transfer_module(ctx, stripped_fw.data,
stripped_fw.size, 0, i, false);
if (ret < 0)
goto load_library_failed;
}
return 0;
load_library_failed:
skl_release_library(linfo, lib_count);
return ret;
}
static int skl_load_module(struct sst_dsp *ctx, u16 mod_id, u8 *guid)
{
struct skl_module_table *module_entry = NULL;
int ret = 0;
char mod_name[64]; /* guid str = 32 chars + 4 hyphens */
snprintf(mod_name, sizeof(mod_name), "intel/dsp_fw_%pUL.bin", guid);
module_entry = skl_module_get_from_id(ctx, mod_id);
if (module_entry == NULL) {
module_entry = skl_fill_module_table(ctx, mod_name, mod_id);
if (module_entry == NULL) {
dev_err(ctx->dev, "Failed to Load module\n");
return -EINVAL;
}
}
if (!module_entry->usage_cnt) {
ret = skl_transfer_module(ctx, module_entry->mod_info->fw->data,
module_entry->mod_info->fw->size,
mod_id, 0, true);
if (ret < 0) {
dev_err(ctx->dev, "Failed to Load module\n");
return ret;
}
}
ret = skl_get_module(ctx, mod_id);
return ret;
}
static int skl_unload_module(struct sst_dsp *ctx, u16 mod_id)
{
int usage_cnt;
struct skl_dev *skl = ctx->thread_context;
int ret = 0;
usage_cnt = skl_put_module(ctx, mod_id);
if (usage_cnt < 0) {
dev_err(ctx->dev, "Module bad usage cnt!:%d\n", usage_cnt);
return -EIO;
}
/* if the module is still in use by others, no need to unload */
if (usage_cnt > 0)
return 0;
ret = skl_ipc_unload_modules(&skl->ipc,
SKL_NUM_MODULES, &mod_id);
if (ret < 0) {
dev_err(ctx->dev, "Failed to UnLoad module\n");
skl_get_module(ctx, mod_id);
return ret;
}
return ret;
}
void skl_clear_module_cnt(struct sst_dsp *ctx)
{
struct skl_module_table *module;
if (list_empty(&ctx->module_list))
return;
list_for_each_entry(module, &ctx->module_list, list) {
module->usage_cnt = 0;
}
}
EXPORT_SYMBOL_GPL(skl_clear_module_cnt);
static void skl_clear_module_table(struct sst_dsp *ctx)
{
struct skl_module_table *module, *tmp;
if (list_empty(&ctx->module_list))
return;
list_for_each_entry_safe(module, tmp, &ctx->module_list, list) {
list_del(&module->list);
release_firmware(module->mod_info->fw);
}
}
static const struct skl_dsp_fw_ops skl_fw_ops = {
.set_state_D0 = skl_set_dsp_D0,
.set_state_D3 = skl_set_dsp_D3,
.load_fw = skl_load_base_firmware,
.get_fw_errcode = skl_get_errorcode,
.load_library = skl_load_library,
.load_mod = skl_load_module,
.unload_mod = skl_unload_module,
};
static struct sst_ops skl_ops = {
.irq_handler = skl_dsp_sst_interrupt,
.write = sst_shim32_write,
.read = sst_shim32_read,
.free = skl_dsp_free,
};
static struct sst_dsp_device skl_dev = {
.thread = skl_dsp_irq_thread_handler,
.ops = &skl_ops,
};
int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
struct skl_dev **dsp)
{
struct skl_dev *skl;
struct sst_dsp *sst;
int ret;
ret = skl_sst_ctx_init(dev, irq, fw_name, dsp_ops, dsp, &skl_dev);
if (ret < 0) {
dev_err(dev, "%s: no device\n", __func__);
return ret;
}
skl = *dsp;
sst = skl->dsp;
sst->addr.lpe = mmio_base;
sst->addr.shim = mmio_base;
sst->addr.sram0_base = SKL_ADSP_SRAM0_BASE;
sst->addr.sram1_base = SKL_ADSP_SRAM1_BASE;
sst->addr.w0_stat_sz = SKL_ADSP_W0_STAT_SZ;
sst->addr.w0_up_sz = SKL_ADSP_W0_UP_SZ;
sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
ret = skl_ipc_init(dev, skl);
if (ret) {
skl_dsp_free(sst);
return ret;
}
sst->fw_ops = skl_fw_ops;
return skl_dsp_acquire_irq(sst);
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_init);
int skl_sst_init_fw(struct device *dev, struct skl_dev *skl)
{
int ret;
struct sst_dsp *sst = skl->dsp;
ret = sst->fw_ops.load_fw(sst);
if (ret < 0) {
dev_err(dev, "Load base fw failed : %d\n", ret);
return ret;
}
skl_dsp_init_core_state(sst);
if (skl->lib_count > 1) {
ret = sst->fw_ops.load_library(sst, skl->lib_info,
skl->lib_count);
if (ret < 0) {
dev_err(dev, "Load Library failed : %x\n", ret);
return ret;
}
}
skl->is_first_boot = false;
return 0;
}
EXPORT_SYMBOL_GPL(skl_sst_init_fw);
void skl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl)
{
if (skl->dsp->fw)
release_firmware(skl->dsp->fw);
skl_clear_module_table(skl->dsp);
skl_freeup_uuid_list(skl);
skl_ipc_free(&skl->ipc);
skl->dsp->ops->free(skl->dsp);
if (skl->boot_complete) {
skl->dsp->cl_dev.ops.cl_cleanup_controller(skl->dsp);
skl_cldma_int_disable(skl->dsp);
}
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_cleanup);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Skylake IPC driver");

File diff suppressed because it is too large


@@ -1,524 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* skl_topology.h - Intel HDA Platform topology header file
*
* Copyright (C) 2014-15 Intel Corp
* Author: Jeeja KP <jeeja.kp@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#ifndef __SKL_TOPOLOGY_H__
#define __SKL_TOPOLOGY_H__
#include <linux/types.h>
#include <sound/hdaudio_ext.h>
#include <sound/soc.h>
#include <uapi/sound/skl-tplg-interface.h>
#include "skl.h"
#define BITS_PER_BYTE 8
#define MAX_TS_GROUPS 8
#define MAX_DMIC_TS_GROUPS 4
#define MAX_FIXED_DMIC_PARAMS_SIZE 727
/* Maximum number of coefficients for the up/down mixer module */
#define UP_DOWN_MIXER_MAX_COEFF 8
#define MODULE_MAX_IN_PINS 8
#define MODULE_MAX_OUT_PINS 8
#define SKL_MIC_CH_SUPPORT 4
#define SKL_MIC_MAX_CH_SUPPORT 8
#define SKL_DEFAULT_MIC_SEL_GAIN 0x3FF
#define SKL_MIC_SEL_SWITCH 0x3
#define SKL_OUTPUT_PIN 0
#define SKL_INPUT_PIN 1
#define SKL_MAX_PATH_CONFIGS 8
#define SKL_MAX_MODULES_IN_PIPE 8
#define SKL_MAX_MODULE_FORMATS 32
#define SKL_MAX_MODULE_RESOURCES 32
enum skl_channel_index {
SKL_CHANNEL_LEFT = 0,
SKL_CHANNEL_RIGHT = 1,
SKL_CHANNEL_CENTER = 2,
SKL_CHANNEL_LEFT_SURROUND = 3,
SKL_CHANNEL_CENTER_SURROUND = 3,
SKL_CHANNEL_RIGHT_SURROUND = 4,
SKL_CHANNEL_LFE = 7,
SKL_CHANNEL_INVALID = 0xF,
};
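/*
 * A hedged sketch of how a channel map is commonly built from these
 * indices: 4 bits per channel slot, with unused slots filled with
 * SKL_CHANNEL_INVALID (0xF). The actual consumer of channel_map is the
 * topology code, so treat this encoding as illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t make_ch_map(const uint8_t *ch, int nch)
{
	uint32_t map = 0;
	int i;

	for (i = 0; i < 8; i++) {
		uint32_t idx = (i < nch) ? ch[i] : 0xF;	/* SKL_CHANNEL_INVALID */

		map |= idx << (4 * i);
	}
	return map;
}

int main(void)
{
	uint8_t stereo[] = { 0, 1 };	/* SKL_CHANNEL_LEFT, SKL_CHANNEL_RIGHT */

	/* prints 0xFFFFFF10: left in slot 0, right in slot 1 */
	printf("stereo ch_map = 0x%08X\n", make_ch_map(stereo, 2));
	return 0;
}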
enum skl_bitdepth {
SKL_DEPTH_8BIT = 8,
SKL_DEPTH_16BIT = 16,
SKL_DEPTH_24BIT = 24,
SKL_DEPTH_32BIT = 32,
SKL_DEPTH_INVALID
};
enum skl_s_freq {
SKL_FS_8000 = 8000,
SKL_FS_11025 = 11025,
SKL_FS_12000 = 12000,
SKL_FS_16000 = 16000,
SKL_FS_22050 = 22050,
SKL_FS_24000 = 24000,
SKL_FS_32000 = 32000,
SKL_FS_44100 = 44100,
SKL_FS_48000 = 48000,
SKL_FS_64000 = 64000,
SKL_FS_88200 = 88200,
SKL_FS_96000 = 96000,
SKL_FS_128000 = 128000,
SKL_FS_176400 = 176400,
SKL_FS_192000 = 192000,
SKL_FS_INVALID
};
#define SKL_MAX_PARAMS_TYPES 4
enum skl_widget_type {
SKL_WIDGET_VMIXER = 1,
SKL_WIDGET_MIXER = 2,
SKL_WIDGET_PGA = 3,
SKL_WIDGET_MUX = 4
};
struct skl_audio_data_format {
enum skl_s_freq s_freq;
enum skl_bitdepth bit_depth;
u32 channel_map;
enum skl_ch_cfg ch_cfg;
enum skl_interleaving interleaving;
u8 number_of_channels;
u8 valid_bit_depth;
u8 sample_type;
u8 reserved;
} __packed;
struct skl_base_cfg {
u32 cpc;
u32 ibs;
u32 obs;
u32 is_pages;
struct skl_audio_data_format audio_fmt;
};
struct skl_cpr_gtw_cfg {
u32 node_id;
u32 dma_buffer_size;
u32 config_length;
/* not mandatory; required only for DMIC/I2S */
struct {
u32 gtw_attrs;
u32 data[];
} config_data;
} __packed;
struct skl_dma_control {
u32 node_id;
u32 config_length;
u32 config_data[];
} __packed;
struct skl_cpr_cfg {
struct skl_base_cfg base_cfg;
struct skl_audio_data_format out_fmt;
u32 cpr_feature_mask;
struct skl_cpr_gtw_cfg gtw_cfg;
} __packed;
struct skl_cpr_pin_fmt {
u32 sink_id;
struct skl_audio_data_format src_fmt;
struct skl_audio_data_format dst_fmt;
} __packed;
struct skl_src_module_cfg {
struct skl_base_cfg base_cfg;
enum skl_s_freq src_cfg;
} __packed;
struct skl_up_down_mixer_cfg {
struct skl_base_cfg base_cfg;
enum skl_ch_cfg out_ch_cfg;
/* This should be set to 1 if user coefficients are required */
u32 coeff_sel;
/* Pass the user coeff in this array */
s32 coeff[UP_DOWN_MIXER_MAX_COEFF];
u32 ch_map;
} __packed;
struct skl_pin_format {
u32 pin_idx;
u32 buf_size;
struct skl_audio_data_format audio_fmt;
} __packed;
struct skl_base_cfg_ext {
u16 nr_input_pins;
u16 nr_output_pins;
u8 reserved[8];
u32 priv_param_length;
/* Input pin formats followed by output ones. */
struct skl_pin_format pins_fmt[];
} __packed;
struct skl_algo_cfg {
struct skl_base_cfg base_cfg;
char params[];
} __packed;
struct skl_base_outfmt_cfg {
struct skl_base_cfg base_cfg;
struct skl_audio_data_format out_fmt;
} __packed;
enum skl_dma_type {
SKL_DMA_HDA_HOST_OUTPUT_CLASS = 0,
SKL_DMA_HDA_HOST_INPUT_CLASS = 1,
SKL_DMA_HDA_HOST_INOUT_CLASS = 2,
SKL_DMA_HDA_LINK_OUTPUT_CLASS = 8,
SKL_DMA_HDA_LINK_INPUT_CLASS = 9,
SKL_DMA_HDA_LINK_INOUT_CLASS = 0xA,
SKL_DMA_DMIC_LINK_INPUT_CLASS = 0xB,
SKL_DMA_I2S_LINK_OUTPUT_CLASS = 0xC,
SKL_DMA_I2S_LINK_INPUT_CLASS = 0xD,
};
union skl_ssp_dma_node {
u8 val;
struct {
u8 time_slot_index:4;
u8 i2s_instance:4;
} dma_node;
};
union skl_connector_node_id {
u32 val;
struct {
u32 vindex:8;
u32 dma_type:4;
u32 rsvd:20;
} node;
};
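/*
 * A user-space sketch of composing a connector node id from the two
 * unions above. Bit-field layout is implementation-defined, so the
 * printed value assumes the usual LSB-first allocation the driver also
 * relies on.
 */
#include <stdint.h>
#include <stdio.h>

union node_id {			/* mirrors union skl_connector_node_id */
	uint32_t val;
	struct {
		uint32_t vindex:8;
		uint32_t dma_type:4;
		uint32_t rsvd:20;
	} node;
};

int main(void)
{
	union node_id id = { .val = 0 };

	id.node.dma_type = 0xD;		/* SKL_DMA_I2S_LINK_INPUT_CLASS */
	id.node.vindex = (2 << 4) | 1;	/* SSP2, slot 1, per skl_ssp_dma_node */

	printf("node_id = 0x%08X\n", id.val);	/* 0x00000D21 */
	return 0;
}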
struct skl_module_fmt {
u32 channels;
u32 s_freq;
u32 bit_depth;
u32 valid_bit_depth;
u32 ch_cfg;
u32 interleaving_style;
u32 sample_type;
u32 ch_map;
};
struct skl_module_cfg;
struct skl_mod_inst_map {
u16 mod_id;
u16 inst_id;
};
struct skl_uuid_inst_map {
u16 inst_id;
u16 reserved;
guid_t mod_uuid;
} __packed;
struct skl_kpb_params {
u32 num_modules;
union {
DECLARE_FLEX_ARRAY(struct skl_mod_inst_map, map);
DECLARE_FLEX_ARRAY(struct skl_uuid_inst_map, map_uuid);
} u;
};
struct skl_module_inst_id {
guid_t mod_uuid;
int module_id;
u32 instance_id;
int pvt_id;
};
enum skl_module_pin_state {
SKL_PIN_UNBIND = 0,
SKL_PIN_BIND_DONE = 1,
};
struct skl_module_pin {
struct skl_module_inst_id id;
bool is_dynamic;
bool in_use;
enum skl_module_pin_state pin_state;
struct skl_module_cfg *tgt_mcfg;
};
struct skl_specific_cfg {
u32 set_params;
u32 param_id;
u32 caps_size;
u32 *caps;
};
enum skl_pipe_state {
SKL_PIPE_INVALID = 0,
SKL_PIPE_CREATED = 1,
SKL_PIPE_PAUSED = 2,
SKL_PIPE_STARTED = 3,
SKL_PIPE_RESET = 4
};
struct skl_pipe_module {
struct snd_soc_dapm_widget *w;
struct list_head node;
};
struct skl_pipe_params {
u8 host_dma_id;
u8 link_dma_id;
u32 ch;
u32 s_freq;
u32 s_fmt;
u32 s_cont;
u8 linktype;
snd_pcm_format_t format;
int link_index;
int stream;
unsigned int host_bps;
unsigned int link_bps;
};
struct skl_pipe_fmt {
u32 freq;
u8 channels;
u8 bps;
};
struct skl_pipe_mcfg {
u8 res_idx;
u8 fmt_idx;
};
struct skl_path_config {
u8 mem_pages;
struct skl_pipe_fmt in_fmt;
struct skl_pipe_fmt out_fmt;
};
struct skl_pipe {
u8 ppl_id;
u8 pipe_priority;
u16 conn_type;
u32 memory_pages;
u8 lp_mode;
struct skl_pipe_params *p_params;
enum skl_pipe_state state;
u8 direction;
u8 cur_config_idx;
u8 nr_cfgs;
struct skl_path_config configs[SKL_MAX_PATH_CONFIGS];
struct list_head w_list;
bool passthru;
};
enum skl_module_state {
SKL_MODULE_UNINIT = 0,
SKL_MODULE_INIT_DONE = 1,
SKL_MODULE_BIND_DONE = 2,
};
enum d0i3_capability {
SKL_D0I3_NONE = 0,
SKL_D0I3_STREAMING = 1,
SKL_D0I3_NON_STREAMING = 2,
};
struct skl_module_pin_fmt {
u8 id;
struct skl_module_fmt fmt;
};
struct skl_module_iface {
u8 fmt_idx;
u8 nr_in_fmt;
u8 nr_out_fmt;
struct skl_module_pin_fmt inputs[MAX_IN_QUEUE];
struct skl_module_pin_fmt outputs[MAX_OUT_QUEUE];
};
struct skl_module_pin_resources {
u8 pin_index;
u32 buf_size;
};
struct skl_module_res {
u8 id;
u32 is_pages;
u32 ibs;
u32 obs;
u32 dma_buffer_size;
u32 cpc;
u8 nr_input_pins;
u8 nr_output_pins;
struct skl_module_pin_resources input[MAX_IN_QUEUE];
struct skl_module_pin_resources output[MAX_OUT_QUEUE];
};
struct skl_module {
guid_t uuid;
u8 loadable;
u8 input_pin_type;
u8 output_pin_type;
u8 max_input_pins;
u8 max_output_pins;
u8 nr_resources;
u8 nr_interfaces;
struct skl_module_res resources[SKL_MAX_MODULE_RESOURCES];
struct skl_module_iface formats[SKL_MAX_MODULE_FORMATS];
};
struct skl_module_cfg {
u8 guid[16];
struct skl_module_inst_id id;
struct skl_module *module;
int res_idx;
int fmt_idx;
int fmt_cfg_idx;
u8 domain;
bool homogenous_inputs;
bool homogenous_outputs;
struct skl_module_fmt in_fmt[MODULE_MAX_IN_PINS];
struct skl_module_fmt out_fmt[MODULE_MAX_OUT_PINS];
u8 max_in_queue;
u8 max_out_queue;
u8 in_queue_mask;
u8 out_queue_mask;
u8 in_queue;
u8 out_queue;
u8 is_loadable;
u8 core_id;
u8 dev_type;
u8 dma_id;
u8 time_slot;
u8 dmic_ch_combo_index;
u32 dmic_ch_type;
u32 params_fixup;
u32 converter;
u32 vbus_id;
u32 mem_pages;
enum d0i3_capability d0i3_caps;
u32 dma_buffer_size; /* in milliseconds */
struct skl_module_pin *m_in_pin;
struct skl_module_pin *m_out_pin;
enum skl_module_type m_type;
enum skl_hw_conn_type hw_conn_type;
enum skl_module_state m_state;
struct skl_pipe *pipe;
struct skl_specific_cfg formats_config[SKL_MAX_PARAMS_TYPES];
struct skl_pipe_mcfg mod_cfg[SKL_MAX_MODULES_IN_PIPE];
};
struct skl_algo_data {
u32 param_id;
u32 set_params;
u32 max;
u32 size;
char *params;
};
struct skl_pipeline {
struct skl_pipe *pipe;
struct list_head node;
};
struct skl_module_deferred_bind {
struct skl_module_cfg *src;
struct skl_module_cfg *dst;
struct list_head node;
};
struct skl_mic_sel_config {
u16 mic_switch;
u16 flags;
u16 blob[SKL_MIC_MAX_CH_SUPPORT][SKL_MIC_MAX_CH_SUPPORT];
} __packed;
enum skl_channel {
SKL_CH_MONO = 1,
SKL_CH_STEREO = 2,
SKL_CH_TRIO = 3,
SKL_CH_QUATRO = 4,
};
static inline struct skl_dev *get_skl_ctx(struct device *dev)
{
struct hdac_bus *bus = dev_get_drvdata(dev);
return bus_to_skl(bus);
}
int skl_tplg_be_update_params(struct snd_soc_dai *dai,
struct skl_pipe_params *params);
int skl_dsp_set_dma_control(struct skl_dev *skl, u32 *caps,
u32 caps_size, u32 node_id);
void skl_tplg_set_be_dmic_config(struct snd_soc_dai *dai,
struct skl_pipe_params *params, int stream);
int skl_tplg_init(struct snd_soc_component *component,
struct hdac_bus *bus);
void skl_tplg_exit(struct snd_soc_component *component,
struct hdac_bus *bus);
struct skl_module_cfg *skl_tplg_fe_get_cpr_module(
struct snd_soc_dai *dai, int stream);
int skl_tplg_update_pipe_params(struct device *dev,
struct skl_module_cfg *mconfig, struct skl_pipe_params *params);
void skl_tplg_d0i3_get(struct skl_dev *skl, enum d0i3_capability caps);
void skl_tplg_d0i3_put(struct skl_dev *skl, enum d0i3_capability caps);
int skl_create_pipeline(struct skl_dev *skl, struct skl_pipe *pipe);
int skl_run_pipe(struct skl_dev *skl, struct skl_pipe *pipe);
int skl_pause_pipe(struct skl_dev *skl, struct skl_pipe *pipe);
int skl_delete_pipe(struct skl_dev *skl, struct skl_pipe *pipe);
int skl_stop_pipe(struct skl_dev *skl, struct skl_pipe *pipe);
int skl_reset_pipe(struct skl_dev *skl, struct skl_pipe *pipe);
int skl_init_module(struct skl_dev *skl, struct skl_module_cfg *mconfig);
int skl_bind_modules(struct skl_dev *skl, struct skl_module_cfg
*src_mcfg, struct skl_module_cfg *dst_mcfg);
int skl_unbind_modules(struct skl_dev *skl, struct skl_module_cfg
*src_mcfg, struct skl_module_cfg *dst_mcfg);
int skl_set_module_params(struct skl_dev *skl, u32 *params, int size,
u32 param_id, struct skl_module_cfg *mcfg);
int skl_get_module_params(struct skl_dev *skl, u32 *params, int size,
u32 param_id, struct skl_module_cfg *mcfg);
struct skl_module_cfg *skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai,
int stream);
enum skl_bitdepth skl_get_bit_depth(int params);
int skl_pcm_host_dma_prepare(struct device *dev,
struct skl_pipe_params *params);
int skl_pcm_link_dma_prepare(struct device *dev,
struct skl_pipe_params *params);
int skl_dai_load(struct snd_soc_component *cmp, int index,
struct snd_soc_dai_driver *dai_drv,
struct snd_soc_tplg_pcm *pcm, struct snd_soc_dai *dai);
void skl_tplg_add_moduleid_in_bind_params(struct skl_dev *skl,
struct snd_soc_dapm_widget *w);
#endif

File diff suppressed because it is too large


@@ -1,207 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* skl.h - HD Audio skylake definitions.
*
* Copyright (C) 2015 Intel Corp
* Author: Jeeja KP <jeeja.kp@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#ifndef __SOUND_SOC_SKL_H
#define __SOUND_SOC_SKL_H
#include <sound/hda_register.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda_codec.h>
#include <sound/soc.h>
#include "skl-ssp-clk.h"
#include "skl-sst-ipc.h"
#define SKL_SUSPEND_DELAY 2000
#define SKL_MAX_ASTATE_CFG 3
#define AZX_PCIREG_PGCTL 0x44
#define AZX_PGCTL_LSRMD_MASK (1 << 4)
#define AZX_PGCTL_ADSPPGD BIT(2)
#define AZX_PCIREG_CGCTL 0x48
#define AZX_CGCTL_MISCBDCGE_MASK (1 << 6)
#define AZX_CGCTL_ADSPDCGE BIT(1)
/* D0I3C Register fields */
#define AZX_REG_VS_D0I3C_CIP 0x1 /* Command in progress */
#define AZX_REG_VS_D0I3C_I3 0x4 /* D0i3 enable */
#define SKL_MAX_DMACTRL_CFG 18
#define DMA_CLK_CONTROLS 1
#define DMA_TRANSMITION_START 2
#define DMA_TRANSMITION_STOP 3
#define AZX_VS_EM2_DUM BIT(23)
#define AZX_REG_VS_EM2_L1SEN BIT(13)
struct skl_debug;
struct skl_astate_param {
u32 kcps;
u32 clk_src;
};
struct skl_astate_config {
u32 count;
struct skl_astate_param astate_table[];
};
struct skl_fw_config {
struct skl_astate_config *astate_cfg;
};
struct skl_dev {
struct hda_bus hbus;
struct pci_dev *pci;
unsigned int init_done:1; /* delayed init status */
struct platform_device *dmic_dev;
struct platform_device *i2s_dev;
struct platform_device *clk_dev;
struct snd_soc_component *component;
struct snd_soc_dai_driver *dais;
struct nhlt_acpi_table *nhlt; /* nhlt ptr */
struct list_head ppl_list;
struct list_head bind_list;
const char *fw_name;
char tplg_name[64];
unsigned short pci_id;
int supend_active;
struct work_struct probe_work;
struct skl_debug *debugfs;
u8 nr_modules;
struct skl_module **modules;
bool use_tplg_pcm;
struct skl_fw_config cfg;
struct snd_soc_acpi_mach *mach;
struct device *dev;
struct sst_dsp *dsp;
/* boot */
wait_queue_head_t boot_wait;
bool boot_complete;
/* module load */
wait_queue_head_t mod_load_wait;
bool mod_load_complete;
bool mod_load_status;
/* IPC messaging */
struct sst_generic_ipc ipc;
/* callback for MISCBDCGE */
void (*enable_miscbdcge)(struct device *dev, bool enable);
/* Is CGCTL.MISCBDCGE disabled */
bool miscbdcg_disabled;
/* Populate module information */
struct list_head uuid_list;
/* Is firmware loaded */
bool fw_loaded;
/* first boot ? */
bool is_first_boot;
/* multi-core */
struct skl_dsp_cores cores;
/* library info */
struct skl_lib_info lib_info[SKL_MAX_LIB];
int lib_count;
/* Callback to update D0i3C register */
void (*update_d0i3c)(struct device *dev, bool enable);
struct skl_d0i3_data d0i3;
const struct skl_dsp_ops *dsp_ops;
/* Callback to update dynamic clock and power gating registers */
void (*clock_power_gating)(struct device *dev, bool enable);
};
#define skl_to_bus(s) (&(s)->hbus.core)
#define bus_to_skl(bus) container_of(bus, struct skl_dev, hbus.core)
#define skl_to_hbus(s) (&(s)->hbus)
#define hbus_to_skl(b) container_of(b, struct skl_dev, hbus)
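/*
 * A runnable sketch of the container_of pattern behind bus_to_skl() and
 * hbus_to_skl(): recover the enclosing skl_dev from a pointer to its
 * embedded bus member.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bus { int id; };

struct skl_like {
	int data;
	struct bus core;	/* embedded member, like hbus.core */
};

int main(void)
{
	struct skl_like skl = { .data = 42 };
	struct bus *bus = &skl.core;
	struct skl_like *back = container_of(bus, struct skl_like, core);

	printf("recovered data = %d\n", back->data);
	return 0;
}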
/* to pass dai dma data */
struct skl_dma_params {
u32 format;
u8 stream_tag;
};
struct skl_machine_pdata {
bool use_tplg_pcm; /* use dais and dai links from topology */
};
struct skl_dsp_ops {
int id;
unsigned int num_cores;
struct skl_dsp_loader_ops (*loader_ops)(void);
int (*init)(struct device *dev, void __iomem *mmio_base,
int irq, const char *fw_name,
struct skl_dsp_loader_ops loader_ops,
struct skl_dev **skl_sst);
int (*init_fw)(struct device *dev, struct skl_dev *skl);
void (*cleanup)(struct device *dev, struct skl_dev *skl);
};
int skl_platform_unregister(struct device *dev);
int skl_platform_register(struct device *dev);
int skl_nhlt_update_topology_bin(struct skl_dev *skl);
int skl_init_dsp(struct skl_dev *skl);
int skl_free_dsp(struct skl_dev *skl);
int skl_suspend_late_dsp(struct skl_dev *skl);
int skl_suspend_dsp(struct skl_dev *skl);
int skl_resume_dsp(struct skl_dev *skl);
void skl_cleanup_resources(struct skl_dev *skl);
const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id);
void skl_update_d0i3c(struct device *dev, bool enable);
int skl_nhlt_create_sysfs(struct skl_dev *skl);
void skl_nhlt_remove_sysfs(struct skl_dev *skl);
void skl_get_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks);
struct skl_clk_parent_src *skl_get_parent_clk(u8 clk_id);
int skl_dsp_set_dma_control(struct skl_dev *skl, u32 *caps,
u32 caps_size, u32 node_id);
struct skl_module_cfg;
#ifdef CONFIG_DEBUG_FS
struct skl_debug *skl_debugfs_init(struct skl_dev *skl);
void skl_debugfs_exit(struct skl_dev *skl);
void skl_debug_init_module(struct skl_debug *d,
struct snd_soc_dapm_widget *w,
struct skl_module_cfg *mconfig);
#else
static inline struct skl_debug *skl_debugfs_init(struct skl_dev *skl)
{
return NULL;
}
static inline void skl_debugfs_exit(struct skl_dev *skl)
{}
static inline void skl_debug_init_module(struct skl_debug *d,
struct snd_soc_dapm_widget *w,
struct skl_module_cfg *mconfig)
{}
#endif
#endif /* __SOUND_SOC_SKL_H */