From 4e34bb68beec288e6fbc71170ff6d3ed39b76ce6 Mon Sep 17 00:00:00 2001
From: Bingbu Cao <bingbu.cao@intel.com>
Date: Thu, 11 Jan 2024 14:55:15 +0800
Subject: [PATCH 01/31] media: intel/ipu6: add Intel IPU6 PCI device driver

Intel Image Processing Unit 6th Gen includes input and processing systems,
but the hardware presents itself as a single PCI device in the system.

The IPU6 PCI device driver basically does the PCI configuration, loads
the firmware binary, initialises the IPU virtual bus, and sets up
platform-specific variants to support multiple IPU6 devices in a single
device driver.

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Link: https://lore.kernel.org/r/20240111065531.2418836-2-bingbu.cao@intel.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 .../media/pci/intel/ipu6/ipu6-platform-regs.h | 179 ++++
 drivers/media/pci/intel/ipu6/ipu6.c           | 966 ++++++++++++++++++
 drivers/media/pci/intel/ipu6/ipu6.h           | 356 +++++++
 3 files changed, 1501 insertions(+)
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-platform-regs.h
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6.h

diff --git a/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h b/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h
new file mode 100644
index 000000000000..cae26009c9fc
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2018 - 2023 Intel Corporation */
+
+#ifndef IPU6_PLATFORM_REGS_H
+#define IPU6_PLATFORM_REGS_H
+
+#include <linux/bits.h>
+
+/*
+ * IPU6 uses a unified address space: all subsystem registers live in one
+ * single space starting from 0, but in different sections at different
+ * addresses. The subsystem offset is defined as 0 because each register
+ * definition already includes its full address offset.
+ */
+#define IPU6_UNIFIED_OFFSET			0
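+/*
+ * For example, with this unified addressing a subsystem register is reached
+ * by adding its offsets (defined below) to the BAR0 mapping, roughly:
+ *   readl(isp->base + IPU6_ISYS_IOMMU0_OFFSET + IPU6_MMU_INFO_OFFSET);
+ */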
+
+#define IPU6_ISYS_IOMMU0_OFFSET		0x2e0000
+#define IPU6_ISYS_IOMMU1_OFFSET		0x2e0500
+#define IPU6_ISYS_IOMMUI_OFFSET		0x2e0a00
+
+#define IPU6_PSYS_IOMMU0_OFFSET		0x1b0000
+#define IPU6_PSYS_IOMMU1_OFFSET		0x1b0700
+#define IPU6_PSYS_IOMMU1R_OFFSET	0x1b0e00
+#define IPU6_PSYS_IOMMUI_OFFSET		0x1b1500
+
+/* the offset from IOMMU base register */
+#define IPU6_MMU_L1_STREAM_ID_REG_OFFSET	0x0c
+#define IPU6_MMU_L2_STREAM_ID_REG_OFFSET	0x4c
+#define IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET	0x8c
+
+#define IPU6_MMU_INFO_OFFSET		0x8
+
+#define IPU6_ISYS_SPC_OFFSET		0x210000
+
+#define IPU6SE_PSYS_SPC_OFFSET		0x110000
+#define IPU6_PSYS_SPC_OFFSET		0x118000
+
+#define IPU6_ISYS_DMEM_OFFSET		0x200000
+#define IPU6_PSYS_DMEM_OFFSET		0x100000
+
+#define IPU6_REG_ISYS_UNISPART_IRQ_EDGE			0x27c000
+#define IPU6_REG_ISYS_UNISPART_IRQ_MASK			0x27c004
+#define IPU6_REG_ISYS_UNISPART_IRQ_STATUS		0x27c008
+#define IPU6_REG_ISYS_UNISPART_IRQ_CLEAR		0x27c00c
+#define IPU6_REG_ISYS_UNISPART_IRQ_ENABLE		0x27c010
+#define IPU6_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE	0x27c014
+#define IPU6_REG_ISYS_UNISPART_SW_IRQ_REG		0x27c414
+#define IPU6_REG_ISYS_UNISPART_SW_IRQ_MUX_REG		0x27c418
+#define IPU6_ISYS_UNISPART_IRQ_CSI0			BIT(2)
+#define IPU6_ISYS_UNISPART_IRQ_CSI1			BIT(3)
+#define IPU6_ISYS_UNISPART_IRQ_SW			BIT(22)
+
+#define IPU6_REG_ISYS_ISL_TOP_IRQ_EDGE			0x2b0200
+#define IPU6_REG_ISYS_ISL_TOP_IRQ_MASK			0x2b0204
+#define IPU6_REG_ISYS_ISL_TOP_IRQ_STATUS		0x2b0208
+#define IPU6_REG_ISYS_ISL_TOP_IRQ_CLEAR			0x2b020c
+#define IPU6_REG_ISYS_ISL_TOP_IRQ_ENABLE		0x2b0210
+#define IPU6_REG_ISYS_ISL_TOP_IRQ_LEVEL_NOT_PULSE	0x2b0214
+
+#define IPU6_REG_ISYS_CMPR_TOP_IRQ_EDGE			0x2d2100
+#define IPU6_REG_ISYS_CMPR_TOP_IRQ_MASK			0x2d2104
+#define IPU6_REG_ISYS_CMPR_TOP_IRQ_STATUS		0x2d2108
+#define IPU6_REG_ISYS_CMPR_TOP_IRQ_CLEAR		0x2d210c
+#define IPU6_REG_ISYS_CMPR_TOP_IRQ_ENABLE		0x2d2110
+#define IPU6_REG_ISYS_CMPR_TOP_IRQ_LEVEL_NOT_PULSE	0x2d2114
+
+/* CDC Burst collector thresholds for isys - 3 FIFOs i = 0..2 */
+#define IPU6_REG_ISYS_CDC_THRESHOLD(i)		(0x27c400 + ((i) * 4))
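+/*
+ * For example, IPU6_REG_ISYS_CDC_THRESHOLD(1) is register 0x27c404; the
+ * default thresholds for the 3 isys FIFOs ({6, 8, 2}) are provided through
+ * isys_ipdata.hw_variant.cdc_fifo_threshold in ipu6.c.
+ */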
+
+#define IPU6_CSI_IRQ_NUM_PER_PIPE			4
+#define IPU6SE_ISYS_CSI_PORT_NUM			4
+#define IPU6_ISYS_CSI_PORT_NUM				8
+
+#define IPU6_ISYS_CSI_PORT_IRQ(irq_num)		BIT(irq_num)
+
+/* PKG DIR OFFSET in IMR in secure mode */
+#define IPU6_PKG_DIR_IMR_OFFSET			0x40
+
+#define IPU6_ISYS_REG_SPC_STATUS_CTRL		0x0
+
+#define IPU6_ISYS_SPC_STATUS_START			BIT(1)
+#define IPU6_ISYS_SPC_STATUS_RUN			BIT(3)
+#define IPU6_ISYS_SPC_STATUS_READY			BIT(5)
+#define IPU6_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE	BIT(12)
+#define IPU6_ISYS_SPC_STATUS_ICACHE_PREFETCH		BIT(13)
+
+#define IPU6_PSYS_REG_SPC_STATUS_CTRL			0x0
+#define IPU6_PSYS_REG_SPC_START_PC			0x4
+#define IPU6_PSYS_REG_SPC_ICACHE_BASE			0x10
+#define IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER	0x14
+
+#define IPU6_PSYS_SPC_STATUS_START			BIT(1)
+#define IPU6_PSYS_SPC_STATUS_RUN			BIT(3)
+#define IPU6_PSYS_SPC_STATUS_READY			BIT(5)
+#define IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE	BIT(12)
+#define IPU6_PSYS_SPC_STATUS_ICACHE_PREFETCH		BIT(13)
+
+#define IPU6_PSYS_REG_SPP0_STATUS_CTRL			0x20000
+
+#define IPU6_INFO_ENABLE_SNOOP			BIT(0)
+#define IPU6_INFO_DEC_FORCE_FLUSH		BIT(1)
+#define IPU6_INFO_DEC_PASS_THROUGH		BIT(2)
+#define IPU6_INFO_ZLW				BIT(3)
+#define IPU6_INFO_REQUEST_DESTINATION_IOSF	BIT(9)
+#define IPU6_INFO_IMR_BASE			BIT(10)
+#define IPU6_INFO_IMR_DESTINED			BIT(11)
+
+#define IPU6_INFO_REQUEST_DESTINATION_PRIMARY IPU6_INFO_REQUEST_DESTINATION_IOSF
+
+/*
+ * s2m_pixel_soc_pixel_remapping is dedicated to enabling the pixel s2m
+ * remapping ability. Remapping here means that s2m rearranges the order
+ * of the pixels in each group of 4 pixels.
+ * For example, mirroring remapping means that if the input's first 4 pixels
+ * are 1 2 3 4, then the output's first 4 pixels are 4 3 2 1.
+ * 0xE4 comes from the s2m MAS document. It means no remapping.
+ */
+#define S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xe4
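+/*
+ * 0xe4 = 0b11100100, i.e. four 2-bit source selectors (3, 2, 1, 0): each of
+ * the 4 output positions presumably takes the pixel from the same input
+ * position, which is why this value encodes "no remapping".
+ */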
+/*
+ * csi_be_soc_pixel_remapping is for the enabling of the pixel remapping.
+ * This remapping is exactly like the stream2mmio remapping.
+ */
+#define CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING    0xe4
+
+#define IPU6_REG_DMA_TOP_AB_GROUP1_BASE_ADDR		0x1ae000
+#define IPU6_REG_DMA_TOP_AB_GROUP2_BASE_ADDR		0x1af000
+#define IPU6_REG_DMA_TOP_AB_RING_MIN_OFFSET(n)		(0x4 + (n) * 0xc)
+#define IPU6_REG_DMA_TOP_AB_RING_MAX_OFFSET(n)		(0x8 + (n) * 0xc)
+#define IPU6_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(n)	(0xc + (n) * 0xc)
+
+enum ipu6_device_ab_group1_target_id {
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R0_SPC_DMEM,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R1_SPC_DMEM,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R2_SPC_DMEM,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R6_SPC_EQ,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R7_SPC_RESERVED,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R8_SPC_RESERVED,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R9_SPP0,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R10_SPP1,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R11_CENTRAL_R1,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R12_IRQ,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R13_CENTRAL_R2,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R14_DMA,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R15_DMA,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R16_GP,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R17_ZLW_INSERTER,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R18_AB,
+};
+
+enum nci_ab_access_mode {
+	NCI_AB_ACCESS_MODE_RW,	/* read & write */
+	NCI_AB_ACCESS_MODE_RO,	/* read only */
+	NCI_AB_ACCESS_MODE_WO,	/* write only */
+	NCI_AB_ACCESS_MODE_NA,	/* No access at all */
+};
+
+/* IRQ-related registers in PSYS */
+#define IPU6_REG_PSYS_GPDEV_IRQ_EDGE		0x1aa200
+#define IPU6_REG_PSYS_GPDEV_IRQ_MASK		0x1aa204
+#define IPU6_REG_PSYS_GPDEV_IRQ_STATUS		0x1aa208
+#define IPU6_REG_PSYS_GPDEV_IRQ_CLEAR		0x1aa20c
+#define IPU6_REG_PSYS_GPDEV_IRQ_ENABLE		0x1aa210
+#define IPU6_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE	0x1aa214
+/* There are 8 FW interrupts, n = 0..7 */
+#define IPU6_PSYS_GPDEV_FWIRQ0			5
+#define IPU6_PSYS_GPDEV_FWIRQ1			6
+#define IPU6_PSYS_GPDEV_FWIRQ2			7
+#define IPU6_PSYS_GPDEV_FWIRQ3			8
+#define IPU6_PSYS_GPDEV_FWIRQ4			9
+#define IPU6_PSYS_GPDEV_FWIRQ5			10
+#define IPU6_PSYS_GPDEV_FWIRQ6			11
+#define IPU6_PSYS_GPDEV_FWIRQ7			12
+#define IPU6_PSYS_GPDEV_IRQ_FWIRQ(n)		BIT(n)
+#define IPU6_REG_PSYS_GPDEV_FWIRQ(n)		(4 * (n) + 0x1aa100)
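+/*
+ * For example, IPU6_PSYS_GPDEV_IRQ_FWIRQ(IPU6_PSYS_GPDEV_FWIRQ0) expands to
+ * BIT(5) in the GPDEV IRQ registers above, and IPU6_REG_PSYS_GPDEV_FWIRQ(0)
+ * is register 0x1aa100.
+ */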
+
+#endif /* IPU6_PLATFORM_REGS_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c
new file mode 100644
index 000000000000..abd40a783729
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6.c
@@ -0,0 +1,966 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2023 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci-ats.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <media/ipu-bridge.h>
+
+#include "ipu6.h"
+#include "ipu6-bus.h"
+#include "ipu6-buttress.h"
+#include "ipu6-cpd.h"
+#include "ipu6-isys.h"
+#include "ipu6-mmu.h"
+#include "ipu6-platform-buttress-regs.h"
+#include "ipu6-platform-isys-csi2-reg.h"
+#include "ipu6-platform-regs.h"
+
+#define IPU6_PCI_BAR		0
+
+struct ipu6_cell_program {
+	u32 magic_number;
+
+	u32 blob_offset;
+	u32 blob_size;
+
+	u32 start[3];
+
+	u32 icache_source;
+	u32 icache_target;
+	u32 icache_size;
+
+	u32 pmem_source;
+	u32 pmem_target;
+	u32 pmem_size;
+
+	u32 data_source;
+	u32 data_target;
+	u32 data_size;
+
+	u32 bss_target;
+	u32 bss_size;
+
+	u32 cell_id;
+	u32 regs_addr;
+
+	u32 cell_pmem_data_bus_address;
+	u32 cell_dmem_data_bus_address;
+	u32 cell_pmem_control_bus_address;
+	u32 cell_dmem_control_bus_address;
+
+	u32 next;
+	u32 dummy[2];
+};
+
+static u32 ipu6se_csi_offsets[] = {
+	IPU6_CSI_PORT_A_ADDR_OFFSET,
+	IPU6_CSI_PORT_B_ADDR_OFFSET,
+	IPU6_CSI_PORT_C_ADDR_OFFSET,
+	IPU6_CSI_PORT_D_ADDR_OFFSET
+};
+
+/*
+ * IPU6 on TGL supports a maximum of 8 csi2 ports
+ * IPU6SE on JSL and IPU6EP on ADL support a maximum of 4 csi2 ports
+ * IPU6EP on MTL supports a maximum of 6 csi2 ports
+ */
+static u32 ipu6_tgl_csi_offsets[] = {
+	IPU6_CSI_PORT_A_ADDR_OFFSET,
+	IPU6_CSI_PORT_B_ADDR_OFFSET,
+	IPU6_CSI_PORT_C_ADDR_OFFSET,
+	IPU6_CSI_PORT_D_ADDR_OFFSET,
+	IPU6_CSI_PORT_E_ADDR_OFFSET,
+	IPU6_CSI_PORT_F_ADDR_OFFSET,
+	IPU6_CSI_PORT_G_ADDR_OFFSET,
+	IPU6_CSI_PORT_H_ADDR_OFFSET
+};
+
+static u32 ipu6ep_mtl_csi_offsets[] = {
+	IPU6_CSI_PORT_A_ADDR_OFFSET,
+	IPU6_CSI_PORT_B_ADDR_OFFSET,
+	IPU6_CSI_PORT_C_ADDR_OFFSET,
+	IPU6_CSI_PORT_D_ADDR_OFFSET,
+	IPU6_CSI_PORT_E_ADDR_OFFSET,
+	IPU6_CSI_PORT_F_ADDR_OFFSET
+};
+
+static u32 ipu6_csi_offsets[] = {
+	IPU6_CSI_PORT_A_ADDR_OFFSET,
+	IPU6_CSI_PORT_B_ADDR_OFFSET,
+	IPU6_CSI_PORT_C_ADDR_OFFSET,
+	IPU6_CSI_PORT_D_ADDR_OFFSET
+};
+
+static struct ipu6_isys_internal_pdata isys_ipdata = {
+	.hw_variant = {
+		.offset = IPU6_UNIFIED_OFFSET,
+		.nr_mmus = 3,
+		.mmu_hw = {
+			{
+				.offset = IPU6_ISYS_IOMMU0_OFFSET,
+				.info_bits = IPU6_INFO_REQUEST_DESTINATION_IOSF,
+				.nr_l1streams = 16,
+				.l1_block_sz = {
+					3, 8, 2, 2, 2, 2, 2, 2, 1, 1,
+					1, 1, 1, 1, 1, 1
+				},
+				.nr_l2streams = 16,
+				.l2_block_sz = {
+					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+					2, 2, 2, 2, 2, 2
+				},
+				.insert_read_before_invalidate = false,
+				.l1_stream_id_reg_offset =
+				IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
+				.l2_stream_id_reg_offset =
+				IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
+			},
+			{
+				.offset = IPU6_ISYS_IOMMU1_OFFSET,
+				.info_bits = 0,
+				.nr_l1streams = 16,
+				.l1_block_sz = {
+					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+					2, 2, 2, 1, 1, 4
+				},
+				.nr_l2streams = 16,
+				.l2_block_sz = {
+					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+					2, 2, 2, 2, 2, 2
+				},
+				.insert_read_before_invalidate = false,
+				.l1_stream_id_reg_offset =
+				IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
+				.l2_stream_id_reg_offset =
+				IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
+			},
+			{
+				.offset = IPU6_ISYS_IOMMUI_OFFSET,
+				.info_bits = 0,
+				.nr_l1streams = 0,
+				.nr_l2streams = 0,
+				.insert_read_before_invalidate = false,
+			},
+		},
+		.cdc_fifos = 3,
+		.cdc_fifo_threshold = {6, 8, 2},
+		.dmem_offset = IPU6_ISYS_DMEM_OFFSET,
+		.spc_offset = IPU6_ISYS_SPC_OFFSET,
+	},
+	.isys_dma_overshoot = IPU6_ISYS_OVERALLOC_MIN,
+};
+
+static struct ipu6_psys_internal_pdata psys_ipdata = {
+	.hw_variant = {
+		.offset = IPU6_UNIFIED_OFFSET,
+		.nr_mmus = 4,
+		.mmu_hw = {
+			{
+				.offset = IPU6_PSYS_IOMMU0_OFFSET,
+				.info_bits =
+				IPU6_INFO_REQUEST_DESTINATION_IOSF,
+				.nr_l1streams = 16,
+				.l1_block_sz = {
+					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+					2, 2, 2, 2, 2, 2
+				},
+				.nr_l2streams = 16,
+				.l2_block_sz = {
+					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+					2, 2, 2, 2, 2, 2
+				},
+				.insert_read_before_invalidate = false,
+				.l1_stream_id_reg_offset =
+				IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
+				.l2_stream_id_reg_offset =
+				IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
+			},
+			{
+				.offset = IPU6_PSYS_IOMMU1_OFFSET,
+				.info_bits = 0,
+				.nr_l1streams = 32,
+				.l1_block_sz = {
+					1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+					2, 2, 2, 2, 2, 10,
+					5, 4, 14, 6, 4, 14, 6, 4, 8,
+					4, 2, 1, 1, 1, 1, 14
+				},
+				.nr_l2streams = 32,
+				.l2_block_sz = {
+					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+					2, 2, 2, 2, 2, 2,
+					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+					2, 2, 2, 2, 2, 2
+				},
+				.insert_read_before_invalidate = false,
+				.l1_stream_id_reg_offset =
+				IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
+				.l2_stream_id_reg_offset =
+				IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET,
+			},
+			{
+				.offset = IPU6_PSYS_IOMMU1R_OFFSET,
+				.info_bits = 0,
+				.nr_l1streams = 16,
+				.l1_block_sz = {
+					1, 4, 4, 4, 4, 16, 8, 4, 32,
+					16, 16, 2, 2, 2, 1, 12
+				},
+				.nr_l2streams = 16,
+				.l2_block_sz = {
+					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+					2, 2, 2, 2, 2, 2
+				},
+				.insert_read_before_invalidate = false,
+				.l1_stream_id_reg_offset =
+				IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
+				.l2_stream_id_reg_offset =
+				IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
+			},
+			{
+				.offset = IPU6_PSYS_IOMMUI_OFFSET,
+				.info_bits = 0,
+				.nr_l1streams = 0,
+				.nr_l2streams = 0,
+				.insert_read_before_invalidate = false,
+			},
+		},
+		.dmem_offset = IPU6_PSYS_DMEM_OFFSET,
+	},
+};
+
+static const struct ipu6_buttress_ctrl isys_buttress_ctrl = {
+	.ratio = IPU6_IS_FREQ_CTL_DEFAULT_RATIO,
+	.qos_floor = IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
+	.freq_ctl = IPU6_BUTTRESS_REG_IS_FREQ_CTL,
+	.pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT,
+	.pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK,
+	.pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE,
+	.pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE,
+};
+
+static const struct ipu6_buttress_ctrl psys_buttress_ctrl = {
+	.ratio = IPU6_PS_FREQ_CTL_DEFAULT_RATIO,
+	.qos_floor = IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
+	.freq_ctl = IPU6_BUTTRESS_REG_PS_FREQ_CTL,
+	.pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT,
+	.pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK,
+	.pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE,
+	.pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE,
+};
+
+static void
+ipu6_pkg_dir_configure_spc(struct ipu6_device *isp,
+			   const struct ipu6_hw_variants *hw_variant,
+			   int pkg_dir_idx, void __iomem *base,
+			   u64 *pkg_dir, dma_addr_t pkg_dir_vied_address)
+{
+	struct ipu6_cell_program *prog;
+	void __iomem *spc_base;
+	u32 server_fw_addr;
+	dma_addr_t dma_addr;
+	u32 pg_offset;
+
+	server_fw_addr = lower_32_bits(*(pkg_dir + (pkg_dir_idx + 1) * 2));
+	if (pkg_dir_idx == IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX)
+		dma_addr = sg_dma_address(isp->isys->fw_sgt.sgl);
+	else
+		dma_addr = sg_dma_address(isp->psys->fw_sgt.sgl);
+
+	pg_offset = server_fw_addr - dma_addr;
+	prog = (struct ipu6_cell_program *)((u64)isp->cpd_fw->data + pg_offset);
+	spc_base = base + prog->regs_addr;
+	if (spc_base != (base + hw_variant->spc_offset))
+		dev_warn(&isp->pdev->dev,
+			 "SPC reg addr %p not matching value from CPD %p\n",
+			 base + hw_variant->spc_offset, spc_base);
+	writel(server_fw_addr + prog->blob_offset +
+	       prog->icache_source, spc_base + IPU6_PSYS_REG_SPC_ICACHE_BASE);
+	writel(IPU6_INFO_REQUEST_DESTINATION_IOSF,
+	       spc_base + IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER);
+	writel(prog->start[1], spc_base + IPU6_PSYS_REG_SPC_START_PC);
+	writel(pkg_dir_vied_address, base + hw_variant->dmem_offset);
+}
+
+void ipu6_configure_spc(struct ipu6_device *isp,
+			const struct ipu6_hw_variants *hw_variant,
+			int pkg_dir_idx, void __iomem *base, u64 *pkg_dir,
+			dma_addr_t pkg_dir_dma_addr)
+{
+	void __iomem *dmem_base = base + hw_variant->dmem_offset;
+	void __iomem *spc_regs_base = base + hw_variant->spc_offset;
+	u32 val;
+
+	val = readl(spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL);
+	val |= IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE;
+	writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL);
+
+	if (isp->secure_mode)
+		writel(IPU6_PKG_DIR_IMR_OFFSET, dmem_base);
+	else
+		ipu6_pkg_dir_configure_spc(isp, hw_variant, pkg_dir_idx, base,
+					   pkg_dir, pkg_dir_dma_addr);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_configure_spc, INTEL_IPU6);
+
+static void ipu6_internal_pdata_init(struct ipu6_device *isp)
+{
+	u8 hw_ver = isp->hw_ver;
+
+	isys_ipdata.num_parallel_streams = IPU6_ISYS_NUM_STREAMS;
+	isys_ipdata.sram_gran_shift = IPU6_SRAM_GRANULARITY_SHIFT;
+	isys_ipdata.sram_gran_size = IPU6_SRAM_GRANULARITY_SIZE;
+	isys_ipdata.max_sram_size = IPU6_MAX_SRAM_SIZE;
+	isys_ipdata.sensor_type_start = IPU6_FW_ISYS_SENSOR_TYPE_START;
+	isys_ipdata.sensor_type_end = IPU6_FW_ISYS_SENSOR_TYPE_END;
+	isys_ipdata.max_streams = IPU6_ISYS_NUM_STREAMS;
+	isys_ipdata.max_send_queues = IPU6_N_MAX_SEND_QUEUES;
+	isys_ipdata.max_sram_blocks = IPU6_NOF_SRAM_BLOCKS_MAX;
+	isys_ipdata.max_devq_size = IPU6_DEV_SEND_QUEUE_SIZE;
+	isys_ipdata.csi2.nports = ARRAY_SIZE(ipu6_csi_offsets);
+	isys_ipdata.csi2.offsets = ipu6_csi_offsets;
+	isys_ipdata.csi2.irq_mask = IPU6_CSI_RX_ERROR_IRQ_MASK;
+	isys_ipdata.csi2.ctrl0_irq_edge = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE;
+	isys_ipdata.csi2.ctrl0_irq_clear =
+		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR;
+	isys_ipdata.csi2.ctrl0_irq_mask = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK;
+	isys_ipdata.csi2.ctrl0_irq_enable =
+		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE;
+	isys_ipdata.csi2.ctrl0_irq_status =
+		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS;
+	isys_ipdata.csi2.ctrl0_irq_lnp =
+		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE;
+	isys_ipdata.enhanced_iwake = is_ipu6ep_mtl(hw_ver) || is_ipu6ep(hw_ver);
+	psys_ipdata.hw_variant.spc_offset = IPU6_PSYS_SPC_OFFSET;
+	isys_ipdata.csi2.fw_access_port_ofs = CSI_REG_HUB_FW_ACCESS_PORT_OFS;
+
+	if (is_ipu6ep(hw_ver)) {
+		isys_ipdata.ltr = IPU6EP_LTR_VALUE;
+		isys_ipdata.memopen_threshold = IPU6EP_MIN_MEMOPEN_TH;
+	}
+
+	if (is_ipu6_tgl(hw_ver)) {
+		isys_ipdata.csi2.nports = ARRAY_SIZE(ipu6_tgl_csi_offsets);
+		isys_ipdata.csi2.offsets = ipu6_tgl_csi_offsets;
+	}
+
+	if (is_ipu6ep_mtl(hw_ver)) {
+		isys_ipdata.csi2.nports = ARRAY_SIZE(ipu6ep_mtl_csi_offsets);
+		isys_ipdata.csi2.offsets = ipu6ep_mtl_csi_offsets;
+
+		isys_ipdata.csi2.ctrl0_irq_edge =
+			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE;
+		isys_ipdata.csi2.ctrl0_irq_clear =
+			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR;
+		isys_ipdata.csi2.ctrl0_irq_mask =
+			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK;
+		isys_ipdata.csi2.ctrl0_irq_enable =
+			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE;
+		isys_ipdata.csi2.ctrl0_irq_lnp =
+			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE;
+		isys_ipdata.csi2.ctrl0_irq_status =
+			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS;
+		isys_ipdata.csi2.fw_access_port_ofs =
+			CSI_REG_HUB_FW_ACCESS_PORT_V6OFS;
+		isys_ipdata.ltr = IPU6EP_MTL_LTR_VALUE;
+		isys_ipdata.memopen_threshold = IPU6EP_MTL_MIN_MEMOPEN_TH;
+	}
+
+	if (is_ipu6se(hw_ver)) {
+		isys_ipdata.csi2.nports = ARRAY_SIZE(ipu6se_csi_offsets);
+		isys_ipdata.csi2.irq_mask = IPU6SE_CSI_RX_ERROR_IRQ_MASK;
+		isys_ipdata.csi2.offsets = ipu6se_csi_offsets;
+		isys_ipdata.num_parallel_streams = IPU6SE_ISYS_NUM_STREAMS;
+		isys_ipdata.sram_gran_shift = IPU6SE_SRAM_GRANULARITY_SHIFT;
+		isys_ipdata.sram_gran_size = IPU6SE_SRAM_GRANULARITY_SIZE;
+		isys_ipdata.max_sram_size = IPU6SE_MAX_SRAM_SIZE;
+		isys_ipdata.sensor_type_start =
+			IPU6SE_FW_ISYS_SENSOR_TYPE_START;
+		isys_ipdata.sensor_type_end = IPU6SE_FW_ISYS_SENSOR_TYPE_END;
+		isys_ipdata.max_streams = IPU6SE_ISYS_NUM_STREAMS;
+		isys_ipdata.max_send_queues = IPU6SE_N_MAX_SEND_QUEUES;
+		isys_ipdata.max_sram_blocks = IPU6SE_NOF_SRAM_BLOCKS_MAX;
+		isys_ipdata.max_devq_size = IPU6SE_DEV_SEND_QUEUE_SIZE;
+		psys_ipdata.hw_variant.spc_offset = IPU6SE_PSYS_SPC_OFFSET;
+	}
+}
+
+static int ipu6_isys_check_fwnode_graph(struct fwnode_handle *fwnode)
+{
+	struct fwnode_handle *endpoint;
+
+	if (IS_ERR_OR_NULL(fwnode))
+		return -EINVAL;
+
+	endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
+	if (endpoint) {
+		fwnode_handle_put(endpoint);
+		return 0;
+	}
+
+	return ipu6_isys_check_fwnode_graph(fwnode->secondary);
+}
+
+static struct ipu6_bus_device *
+ipu6_isys_init(struct pci_dev *pdev, struct device *parent,
+	       struct ipu6_buttress_ctrl *ctrl, void __iomem *base,
+	       const struct ipu6_isys_internal_pdata *ipdata)
+{
+	struct device *dev = &pdev->dev;
+	struct fwnode_handle *fwnode = dev_fwnode(dev);
+	struct ipu6_bus_device *isys_adev;
+	struct ipu6_isys_pdata *pdata;
+	int ret;
+
+	/* check the fwnode first, fall back to the bridge if there is no fwnode graph */
+	ret = ipu6_isys_check_fwnode_graph(fwnode);
+	if (ret) {
+		if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
+			dev_err(dev,
+				"fwnode graph has no endpoints connection\n");
+			return ERR_PTR(-EINVAL);
+		}
+
+		ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
+		if (ret) {
+			dev_err_probe(dev, ret, "IPU6 bridge init failed\n");
+			return ERR_PTR(ret);
+		}
+	}
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	pdata->base = base;
+	pdata->ipdata = ipdata;
+
+	isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
+					       IPU6_ISYS_NAME);
+	if (IS_ERR(isys_adev)) {
+		dev_err_probe(dev, PTR_ERR(isys_adev),
+			      "ipu6_bus_initialize_device isys failed\n");
+		kfree(pdata);
+		return ERR_CAST(isys_adev);
+	}
+
+	isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID,
+				       &ipdata->hw_variant);
+	if (IS_ERR(isys_adev->mmu)) {
+		dev_err_probe(dev, PTR_ERR(isys_adev),
+			      "ipu6_mmu_init(isys_adev->mmu) failed\n");
+		put_device(&isys_adev->auxdev.dev);
+		kfree(pdata);
+		return ERR_CAST(isys_adev->mmu);
+	}
+
+	isys_adev->mmu->dev = &isys_adev->auxdev.dev;
+
+	ret = ipu6_bus_add_device(isys_adev);
+	if (ret) {
+		kfree(pdata);
+		return ERR_PTR(ret);
+	}
+
+	return isys_adev;
+}
+
+static struct ipu6_bus_device *
+ipu6_psys_init(struct pci_dev *pdev, struct device *parent,
+	       struct ipu6_buttress_ctrl *ctrl, void __iomem *base,
+	       const struct ipu6_psys_internal_pdata *ipdata)
+{
+	struct ipu6_bus_device *psys_adev;
+	struct ipu6_psys_pdata *pdata;
+	int ret;
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	pdata->base = base;
+	pdata->ipdata = ipdata;
+
+	psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
+					       IPU6_PSYS_NAME);
+	if (IS_ERR(psys_adev)) {
+		dev_err_probe(&pdev->dev, PTR_ERR(psys_adev),
+			      "ipu6_bus_initialize_device psys failed\n");
+		kfree(pdata);
+		return ERR_CAST(psys_adev);
+	}
+
+	psys_adev->mmu = ipu6_mmu_init(&pdev->dev, base, PSYS_MMID,
+				       &ipdata->hw_variant);
+	if (IS_ERR(psys_adev->mmu)) {
+		dev_err_probe(&pdev->dev, PTR_ERR(psys_adev),
+			      "ipu6_mmu_init(psys_adev->mmu) failed\n");
+		put_device(&psys_adev->auxdev.dev);
+		kfree(pdata);
+		return ERR_CAST(psys_adev->mmu);
+	}
+
+	psys_adev->mmu->dev = &psys_adev->auxdev.dev;
+
+	ret = ipu6_bus_add_device(psys_adev);
+	if (ret) {
+		kfree(pdata);
+		return ERR_PTR(ret);
+	}
+
+	return psys_adev;
+}
+
+static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver)
+{
+	int ret;
+
+	/* disable IPU6 PCI ATS on mtl ES2 */
+	if (is_ipu6ep_mtl(hw_ver) && boot_cpu_data.x86_stepping == 0x2 &&
+	    pci_ats_supported(dev))
+		pci_disable_ats(dev);
+
+	/* No PCI msi capability for IPU6EP */
+	if (is_ipu6ep(hw_ver) || is_ipu6ep_mtl(hw_ver)) {
+		/* likely do nothing as msi not enabled by default */
+		pci_disable_msi(dev);
+		return 0;
+	}
+
+	ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSI);
+	if (ret < 0)
+		return dev_err_probe(&dev->dev, ret, "Request msi failed");
+
+	return 0;
+}
+
+static void ipu6_configure_vc_mechanism(struct ipu6_device *isp)
+{
+	u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL);
+
+	if (IPU6_BTRS_ARB_STALL_MODE_VC0 == IPU6_BTRS_ARB_MODE_TYPE_STALL)
+		val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;
+	else
+		val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;
+
+	if (IPU6_BTRS_ARB_STALL_MODE_VC1 == IPU6_BTRS_ARB_MODE_TYPE_STALL)
+		val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;
+	else
+		val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;
+
+	writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL);
+}
+
+static int request_cpd_fw(const struct firmware **firmware_p, const char *name,
+			  struct device *device)
+{
+	const struct firmware *fw;
+	struct firmware *dst;
+	int ret = 0;
+
+	ret = request_firmware(&fw, name, device);
+	if (ret)
+		return ret;
+
+	if (is_vmalloc_addr(fw->data)) {
+		*firmware_p = fw;
+		return 0;
+	}
+
+	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+	if (!dst) {
+		ret = -ENOMEM;
+		goto release_firmware;
+	}
+
+	dst->size = fw->size;
+	dst->data = vmalloc(fw->size);
+	if (!dst->data) {
+		kfree(dst);
+		ret = -ENOMEM;
+		goto release_firmware;
+	}
+
+	memcpy((void *)dst->data, fw->data, fw->size);
+	*firmware_p = dst;
+
+release_firmware:
+	release_firmware(fw);
+
+	return ret;
+}
+
+static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL;
+	struct device *dev = &pdev->dev;
+	void __iomem *isys_base = NULL;
+	void __iomem *psys_base = NULL;
+	struct ipu6_device *isp;
+	phys_addr_t phys;
+	u32 val, version, sku_id;
+	int ret;
+
+	isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL);
+	if (!isp)
+		return -ENOMEM;
+
+	isp->pdev = pdev;
+	INIT_LIST_HEAD(&isp->devices);
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		return dev_err_probe(dev, ret, "Enable PCI device failed\n");
+
+	phys = pci_resource_start(pdev, IPU6_PCI_BAR);
+	dev_dbg(dev, "IPU6 PCI bar[%u] = %pa\n", IPU6_PCI_BAR, &phys);
+
+	ret = pcim_iomap_regions(pdev, 1 << IPU6_PCI_BAR, pci_name(pdev));
+	if (ret)
+		return dev_err_probe(dev, ret, "Failed to I/O mem remappinp\n");
+
+	isp->base = pcim_iomap_table(pdev)[IPU6_PCI_BAR];
+	pci_set_drvdata(pdev, isp);
+	pci_set_master(pdev);
+
+	isp->cpd_metadata_cmpnt_size = sizeof(struct ipu6_cpd_metadata_cmpnt);
+	switch (id->device) {
+	case PCI_DEVICE_ID_INTEL_IPU6:
+		isp->hw_ver = IPU6_VER_6;
+		isp->cpd_fw_name = IPU6_FIRMWARE_NAME;
+		break;
+	case PCI_DEVICE_ID_INTEL_IPU6SE:
+		isp->hw_ver = IPU6_VER_6SE;
+		isp->cpd_fw_name = IPU6SE_FIRMWARE_NAME;
+		isp->cpd_metadata_cmpnt_size =
+			sizeof(struct ipu6se_cpd_metadata_cmpnt);
+		break;
+	case PCI_DEVICE_ID_INTEL_IPU6EP_ADLP:
+	case PCI_DEVICE_ID_INTEL_IPU6EP_ADLN:
+	case PCI_DEVICE_ID_INTEL_IPU6EP_RPLP:
+		isp->hw_ver = IPU6_VER_6EP;
+		isp->cpd_fw_name = IPU6EP_FIRMWARE_NAME;
+		break;
+	case PCI_DEVICE_ID_INTEL_IPU6EP_MTL:
+		isp->hw_ver = IPU6_VER_6EP_MTL;
+		isp->cpd_fw_name = IPU6EPMTL_FIRMWARE_NAME;
+		break;
+	default:
+		return dev_err_probe(dev, -ENODEV,
+				     "Unsupported IPU6 device %x\n",
+				     id->device);
+	}
+
+	ipu6_internal_pdata_init(isp);
+
+	isys_base = isp->base + isys_ipdata.hw_variant.offset;
+	psys_base = isp->base + psys_ipdata.hw_variant.offset;
+
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
+	if (ret)
+		return dev_err_probe(dev, ret, "Failed to set DMA mask\n");
+
+	ret = dma_set_max_seg_size(dev, UINT_MAX);
+	if (ret)
+		return dev_err_probe(dev, ret, "Failed to set max_seg_size\n");
+
+	ret = ipu6_pci_config_setup(pdev, isp->hw_ver);
+	if (ret)
+		return ret;
+
+	ret = ipu6_buttress_init(isp);
+	if (ret)
+		return ret;
+
+	ret = request_cpd_fw(&isp->cpd_fw, isp->cpd_fw_name, dev);
+	if (ret) {
+		dev_err_probe(&isp->pdev->dev, ret,
+			      "Requesting signed firmware %s failed\n",
+			      isp->cpd_fw_name);
+		goto buttress_exit;
+	}
+
+	ret = ipu6_cpd_validate_cpd_file(isp, isp->cpd_fw->data,
+					 isp->cpd_fw->size);
+	if (ret) {
+		dev_err_probe(&isp->pdev->dev, ret,
+			      "Failed to validate cpd\n");
+		goto out_ipu6_bus_del_devices;
+	}
+
+	isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl,
+				 sizeof(isys_buttress_ctrl), GFP_KERNEL);
+	if (!isys_ctrl) {
+		ret = -ENOMEM;
+		goto out_ipu6_bus_del_devices;
+	}
+
+	isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base,
+				   &isys_ipdata);
+	if (IS_ERR(isp->isys)) {
+		ret = PTR_ERR(isp->isys);
+		goto out_ipu6_bus_del_devices;
+	}
+
+	psys_ctrl = devm_kmemdup(dev, &psys_buttress_ctrl,
+				 sizeof(psys_buttress_ctrl), GFP_KERNEL);
+	if (!psys_ctrl) {
+		ret = -ENOMEM;
+		goto out_ipu6_bus_del_devices;
+	}
+
+	isp->psys = ipu6_psys_init(pdev, &isp->isys->auxdev.dev, psys_ctrl,
+				   psys_base, &psys_ipdata);
+	if (IS_ERR(isp->psys)) {
+		ret = PTR_ERR(isp->psys);
+		goto out_ipu6_bus_del_devices;
+	}
+
+	ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev);
+	if (ret < 0)
+		goto out_ipu6_bus_del_devices;
+
+	ret = ipu6_mmu_hw_init(isp->psys->mmu);
+	if (ret) {
+		dev_err_probe(&isp->pdev->dev, ret,
+			      "Failed to set MMU hardware\n");
+		goto out_ipu6_bus_del_devices;
+	}
+
+	ret = ipu6_buttress_map_fw_image(isp->psys, isp->cpd_fw,
+					 &isp->psys->fw_sgt);
+	if (ret) {
+		dev_err_probe(&isp->pdev->dev, ret, "failed to map fw image\n");
+		goto out_ipu6_bus_del_devices;
+	}
+
+	ret = ipu6_cpd_create_pkg_dir(isp->psys, isp->cpd_fw->data);
+	if (ret) {
+		dev_err_probe(&isp->pdev->dev, ret,
+			      "failed to create pkg dir\n");
+		goto out_ipu6_bus_del_devices;
+	}
+
+	ret = devm_request_threaded_irq(dev, pdev->irq, ipu6_buttress_isr,
+					ipu6_buttress_isr_threaded,
+					IRQF_SHARED, IPU6_NAME, isp);
+	if (ret) {
+		dev_err_probe(dev, ret, "Requesting irq failed\n");
+		goto out_ipu6_bus_del_devices;
+	}
+
+	ret = ipu6_buttress_authenticate(isp);
+	if (ret) {
+		dev_err_probe(&isp->pdev->dev, ret,
+			      "FW authentication failed\n");
+		goto out_free_irq;
+	}
+
+	ipu6_mmu_hw_cleanup(isp->psys->mmu);
+	pm_runtime_put(&isp->psys->auxdev.dev);
+
+	/* Configure the arbitration mechanisms for VC requests */
+	ipu6_configure_vc_mechanism(isp);
+
+	val = readl(isp->base + BUTTRESS_REG_SKU);
+	sku_id = FIELD_GET(GENMASK(6, 4), val);
+	version = FIELD_GET(GENMASK(3, 0), val);
+	dev_info(dev, "IPU%u-v%u[%x] hardware version %d\n", version, sku_id,
+		 pdev->device, isp->hw_ver);
+
+	pm_runtime_put_noidle(dev);
+	pm_runtime_allow(dev);
+
+	isp->bus_ready_to_probe = true;
+
+	return 0;
+
+out_free_irq:
+	devm_free_irq(dev, pdev->irq, isp);
+out_ipu6_bus_del_devices:
+	if (isp->psys) {
+		ipu6_cpd_free_pkg_dir(isp->psys);
+		ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt);
+	}
+	if (!IS_ERR_OR_NULL(isp->psys) && !IS_ERR_OR_NULL(isp->psys->mmu))
+		ipu6_mmu_cleanup(isp->psys->mmu);
+	if (!IS_ERR_OR_NULL(isp->isys) && !IS_ERR_OR_NULL(isp->isys->mmu))
+		ipu6_mmu_cleanup(isp->isys->mmu);
+	ipu6_bus_del_devices(pdev);
+	release_firmware(isp->cpd_fw);
+buttress_exit:
+	ipu6_buttress_exit(isp);
+
+	return ret;
+}
+
+static void ipu6_pci_remove(struct pci_dev *pdev)
+{
+	struct ipu6_device *isp = pci_get_drvdata(pdev);
+	struct ipu6_mmu *isys_mmu = isp->isys->mmu;
+	struct ipu6_mmu *psys_mmu = isp->psys->mmu;
+
+	devm_free_irq(&pdev->dev, pdev->irq, isp);
+	ipu6_cpd_free_pkg_dir(isp->psys);
+
+	ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt);
+	ipu6_buttress_exit(isp);
+
+	ipu6_bus_del_devices(pdev);
+
+	pm_runtime_forbid(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+
+	release_firmware(isp->cpd_fw);
+
+	ipu6_mmu_cleanup(psys_mmu);
+	ipu6_mmu_cleanup(isys_mmu);
+}
+
+static void ipu6_pci_reset_prepare(struct pci_dev *pdev)
+{
+	struct ipu6_device *isp = pci_get_drvdata(pdev);
+
+	pm_runtime_forbid(&isp->pdev->dev);
+}
+
+static void ipu6_pci_reset_done(struct pci_dev *pdev)
+{
+	struct ipu6_device *isp = pci_get_drvdata(pdev);
+
+	ipu6_buttress_restore(isp);
+	if (isp->secure_mode)
+		ipu6_buttress_reset_authentication(isp);
+
+	isp->need_ipc_reset = true;
+	pm_runtime_allow(&isp->pdev->dev);
+}
+
+/*
+ * PCI base driver code requires driver to provide these to enable
+ * PCI device level PM state transitions (D0<->D3)
+ */
+static int ipu6_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int ipu6_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ipu6_device *isp = pci_get_drvdata(pdev);
+	struct ipu6_buttress *b = &isp->buttress;
+	int ret;
+
+	/* Configure the arbitration mechanisms for VC requests */
+	ipu6_configure_vc_mechanism(isp);
+
+	isp->secure_mode = ipu6_buttress_get_secure_mode(isp);
+	dev_info(dev, "IPU6 in %s mode\n",
+		 isp->secure_mode ? "secure" : "non-secure");
+
+	ipu6_buttress_restore(isp);
+
+	ret = ipu6_buttress_ipc_reset(isp, &b->cse);
+	if (ret)
+		dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n");
+
+	ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev);
+	if (ret < 0) {
+		dev_err(&isp->psys->auxdev.dev, "Failed to get runtime PM\n");
+		return 0;
+	}
+
+	ret = ipu6_buttress_authenticate(isp);
+	if (ret)
+		dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n", ret);
+
+	pm_runtime_put(&isp->psys->auxdev.dev);
+
+	return 0;
+}
+
+static int ipu6_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ipu6_device *isp = pci_get_drvdata(pdev);
+	int ret;
+
+	ipu6_configure_vc_mechanism(isp);
+	ipu6_buttress_restore(isp);
+
+	if (isp->need_ipc_reset) {
+		struct ipu6_buttress *b = &isp->buttress;
+
+		isp->need_ipc_reset = false;
+		ret = ipu6_buttress_ipc_reset(isp, &b->cse);
+		if (ret)
+			dev_err(&isp->pdev->dev, "IPC reset protocol failed\n");
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops ipu6_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(&ipu6_suspend, &ipu6_resume)
+	SET_RUNTIME_PM_OPS(&ipu6_suspend, &ipu6_runtime_resume, NULL)
+};
+
+static const struct pci_device_id ipu6_pci_tbl[] = {
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6SE) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_ADLP) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_ADLN) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_RPLP) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_MTL) },
+	{ }
+};
+MODULE_DEVICE_TABLE(pci, ipu6_pci_tbl);
+
+static const struct pci_error_handlers pci_err_handlers = {
+	.reset_prepare = ipu6_pci_reset_prepare,
+	.reset_done = ipu6_pci_reset_done,
+};
+
+static struct pci_driver ipu6_pci_driver = {
+	.name = IPU6_NAME,
+	.id_table = ipu6_pci_tbl,
+	.probe = ipu6_pci_probe,
+	.remove = ipu6_pci_remove,
+	.driver = {
+		.pm = pm_ptr(&ipu6_pm_ops),
+	},
+	.err_handler = &pci_err_handlers,
+};
+
+module_pci_driver(ipu6_pci_driver);
+
+MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
+MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_AUTHOR("Qingwu Zhang <qingwu.zhang@intel.com>");
+MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
+MODULE_AUTHOR("Hongju Wang <hongju.wang@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel IPU6 PCI driver");
diff --git a/drivers/media/pci/intel/ipu6/ipu6.h b/drivers/media/pci/intel/ipu6/ipu6.h
new file mode 100644
index 000000000000..04e7e7e61ca5
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6.h
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013 - 2023 Intel Corporation */
+
+#ifndef IPU6_H
+#define IPU6_H
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "ipu6-buttress.h"
+
+struct firmware;
+struct pci_dev;
+struct ipu6_bus_device;
+
+#define PCI_DEVICE_ID_INTEL_IPU6		0x9a19
+#define PCI_DEVICE_ID_INTEL_IPU6SE		0x4e19
+#define PCI_DEVICE_ID_INTEL_IPU6EP_ADLP		0x465d
+#define PCI_DEVICE_ID_INTEL_IPU6EP_ADLN		0x462e
+#define PCI_DEVICE_ID_INTEL_IPU6EP_RPLP		0xa75d
+#define PCI_DEVICE_ID_INTEL_IPU6EP_MTL		0x7d19
+
+#define IPU6_NAME			"intel-ipu6"
+#define IPU6_MEDIA_DEV_MODEL_NAME	"ipu6"
+
+#define IPU6SE_FIRMWARE_NAME		"intel/ipu6se_fw.bin"
+#define IPU6EP_FIRMWARE_NAME		"intel/ipu6ep_fw.bin"
+#define IPU6_FIRMWARE_NAME		"intel/ipu6_fw.bin"
+#define IPU6EPMTL_FIRMWARE_NAME		"intel/ipu6epmtl_fw.bin"
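+/*
+ * These CPD firmware images are loaded with request_firmware() (see
+ * request_cpd_fw() in ipu6.c), so the blobs are expected to be present in the
+ * kernel firmware search path, typically /lib/firmware/intel/.
+ */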
+
+enum ipu6_version {
+	IPU6_VER_INVALID = 0,
+	IPU6_VER_6 = 1,
+	IPU6_VER_6SE = 3,
+	IPU6_VER_6EP = 5,
+	IPU6_VER_6EP_MTL = 6,
+};
+
+/*
+ * IPU6 - TGL
+ * IPU6SE - JSL
+ * IPU6EP - ADL/RPL
+ * IPU6EP_MTL - MTL
+ */
+static inline bool is_ipu6se(u8 hw_ver)
+{
+	return hw_ver == IPU6_VER_6SE;
+}
+
+static inline bool is_ipu6ep(u8 hw_ver)
+{
+	return hw_ver == IPU6_VER_6EP;
+}
+
+static inline bool is_ipu6ep_mtl(u8 hw_ver)
+{
+	return hw_ver == IPU6_VER_6EP_MTL;
+}
+
+static inline bool is_ipu6_tgl(u8 hw_ver)
+{
+	return hw_ver == IPU6_VER_6;
+}
+
+/*
+ * ISYS DMA can overshoot. For higher resolutions the over-allocation is one
+ * line, but it must be at minimum 1024 bytes. The value could differ between
+ * versions / generations, thus provide it via platform data.
+ */
+#define IPU6_ISYS_OVERALLOC_MIN		1024
+
+/* The GDA has 128 physical pages; the page size is 2K for IPU6, 1K for others */
+#define IPU6_DEVICE_GDA_NR_PAGES		128
+
+/* Virtualization factor to calculate the available virtual pages */
+#define IPU6_DEVICE_GDA_VIRT_FACTOR	32
+
+#define NR_OF_MMU_RESOURCES			2
+
+struct ipu6_device {
+	struct pci_dev *pdev;
+	struct list_head devices;
+	struct ipu6_bus_device *isys;
+	struct ipu6_bus_device *psys;
+	struct ipu6_buttress buttress;
+
+	const struct firmware *cpd_fw;
+	const char *cpd_fw_name;
+	u32 cpd_metadata_cmpnt_size;
+
+	void __iomem *base;
+	bool need_ipc_reset;
+	bool secure_mode;
+	u8 hw_ver;
+	bool bus_ready_to_probe;
+};
+
+#define IPU6_ISYS_NAME "isys"
+#define IPU6_PSYS_NAME "psys"
+
+#define IPU6_MMU_MAX_DEVICES		4
+#define IPU6_MMU_ADDR_BITS		32
+/* The firmware is accessible within the first 2 GiB only in non-secure mode. */
+#define IPU6_MMU_ADDR_BITS_NON_SECURE	31
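+/*
+ * DMA_BIT_MASK(31) limits addressing to 2 GiB; the mask is chosen per
+ * auxiliary device based on isp->secure_mode in ipu6_bus_initialize_device().
+ */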
+
+#define IPU6_MMU_MAX_TLB_L1_STREAMS	32
+#define IPU6_MMU_MAX_TLB_L2_STREAMS	32
+#define IPU6_MAX_LI_BLOCK_ADDR		128
+#define IPU6_MAX_L2_BLOCK_ADDR		64
+
+#define IPU6_ISYS_MAX_CSI2_LEGACY_PORTS	4
+#define IPU6_ISYS_MAX_CSI2_COMBO_PORTS	2
+
+#define IPU6_MAX_FRAME_COUNTER	0xff
+
+#define IPU6SE_ISYS_NUM_STREAMS          IPU6SE_NONSECURE_STREAM_ID_MAX
+#define IPU6_ISYS_NUM_STREAMS            IPU6_NONSECURE_STREAM_ID_MAX
+
+/*
+ * To maximize IOSF utilization, IPU6 needs to send requests in bursts.
+ * At the DMA interface with the buttress, there are CDC FIFOs with burst
+ * collection capability. The CDC FIFO burst collectors have a configurable
+ * threshold, configured based on the outcome of performance measurements.
+ *
+ * isys has 3 ports with an IOSF interface, for VC0, VC1 and VC2
+ * psys has 4 ports with an IOSF interface, for VC0, VC1w, VC1r and VC2
+ *
+ * The threshold values are pre-defined and arrived at after performance
+ * evaluations on each type of IPU6.
+ */
+#define IPU6_MAX_VC_IOSF_PORTS		4
+
+/*
+ * IPU6 must configure the correct arbitration mechanism for the IOSF VC
+ * requests. There are two options for VC0 and VC1: 0 means re-arbitrate on
+ * stall and 1 means stall until the request is completed.
+ */
+#define IPU6_BTRS_ARB_MODE_TYPE_REARB	0
+#define IPU6_BTRS_ARB_MODE_TYPE_STALL	1
+
+/* Currently chosen arbitration mechanism for VC0 */
+#define IPU6_BTRS_ARB_STALL_MODE_VC0	\
+			IPU6_BTRS_ARB_MODE_TYPE_REARB
+
+/* Currently chosen arbitration mechanism for VC1 */
+#define IPU6_BTRS_ARB_STALL_MODE_VC1	\
+			IPU6_BTRS_ARB_MODE_TYPE_REARB
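+/*
+ * With both VC0 and VC1 set to the re-arbitrate type above,
+ * ipu6_configure_vc_mechanism() in ipu6.c clears the corresponding stall-mode
+ * bits in BUTTRESS_REG_BTRS_CTRL at probe and resume time.
+ */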
+
+/*
+ * MMU Invalidation HW bug workaround by ZLW mechanism
+ *
+ * Old IPU6 MMUV2 has a bug in the invalidation mechanism which might result in
+ * a wrong translation or a replication of the translation. This will cause
+ * data corruption. So we cannot directly use the MMU V2 invalidation registers
+ * to invalidate the MMU. Instead, whenever an invalidate is called, we need to
+ * clear the TLB by evicting all the valid translations, filling it with trash
+ * buffer translations (guaranteed not to be used by any other process). ZLW is
+ * used to fill the L1 and L2 caches with the trash buffer translations. ZLW,
+ * or Zero Length Write, is a pre-fetch mechanism that pre-fetches pages into
+ * the L1 and L2 caches in advance without triggering any memory operations.
+ *
+ * In MMU V2, L1 -> 16 streams and 64 blocks, maximum 16 blocks per stream.
+ * One L1 block has 16 entries, hence points to 16 * 4K pages.
+ * L2 -> 16 streams and 32 blocks, 2 blocks per stream.
+ * One L2 block maps to 1024 L1 entries, hence points to a 4MB address range.
+ * 2 blocks per L2 stream means 1 stream points to an 8MB range.
+ *
+ * As we need to clear the caches and 8MB is the biggest cache size, we need
+ * a trash buffer which points to an 8MB address range. As these trash
+ * buffers are not used for any memory transactions, we need only the least
+ * amount of physical memory. So we reserve an 8MB IOVA address range but
+ * reserve only one page of physical memory. Each 8MB IOVA address range is
+ * then mapped to the same physical memory page.
+ */
+/* One L2 entry maps 1024 L1 entries and one L1 entry per page */
+#define IPU6_MMUV2_L2_RANGE		(1024 * PAGE_SIZE)
+/* Max L2 blocks per stream */
+#define IPU6_MMUV2_MAX_L2_BLOCKS	2
+/* Max L1 blocks per stream */
+#define IPU6_MMUV2_MAX_L1_BLOCKS	16
+#define IPU6_MMUV2_TRASH_RANGE	(IPU6_MMUV2_L2_RANGE * IPU6_MMUV2_MAX_L2_BLOCKS)
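+/*
+ * Assuming 4 KiB pages, IPU6_MMUV2_TRASH_RANGE is 1024 * 4 KiB * 2 = 8 MiB,
+ * matching the 8MB trash buffer IOVA range described above.
+ */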
+/* Entries per L1 block */
+#define MMUV2_ENTRIES_PER_L1_BLOCK	16
+#define MMUV2_TRASH_L1_BLOCK_OFFSET	(MMUV2_ENTRIES_PER_L1_BLOCK * PAGE_SIZE)
+#define MMUV2_TRASH_L2_BLOCK_OFFSET	IPU6_MMUV2_L2_RANGE
+
+/*
+ * Some of the IPU6 MMUs provide a way to configure the L1 and L2 page
+ * table caches. Both the L1 and L2 caches are divided into multiple sections
+ * called streams, with a maximum of 16 streams per cache. Each of these
+ * sections is subdivided into multiple blocks. When nr_l1streams = 0 and
+ * nr_l2streams = 0, the MMU is of type MMU_V1 and does not support
+ * L1/L2 page table caches.
+ *
+ * L1 stream per block sizes are configurable and varies per usecase.
+ * L2 has constant block sizes - 2 blocks per stream.
+ *
+ * MMU1 supports pre-fetching of pages to reduce cache lookup misses. To
+ * enable pre-fetching, the MMU1 AT (Address Translator) device registers
+ * need to be configured.
+ *
+ * There are four types of memory accesses which require ZLW configuration.
+ * ZLW (Zero Length Write) is a mechanism to enable VT-d pre-fetching on the IOMMU.
+ *
+ * 1. Sequential Access or 1D mode
+ *	Set ZLW_EN -> 1
+ *	set ZLW_PAGE_CROSS_1D -> 1
+ *	Set ZLW_N to "N" pages so that ZLW will be inserted N pages ahead, where
+ *		  N is pre-defined and hardcoded in the platform data
+ *	Set ZLW_2D -> 0
+ *
+ * 2. ZLW 2D mode
+ *	Set ZLW_EN -> 1
+ *	set ZLW_PAGE_CROSS_1D -> 1,
+ *	Set ZLW_N -> 0
+ *	Set ZLW_2D -> 1
+ *
+ * 3. ZLW Enable (no 1D or 2D mode)
+ *	Set ZLW_EN -> 1
+ *	set ZLW_PAGE_CROSS_1D -> 0,
+ *	Set ZLW_N -> 0
+ *	Set ZLW_2D -> 0
+ *
+ * 4. ZLW disable
+ *	Set ZLW_EN -> 0
+ *	set ZLW_PAGE_CROSS_1D -> 0,
+ *	Set ZLW_N -> 0
+ *	Set ZLW_2D -> 0
+ *
+ * To configure the ZLW for the above memory accesses, four registers are
+ * available. Hence, to track these four settings, we have the following
+ * entries in struct ipu6_mmu_hw. Each of these entries is per stream and
+ * available only for the L1 streams.
+ *
+ * a. l1_zlw_en -> To track zlw enabled per stream (ZLW_EN)
+ * b. l1_zlw_1d_mode -> Track 1D mode per stream. ZLW inserted at page boundary
+ * c. l1_ins_zlw_ahead_pages -> to track how far in advance the ZLW needs to be
+ *			inserted: insert the ZLW request N pages ahead of the address.
+ * d. l1_zlw_2d_mode -> To track 2D mode per stream (ZLW_2D)
+ *
+ *
+ * Currently L1/L2 streams, blocks, AT ZLW configurations etc. are pre-defined
+ * as per the usecase specific calculations. Any change to this pre-defined
+ * table has to happen in sync with IPU6 FW.
+ */
+struct ipu6_mmu_hw {
+	union {
+		unsigned long offset;
+		void __iomem *base;
+	};
+	u32 info_bits;
+	u8 nr_l1streams;
+	/*
+	 * L1 has variable blocks per stream - total of 64 blocks and maximum of
+	 * 16 blocks per stream. Configurable by using the block start address
+	 * per stream. Block start address is calculated from the block size
+	 */
+	u8 l1_block_sz[IPU6_MMU_MAX_TLB_L1_STREAMS];
+	/* Whether ZLW is enabled in each stream */
+	bool l1_zlw_en[IPU6_MMU_MAX_TLB_L1_STREAMS];
+	bool l1_zlw_1d_mode[IPU6_MMU_MAX_TLB_L1_STREAMS];
+	u8 l1_ins_zlw_ahead_pages[IPU6_MMU_MAX_TLB_L1_STREAMS];
+	bool l1_zlw_2d_mode[IPU6_MMU_MAX_TLB_L1_STREAMS];
+
+	u32 l1_stream_id_reg_offset;
+	u32 l2_stream_id_reg_offset;
+
+	u8 nr_l2streams;
+	/*
+	 * L2 has fixed 2 blocks per stream. Block address is calculated
+	 * from the block size
+	 */
+	u8 l2_block_sz[IPU6_MMU_MAX_TLB_L2_STREAMS];
+	/* flag to track if WA is needed for successive invalidate HW bug */
+	bool insert_read_before_invalidate;
+};
+
+struct ipu6_mmu_pdata {
+	u32 nr_mmus;
+	struct ipu6_mmu_hw mmu_hw[IPU6_MMU_MAX_DEVICES];
+	int mmid;
+};
+
+struct ipu6_isys_csi2_pdata {
+	void __iomem *base;
+};
+
+struct ipu6_isys_internal_csi2_pdata {
+	u32 nports;
+	u32 irq_mask;
+	u32 *offsets;
+	u32 ctrl0_irq_edge;
+	u32 ctrl0_irq_clear;
+	u32 ctrl0_irq_mask;
+	u32 ctrl0_irq_enable;
+	u32 ctrl0_irq_lnp;
+	u32 ctrl0_irq_status;
+	u32 fw_access_port_ofs;
+};
+
+struct ipu6_isys_internal_tpg_pdata {
+	u32 ntpgs;
+	u32 *offsets;
+	u32 *sels;
+};
+
+struct ipu6_hw_variants {
+	unsigned long offset;
+	u32 nr_mmus;
+	struct ipu6_mmu_hw mmu_hw[IPU6_MMU_MAX_DEVICES];
+	u8 cdc_fifos;
+	u8 cdc_fifo_threshold[IPU6_MAX_VC_IOSF_PORTS];
+	u32 dmem_offset;
+	u32 spc_offset;
+};
+
+struct ipu6_isys_internal_pdata {
+	struct ipu6_isys_internal_csi2_pdata csi2;
+	struct ipu6_hw_variants hw_variant;
+	u32 num_parallel_streams;
+	u32 isys_dma_overshoot;
+	u32 sram_gran_shift;
+	u32 sram_gran_size;
+	u32 max_sram_size;
+	u32 max_streams;
+	u32 max_send_queues;
+	u32 max_sram_blocks;
+	u32 max_devq_size;
+	u32 sensor_type_start;
+	u32 sensor_type_end;
+	u32 ltr;
+	u32 memopen_threshold;
+	bool enhanced_iwake;
+};
+
+struct ipu6_isys_pdata {
+	void __iomem *base;
+	const struct ipu6_isys_internal_pdata *ipdata;
+};
+
+struct ipu6_psys_internal_pdata {
+	struct ipu6_hw_variants hw_variant;
+};
+
+struct ipu6_psys_pdata {
+	void __iomem *base;
+	const struct ipu6_psys_internal_pdata *ipdata;
+};
+
+int ipu6_fw_authenticate(void *data, u64 val);
+void ipu6_configure_spc(struct ipu6_device *isp,
+			const struct ipu6_hw_variants *hw_variant,
+			int pkg_dir_idx, void __iomem *base, u64 *pkg_dir,
+			dma_addr_t pkg_dir_dma_addr);
+#endif /* IPU6_H */
-- 
2.43.0

From 79ff4f8a1383c679fc4bbee832e51e6369cbc21c Mon Sep 17 00:00:00 2001
From: Bingbu Cao <bingbu.cao@intel.com>
Date: Thu, 11 Jan 2024 14:55:16 +0800
Subject: [PATCH 02/31] media: intel/ipu6: add IPU auxiliary devices

Even though the IPU input system and processing system are in a single PCI
device, each system has its own power sequence, and the processing system
power-up depends on the input system power-up.

Besides, the input system and processing system have their own MMU
hardware for IPU DMA address mapping.

Register the IS/PS devices on the auxiliary bus and attach a power domain
to implement the power sequence dependency.

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Link: https://lore.kernel.org/r/20240111065531.2418836-3-bingbu.cao@intel.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 drivers/media/pci/intel/ipu6/ipu6-bus.c | 165 ++++++++++++++++++++++++
 drivers/media/pci/intel/ipu6/ipu6-bus.h |  58 +++++++++
 2 files changed, 223 insertions(+)
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-bus.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-bus.h

diff --git a/drivers/media/pci/intel/ipu6/ipu6-bus.c b/drivers/media/pci/intel/ipu6/ipu6-bus.c
new file mode 100644
index 000000000000..e81b9a6518a1
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-bus.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2023 Intel Corporation
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "ipu6.h"
+#include "ipu6-bus.h"
+#include "ipu6-buttress.h"
+#include "ipu6-dma.h"
+
+static int bus_pm_runtime_suspend(struct device *dev)
+{
+	struct ipu6_bus_device *adev = to_ipu6_bus_device(dev);
+	int ret;
+
+	ret = pm_generic_runtime_suspend(dev);
+	if (ret)
+		return ret;
+
+	ret = ipu6_buttress_power(dev, adev->ctrl, false);
+	if (!ret)
+		return 0;
+
+	dev_err(dev, "power down failed!\n");
+
+	/* Powering down failed, attempt to resume device now */
+	ret = pm_generic_runtime_resume(dev);
+	if (!ret)
+		return -EBUSY;
+
+	return -EIO;
+}
+
+static int bus_pm_runtime_resume(struct device *dev)
+{
+	struct ipu6_bus_device *adev = to_ipu6_bus_device(dev);
+	int ret;
+
+	ret = ipu6_buttress_power(dev, adev->ctrl, true);
+	if (ret)
+		return ret;
+
+	ret = pm_generic_runtime_resume(dev);
+	if (ret)
+		goto out_err;
+
+	return 0;
+
+out_err:
+	ipu6_buttress_power(dev, adev->ctrl, false);
+
+	return -EBUSY;
+}
+
+static struct dev_pm_domain ipu6_bus_pm_domain = {
+	.ops = {
+		.runtime_suspend = bus_pm_runtime_suspend,
+		.runtime_resume = bus_pm_runtime_resume,
+	},
+};
+
+static DEFINE_MUTEX(ipu6_bus_mutex);
+
+static void ipu6_bus_release(struct device *dev)
+{
+	struct ipu6_bus_device *adev = to_ipu6_bus_device(dev);
+
+	kfree(adev->pdata);
+	kfree(adev);
+}
+
+struct ipu6_bus_device *
+ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent,
+			   void *pdata, struct ipu6_buttress_ctrl *ctrl,
+			   char *name)
+{
+	struct auxiliary_device *auxdev;
+	struct ipu6_bus_device *adev;
+	struct ipu6_device *isp = pci_get_drvdata(pdev);
+	int ret;
+
+	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+	if (!adev)
+		return ERR_PTR(-ENOMEM);
+
+	adev->dma_mask = DMA_BIT_MASK(isp->secure_mode ? IPU6_MMU_ADDR_BITS :
+				      IPU6_MMU_ADDR_BITS_NON_SECURE);
+	adev->isp = isp;
+	adev->ctrl = ctrl;
+	adev->pdata = pdata;
+	auxdev = &adev->auxdev;
+	auxdev->name = name;
+	auxdev->id = (pci_domain_nr(pdev->bus) << 16) |
+		      PCI_DEVID(pdev->bus->number, pdev->devfn);
+
+	auxdev->dev.parent = parent;
+	auxdev->dev.release = ipu6_bus_release;
+	auxdev->dev.dma_ops = &ipu6_dma_ops;
+	auxdev->dev.dma_mask = &adev->dma_mask;
+	auxdev->dev.dma_parms = pdev->dev.dma_parms;
+	auxdev->dev.coherent_dma_mask = adev->dma_mask;
+
+	ret = auxiliary_device_init(auxdev);
+	if (ret < 0) {
+		dev_err(&isp->pdev->dev, "auxiliary device init failed (%d)\n",
+			ret);
+		kfree(adev);
+		return ERR_PTR(ret);
+	}
+
+	dev_pm_domain_set(&auxdev->dev, &ipu6_bus_pm_domain);
+
+	pm_runtime_forbid(&adev->auxdev.dev);
+	pm_runtime_enable(&adev->auxdev.dev);
+
+	return adev;
+}
+
+int ipu6_bus_add_device(struct ipu6_bus_device *adev)
+{
+	struct auxiliary_device *auxdev = &adev->auxdev;
+	int ret;
+
+	ret = auxiliary_device_add(auxdev);
+	if (ret) {
+		auxiliary_device_uninit(auxdev);
+		return ret;
+	}
+
+	mutex_lock(&ipu6_bus_mutex);
+	list_add(&adev->list, &adev->isp->devices);
+	mutex_unlock(&ipu6_bus_mutex);
+
+	pm_runtime_allow(&auxdev->dev);
+
+	return 0;
+}
+
+void ipu6_bus_del_devices(struct pci_dev *pdev)
+{
+	struct ipu6_device *isp = pci_get_drvdata(pdev);
+	struct ipu6_bus_device *adev, *save;
+
+	mutex_lock(&ipu6_bus_mutex);
+
+	list_for_each_entry_safe(adev, save, &isp->devices, list) {
+		pm_runtime_disable(&adev->auxdev.dev);
+		list_del(&adev->list);
+		auxiliary_device_delete(&adev->auxdev);
+		auxiliary_device_uninit(&adev->auxdev);
+	}
+
+	mutex_unlock(&ipu6_bus_mutex);
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-bus.h b/drivers/media/pci/intel/ipu6/ipu6-bus.h
new file mode 100644
index 000000000000..d46181354836
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-bus.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013 - 2023 Intel Corporation */
+
+#ifndef IPU6_BUS_H
+#define IPU6_BUS_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+struct firmware;
+struct pci_dev;
+
+#define IPU6_BUS_NAME	IPU6_NAME "-bus"
+
+struct ipu6_buttress_ctrl;
+
+struct ipu6_bus_device {
+	struct auxiliary_device auxdev;
+	struct auxiliary_driver *auxdrv;
+	const struct ipu6_auxdrv_data *auxdrv_data;
+	struct list_head list;
+	void *pdata;
+	struct ipu6_mmu *mmu;
+	struct ipu6_device *isp;
+	struct ipu6_buttress_ctrl *ctrl;
+	u64 dma_mask;
+	const struct firmware *fw;
+	struct sg_table fw_sgt;
+	u64 *pkg_dir;
+	dma_addr_t pkg_dir_dma_addr;
+	unsigned int pkg_dir_size;
+};
+
+struct ipu6_auxdrv_data {
+	irqreturn_t (*isr)(struct ipu6_bus_device *adev);
+	irqreturn_t (*isr_threaded)(struct ipu6_bus_device *adev);
+	bool wake_isr_thread;
+};
+
+#define to_ipu6_bus_device(_dev) \
+	container_of(to_auxiliary_dev(_dev), struct ipu6_bus_device, auxdev)
+#define auxdev_to_adev(_auxdev) \
+	container_of(_auxdev, struct ipu6_bus_device, auxdev)
+#define ipu6_bus_get_drvdata(adev) dev_get_drvdata(&(adev)->auxdev.dev)
+
+struct ipu6_bus_device *
+ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent,
+			   void *pdata, struct ipu6_buttress_ctrl *ctrl,
+			   char *name);
+int ipu6_bus_add_device(struct ipu6_bus_device *adev);
+void ipu6_bus_del_devices(struct pci_dev *pdev);
+
+#endif
-- 
2.43.0

From 5836e6b1138ff9cba76ab74c1eab0ec57e50dfc1 Mon Sep 17 00:00:00 2001
From: Bingbu Cao <bingbu.cao@intel.com>
Date: Thu, 11 Jan 2024 14:55:17 +0800
Subject: [PATCH 03/31] media: intel/ipu6: add IPU6 buttress interface driver

The IPU6 buttress is the interface between the IPU device (input system
and processing system) and the rest of the SoC. It contains the overall IPU
hardware control registers; these control registers are used as the
interfaces with the Intel Converged Security Engine and the Punit to do
firmware authentication and power management.

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Link: https://lore.kernel.org/r/20240111065531.2418836-4-bingbu.cao@intel.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 drivers/media/pci/intel/ipu6/ipu6-buttress.c  | 912 ++++++++++++++++++
 drivers/media/pci/intel/ipu6/ipu6-buttress.h  | 102 ++
 .../intel/ipu6/ipu6-platform-buttress-regs.h  | 232 +++++
 3 files changed, 1246 insertions(+)
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-buttress.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-buttress.h
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h

diff --git a/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/drivers/media/pci/intel/ipu6/ipu6-buttress.c
new file mode 100644
index 000000000000..2f73302812f3
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-buttress.c
@@ -0,0 +1,912 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2023 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pfn.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/time64.h>
+
+#include "ipu6.h"
+#include "ipu6-bus.h"
+#include "ipu6-buttress.h"
+#include "ipu6-platform-buttress-regs.h"
+
+#define BOOTLOADER_STATUS_OFFSET       0x15c
+
+#define BOOTLOADER_MAGIC_KEY		0xb00710ad
+
+#define ENTRY	BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1
+#define EXIT	BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2
+#define QUERY	BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE
+
+#define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX	10
+
+#define BUTTRESS_POWER_TIMEOUT_US		(200 * USEC_PER_MSEC)
+
+#define BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US	(5 * USEC_PER_SEC)
+#define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US	(10 * USEC_PER_SEC)
+#define BUTTRESS_CSE_FWRESET_TIMEOUT_US		(100 * USEC_PER_MSEC)
+
+#define BUTTRESS_IPC_TX_TIMEOUT_MS		MSEC_PER_SEC
+#define BUTTRESS_IPC_RX_TIMEOUT_MS		MSEC_PER_SEC
+#define BUTTRESS_IPC_VALIDITY_TIMEOUT_US	(1 * USEC_PER_SEC)
+#define BUTTRESS_TSC_SYNC_TIMEOUT_US		(5 * USEC_PER_MSEC)
+
+#define BUTTRESS_IPC_RESET_RETRY		2000
+#define BUTTRESS_CSE_IPC_RESET_RETRY	4
+#define BUTTRESS_IPC_CMD_SEND_RETRY	1
+
+#define BUTTRESS_MAX_CONSECUTIVE_IRQS	100
+
+static const u32 ipu6_adev_irq_mask[2] = {
+	BUTTRESS_ISR_IS_IRQ,
+	BUTTRESS_ISR_PS_IRQ
+};
+
+int ipu6_buttress_ipc_reset(struct ipu6_device *isp,
+			    struct ipu6_buttress_ipc *ipc)
+{
+	unsigned int retries = BUTTRESS_IPC_RESET_RETRY;
+	struct ipu6_buttress *b = &isp->buttress;
+	u32 val = 0, csr_in_clr;
+
+	if (!isp->secure_mode) {
+		dev_dbg(&isp->pdev->dev, "Skip IPC reset for non-secure mode");
+		return 0;
+	}
+
+	mutex_lock(&b->ipc_mutex);
+
+	/* Clear-by-1 CSR (all bits), corresponding internal states. */
+	val = readl(isp->base + ipc->csr_in);
+	writel(val, isp->base + ipc->csr_in);
+
+	/* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */
+	writel(ENTRY, isp->base + ipc->csr_out);
+	/*
+	 * Clear-by-1 all CSR bits EXCEPT following
+	 * bits:
+	 * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
+	 * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+	 * C. Possibly custom bits, depending on
+	 * their role.
+	 */
+	csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ |
+		BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID |
+		BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY;
+
+	do {
+		usleep_range(400, 500);
+		val = readl(isp->base + ipc->csr_in);
+		switch (val) {
+		case ENTRY | EXIT:
+		case ENTRY | EXIT | QUERY:
+			/*
+			 * 1) Clear-by-1 CSR bits
+			 * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
+			 * IPC_PEER_COMP_ACTIONS_RST_PHASE2).
+			 * 2) Set peer CSR bit
+			 * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
+			 */
+			writel(ENTRY | EXIT, isp->base + ipc->csr_in);
+			writel(QUERY, isp->base + ipc->csr_out);
+			break;
+		case ENTRY:
+		case ENTRY | QUERY:
+			/*
+			 * 1) Clear-by-1 CSR bits
+			 * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
+			 * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE).
+			 * 2) Set peer CSR bit
+			 * IPC_PEER_COMP_ACTIONS_RST_PHASE1.
+			 */
+			writel(ENTRY | QUERY, isp->base + ipc->csr_in);
+			writel(ENTRY, isp->base + ipc->csr_out);
+			break;
+		case EXIT:
+		case EXIT | QUERY:
+			/*
+			 * Clear-by-1 CSR bit
+			 * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+			 * 1) Clear incoming doorbell.
+			 * 2) Clear-by-1 all CSR bits EXCEPT following
+			 * bits:
+			 * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
+			 * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+			 * C. Possibly custom bits, depending on
+			 * their role.
+			 * 3) Set peer CSR bit
+			 * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+			 */
+			writel(EXIT, isp->base + ipc->csr_in);
+			writel(0, isp->base + ipc->db0_in);
+			writel(csr_in_clr, isp->base + ipc->csr_in);
+			writel(EXIT, isp->base + ipc->csr_out);
+
+			/*
+			 * Read csr_in again to make sure if RST_PHASE2 is done.
+			 * If csr_in is QUERY, it should be handled again.
+			 */
+			usleep_range(200, 300);
+			val = readl(isp->base + ipc->csr_in);
+			if (val & QUERY) {
+				dev_dbg(&isp->pdev->dev,
+					"RST_PHASE2 retry csr_in = %x\n", val);
+				break;
+			}
+			mutex_unlock(&b->ipc_mutex);
+			return 0;
+		case QUERY:
+			/*
+			 * 1) Clear-by-1 CSR bit
+			 * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
+			 * 2) Set peer CSR bit
+			 * IPC_PEER_COMP_ACTIONS_RST_PHASE1
+			 */
+			writel(QUERY, isp->base + ipc->csr_in);
+			writel(ENTRY, isp->base + ipc->csr_out);
+			break;
+		default:
+			dev_warn_ratelimited(&isp->pdev->dev,
+					     "Unexpected CSR 0x%x\n", val);
+			break;
+		}
+	} while (retries--);
+
+	mutex_unlock(&b->ipc_mutex);
+	dev_err(&isp->pdev->dev, "Timed out while waiting for CSE\n");
+
+	return -ETIMEDOUT;
+}
+
+static void ipu6_buttress_ipc_validity_close(struct ipu6_device *isp,
+					     struct ipu6_buttress_ipc *ipc)
+{
+	writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ,
+	       isp->base + ipc->csr_out);
+}
+
+static int
+ipu6_buttress_ipc_validity_open(struct ipu6_device *isp,
+				struct ipu6_buttress_ipc *ipc)
+{
+	unsigned int mask = BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID;
+	void __iomem *addr;
+	int ret;
+	u32 val;
+
+	writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ,
+	       isp->base + ipc->csr_out);
+
+	addr = isp->base + ipc->csr_in;
+	ret = readl_poll_timeout(addr, val, val & mask, 200,
+				 BUTTRESS_IPC_VALIDITY_TIMEOUT_US);
+	if (ret) {
+		dev_err(&isp->pdev->dev, "CSE validity timeout 0x%x\n", val);
+		ipu6_buttress_ipc_validity_close(isp, ipc);
+	}
+
+	return ret;
+}
+
+static void ipu6_buttress_ipc_recv(struct ipu6_device *isp,
+				   struct ipu6_buttress_ipc *ipc, u32 *ipc_msg)
+{
+	if (ipc_msg)
+		*ipc_msg = readl(isp->base + ipc->data0_in);
+	writel(0, isp->base + ipc->db0_in);
+}
+
+static int ipu6_buttress_ipc_send_bulk(struct ipu6_device *isp,
+				       enum ipu6_buttress_ipc_domain ipc_domain,
+				       struct ipu6_ipc_buttress_bulk_msg *msgs,
+				       u32 size)
+{
+	unsigned long tx_timeout_jiffies, rx_timeout_jiffies;
+	unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY;
+	struct ipu6_buttress *b = &isp->buttress;
+	struct ipu6_buttress_ipc *ipc;
+	u32 val;
+	int ret;
+	int tout;
+
+	ipc = ipc_domain == IPU6_BUTTRESS_IPC_CSE ? &b->cse : &b->ish;
+
+	mutex_lock(&b->ipc_mutex);
+
+	ret = ipu6_buttress_ipc_validity_open(isp, ipc);
+	if (ret) {
+		dev_err(&isp->pdev->dev, "IPC validity open failed\n");
+		goto out;
+	}
+
+	tx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT_MS);
+	rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT_MS);
+
+	for (i = 0; i < size; i++) {
+		reinit_completion(&ipc->send_complete);
+		if (msgs[i].require_resp)
+			reinit_completion(&ipc->recv_complete);
+
+		dev_dbg(&isp->pdev->dev, "bulk IPC command: 0x%x\n",
+			msgs[i].cmd);
+		writel(msgs[i].cmd, isp->base + ipc->data0_out);
+		val = BUTTRESS_IU2CSEDB0_BUSY | msgs[i].cmd_size;
+		writel(val, isp->base + ipc->db0_out);
+
+		tout = wait_for_completion_timeout(&ipc->send_complete,
+						   tx_timeout_jiffies);
+		if (!tout) {
+			dev_err(&isp->pdev->dev, "send IPC response timeout\n");
+			if (!retry--) {
+				ret = -ETIMEDOUT;
+				goto out;
+			}
+
+			/* Try again if CSE is not responding on first try */
+			writel(0, isp->base + ipc->db0_out);
+			i--;
+			continue;
+		}
+
+		retry = BUTTRESS_IPC_CMD_SEND_RETRY;
+
+		if (!msgs[i].require_resp)
+			continue;
+
+		tout = wait_for_completion_timeout(&ipc->recv_complete,
+						   rx_timeout_jiffies);
+		if (!tout) {
+			dev_err(&isp->pdev->dev, "recv IPC response timeout\n");
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+
+		if (ipc->nack_mask &&
+		    (ipc->recv_data & ipc->nack_mask) == ipc->nack) {
+			dev_err(&isp->pdev->dev,
+				"IPC NACK for cmd 0x%x\n", msgs[i].cmd);
+			ret = -EIO;
+			goto out;
+		}
+
+		if (ipc->recv_data != msgs[i].expected_resp) {
+			dev_err(&isp->pdev->dev,
+				"expected resp: 0x%x, IPC response: 0x%x ",
+				msgs[i].expected_resp, ipc->recv_data);
+			ret = -EIO;
+			goto out;
+		}
+	}
+
+	dev_dbg(&isp->pdev->dev, "bulk IPC commands done\n");
+
+out:
+	ipu6_buttress_ipc_validity_close(isp, ipc);
+	mutex_unlock(&b->ipc_mutex);
+	return ret;
+}
+
+static int
+ipu6_buttress_ipc_send(struct ipu6_device *isp,
+		       enum ipu6_buttress_ipc_domain ipc_domain,
+		       u32 ipc_msg, u32 size, bool require_resp,
+		       u32 expected_resp)
+{
+	struct ipu6_ipc_buttress_bulk_msg msg = {
+		.cmd = ipc_msg,
+		.cmd_size = size,
+		.require_resp = require_resp,
+		.expected_resp = expected_resp,
+	};
+
+	return ipu6_buttress_ipc_send_bulk(isp, ipc_domain, &msg, 1);
+}
+
+static irqreturn_t ipu6_buttress_call_isr(struct ipu6_bus_device *adev)
+{
+	irqreturn_t ret = IRQ_WAKE_THREAD;
+
+	if (!adev || !adev->auxdrv || !adev->auxdrv_data)
+		return IRQ_NONE;
+
+	if (adev->auxdrv_data->isr)
+		ret = adev->auxdrv_data->isr(adev);
+
+	if (ret == IRQ_WAKE_THREAD && !adev->auxdrv_data->isr_threaded)
+		ret = IRQ_NONE;
+
+	return ret;
+}
+
+irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr)
+{
+	struct ipu6_device *isp = isp_ptr;
+	struct ipu6_bus_device *adev[] = { isp->isys, isp->psys };
+	struct ipu6_buttress *b = &isp->buttress;
+	u32 reg_irq_sts = BUTTRESS_REG_ISR_STATUS;
+	irqreturn_t ret = IRQ_NONE;
+	u32 disable_irqs = 0;
+	u32 irq_status;
+	u32 i, count = 0;
+
+	pm_runtime_get_noresume(&isp->pdev->dev);
+
+	irq_status = readl(isp->base + reg_irq_sts);
+	if (!irq_status) {
+		pm_runtime_put_noidle(&isp->pdev->dev);
+		return IRQ_NONE;
+	}
+
+	do {
+		writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR);
+
+		for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask); i++) {
+			irqreturn_t r = ipu6_buttress_call_isr(adev[i]);
+
+			if (!(irq_status & ipu6_adev_irq_mask[i]))
+				continue;
+
+			if (r == IRQ_WAKE_THREAD) {
+				ret = IRQ_WAKE_THREAD;
+				disable_irqs |= ipu6_adev_irq_mask[i];
+			} else if (ret == IRQ_NONE && r == IRQ_HANDLED) {
+				ret = IRQ_HANDLED;
+			}
+		}
+
+		if ((irq_status & BUTTRESS_EVENT) && ret == IRQ_NONE)
+			ret = IRQ_HANDLED;
+
+		if (irq_status & BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING) {
+			dev_dbg(&isp->pdev->dev,
+				"BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING\n");
+			ipu6_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data);
+			complete(&b->cse.recv_complete);
+		}
+
+		if (irq_status & BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING) {
+			dev_dbg(&isp->pdev->dev,
+				"BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING\n");
+			ipu6_buttress_ipc_recv(isp, &b->ish, &b->ish.recv_data);
+			complete(&b->ish.recv_complete);
+		}
+
+		if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE) {
+			dev_dbg(&isp->pdev->dev,
+				"BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n");
+			complete(&b->cse.send_complete);
+		}
+
+		if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH) {
+			dev_dbg(&isp->pdev->dev,
+				"BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH\n");
+			complete(&b->ish.send_complete);
+		}
+
+		if (irq_status & BUTTRESS_ISR_SAI_VIOLATION &&
+		    ipu6_buttress_get_secure_mode(isp))
+			dev_err(&isp->pdev->dev,
+				"BUTTRESS_ISR_SAI_VIOLATION\n");
+
+		if (irq_status & (BUTTRESS_ISR_IS_FATAL_MEM_ERR |
+				  BUTTRESS_ISR_PS_FATAL_MEM_ERR))
+			dev_err(&isp->pdev->dev,
+				"BUTTRESS_ISR_FATAL_MEM_ERR\n");
+
+		if (irq_status & BUTTRESS_ISR_UFI_ERROR)
+			dev_err(&isp->pdev->dev, "BUTTRESS_ISR_UFI_ERROR\n");
+
+		if (++count == BUTTRESS_MAX_CONSECUTIVE_IRQS) {
+			dev_err(&isp->pdev->dev, "too many consecutive IRQs\n");
+			ret = IRQ_NONE;
+			break;
+		}
+
+		irq_status = readl(isp->base + reg_irq_sts);
+	} while (irq_status);
+
+	if (disable_irqs)
+		writel(BUTTRESS_IRQS & ~disable_irqs,
+		       isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+	pm_runtime_put(&isp->pdev->dev);
+
+	return ret;
+}
+
+irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr)
+{
+	struct ipu6_device *isp = isp_ptr;
+	struct ipu6_bus_device *adev[] = { isp->isys, isp->psys };
+	const struct ipu6_auxdrv_data *drv_data = NULL;
+	irqreturn_t ret = IRQ_NONE;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask) && adev[i]; i++) {
+		drv_data = adev[i]->auxdrv_data;
+		if (!drv_data)
+			continue;
+
+		if (drv_data->wake_isr_thread &&
+		    drv_data->isr_threaded(adev[i]) == IRQ_HANDLED)
+			ret = IRQ_HANDLED;
+	}
+
+	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+	return ret;
+}
+
+int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl,
+			bool on)
+{
+	struct ipu6_device *isp = to_ipu6_bus_device(dev)->isp;
+	u32 pwr_sts, val;
+	int ret;
+
+	if (!ctrl)
+		return 0;
+
+	mutex_lock(&isp->buttress.power_mutex);
+
+	if (!on) {
+		val = 0;
+		pwr_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift;
+	} else {
+		val = BUTTRESS_FREQ_CTL_START |
+			FIELD_PREP(BUTTRESS_FREQ_CTL_RATIO_MASK,
+				   ctrl->ratio) |
+			FIELD_PREP(BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK,
+				   ctrl->qos_floor) |
+			BUTTRESS_FREQ_CTL_ICCMAX_LEVEL;
+
+		pwr_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift;
+	}
+
+	writel(val, isp->base + ctrl->freq_ctl);
+
+	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE,
+				 val, (val & ctrl->pwr_sts_mask) == pwr_sts,
+				 100, BUTTRESS_POWER_TIMEOUT_US);
+	if (ret)
+		dev_err(&isp->pdev->dev,
+			"Change power status timeout with 0x%x\n", val);
+
+	ctrl->started = !ret && on;
+
+	mutex_unlock(&isp->buttress.power_mutex);
+
+	return ret;
+}
+
+bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp)
+{
+	u32 val;
+
+	val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+	return val & BUTTRESS_SECURITY_CTL_FW_SECURE_MODE;
+}
+
+bool ipu6_buttress_auth_done(struct ipu6_device *isp)
+{
+	u32 val;
+
+	if (!isp->secure_mode)
+		return true;
+
+	val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+	val = FIELD_GET(BUTTRESS_SECURITY_CTL_FW_SETUP_MASK, val);
+
+	return val == BUTTRESS_SECURITY_CTL_AUTH_DONE;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_buttress_auth_done, INTEL_IPU6);
+
+int ipu6_buttress_reset_authentication(struct ipu6_device *isp)
+{
+	int ret;
+	u32 val;
+
+	if (!isp->secure_mode) {
+		dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n");
+		return 0;
+	}
+
+	writel(BUTTRESS_FW_RESET_CTL_START, isp->base +
+	       BUTTRESS_REG_FW_RESET_CTL);
+
+	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_FW_RESET_CTL, val,
+				 val & BUTTRESS_FW_RESET_CTL_DONE, 500,
+				 BUTTRESS_CSE_FWRESET_TIMEOUT_US);
+	if (ret) {
+		dev_err(&isp->pdev->dev,
+			"Time out while resetting authentication state\n");
+		return ret;
+	}
+
+	dev_dbg(&isp->pdev->dev, "FW reset for authentication done\n");
+	writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL);
+	/* leave some time for HW restore */
+	usleep_range(800, 1000);
+
+	return 0;
+}
+
+int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys,
+			       const struct firmware *fw, struct sg_table *sgt)
+{
+	struct page **pages;
+	const void *addr;
+	unsigned long n_pages;
+	unsigned int i;
+	int ret;
+
+	n_pages = PHYS_PFN(PAGE_ALIGN(fw->size));
+
+	pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	addr = fw->data;
+	for (i = 0; i < n_pages; i++) {
+		struct page *p = vmalloc_to_page(addr);
+
+		if (!p) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		pages[i] = p;
+		addr += PAGE_SIZE;
+	}
+
+	ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, fw->size,
+					GFP_KERNEL);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = dma_map_sgtable(&sys->auxdev.dev, sgt, DMA_TO_DEVICE, 0);
+	if (ret < 0) {
+		ret = -ENOMEM;
+		sg_free_table(sgt);
+		goto out;
+	}
+
+	dma_sync_sgtable_for_device(&sys->auxdev.dev, sgt, DMA_TO_DEVICE);
+
+out:
+	kfree(pages);
+
+	return ret;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_buttress_map_fw_image, INTEL_IPU6);
+
+void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys,
+				  struct sg_table *sgt)
+{
+	dma_unmap_sgtable(&sys->auxdev.dev, sgt, DMA_TO_DEVICE, 0);
+	sg_free_table(sgt);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_buttress_unmap_fw_image, INTEL_IPU6);
+
+int ipu6_buttress_authenticate(struct ipu6_device *isp)
+{
+	struct ipu6_buttress *b = &isp->buttress;
+	struct ipu6_psys_pdata *psys_pdata;
+	u32 data, mask, done, fail;
+	int ret;
+
+	if (!isp->secure_mode) {
+		dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n");
+		return 0;
+	}
+
+	psys_pdata = isp->psys->pdata;
+
+	mutex_lock(&b->auth_mutex);
+
+	if (ipu6_buttress_auth_done(isp)) {
+		ret = 0;
+		goto out_unlock;
+	}
+
+	/*
+	 * Write address of FIT table to FW_SOURCE register
+	 * Let's use fw address. I.e. not using FIT table yet
+	 */
+	data = lower_32_bits(isp->psys->pkg_dir_dma_addr);
+	writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_LO);
+
+	data = upper_32_bits(isp->psys->pkg_dir_dma_addr);
+	writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_HI);
+
+	/*
+	 * Write boot_load into IU2CSEDATA0
+	 * Write sizeof(boot_load) | 0x2 << CLIENT_ID to
+	 * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY
+	 */
+	dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n");
+
+	ret = ipu6_buttress_ipc_send(isp, IPU6_BUTTRESS_IPC_CSE,
+				     BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD,
+				     1, true,
+				     BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE);
+	if (ret) {
+		dev_err(&isp->pdev->dev, "CSE boot_load failed\n");
+		goto out_unlock;
+	}
+
+	mask = BUTTRESS_SECURITY_CTL_FW_SETUP_MASK;
+	done = BUTTRESS_SECURITY_CTL_FW_SETUP_DONE;
+	fail = BUTTRESS_SECURITY_CTL_AUTH_FAILED;
+	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data,
+				 ((data & mask) == done ||
+				  (data & mask) == fail), 500,
+				 BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US);
+	if (ret) {
+		dev_err(&isp->pdev->dev, "CSE boot_load timeout\n");
+		goto out_unlock;
+	}
+
+	if ((data & mask) == fail) {
+		dev_err(&isp->pdev->dev, "CSE auth failed\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	ret = readl_poll_timeout(psys_pdata->base + BOOTLOADER_STATUS_OFFSET,
+				 data, data == BOOTLOADER_MAGIC_KEY, 500,
+				 BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US);
+	if (ret) {
+		dev_err(&isp->pdev->dev, "Unexpected magic number 0x%x\n",
+			data);
+		goto out_unlock;
+	}
+
+	/*
+	 * Write authenticate_run into IU2CSEDATA0
+	 * Write sizeof(boot_load) | 0x2 << CLIENT_ID to
+	 * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY
+	 */
+	dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n");
+	ret = ipu6_buttress_ipc_send(isp, IPU6_BUTTRESS_IPC_CSE,
+				     BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN,
+				     1, true,
+				     BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE);
+	if (ret) {
+		dev_err(&isp->pdev->dev, "CSE authenticate_run failed\n");
+		goto out_unlock;
+	}
+
+	done = BUTTRESS_SECURITY_CTL_AUTH_DONE;
+	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data,
+				 ((data & mask) == done ||
+				  (data & mask) == fail), 500,
+				 BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US);
+	if (ret) {
+		dev_err(&isp->pdev->dev, "CSE authenticate timeout\n");
+		goto out_unlock;
+	}
+
+	if ((data & mask) == fail) {
+		dev_err(&isp->pdev->dev, "CSE boot_load failed\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	dev_info(&isp->pdev->dev, "CSE authenticate_run done\n");
+
+out_unlock:
+	mutex_unlock(&b->auth_mutex);
+
+	return ret;
+}
+
+static int ipu6_buttress_send_tsc_request(struct ipu6_device *isp)
+{
+	u32 val, mask, done;
+	int ret;
+
+	mask = BUTTRESS_PWR_STATE_HH_STATUS_MASK;
+
+	writel(BUTTRESS_FABRIC_CMD_START_TSC_SYNC,
+	       isp->base + BUTTRESS_REG_FABRIC_CMD);
+
+	val = readl(isp->base + BUTTRESS_REG_PWR_STATE);
+	val = FIELD_GET(mask, val);
+	if (val == BUTTRESS_PWR_STATE_HH_STATE_ERR) {
+		dev_err(&isp->pdev->dev, "Start tsc sync failed\n");
+		return -EINVAL;
+	}
+
+	done = BUTTRESS_PWR_STATE_HH_STATE_DONE;
+	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, val,
+				 FIELD_GET(mask, val) == done, 500,
+				 BUTTRESS_TSC_SYNC_TIMEOUT_US);
+	if (ret)
+		dev_err(&isp->pdev->dev, "Start tsc sync timeout\n");
+
+	return ret;
+}
+
+int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp)
+{
+	unsigned int i;
+
+	for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) {
+		u32 val;
+		int ret;
+
+		ret = ipu6_buttress_send_tsc_request(isp);
+		if (ret != -ETIMEDOUT)
+			return ret;
+
+		val = readl(isp->base + BUTTRESS_REG_TSW_CTL);
+		val = val | BUTTRESS_TSW_CTL_SOFT_RESET;
+		writel(val, isp->base + BUTTRESS_REG_TSW_CTL);
+		val = val & ~BUTTRESS_TSW_CTL_SOFT_RESET;
+		writel(val, isp->base + BUTTRESS_REG_TSW_CTL);
+	}
+
+	dev_err(&isp->pdev->dev, "TSC sync failed (timeout)\n");
+
+	return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, INTEL_IPU6);
+
+void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val)
+{
+	u32 tsc_hi_1, tsc_hi_2, tsc_lo;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	tsc_hi_1 = readl(isp->base + BUTTRESS_REG_TSC_HI);
+	tsc_lo = readl(isp->base + BUTTRESS_REG_TSC_LO);
+	tsc_hi_2 = readl(isp->base + BUTTRESS_REG_TSC_HI);
+	if (tsc_hi_1 == tsc_hi_2) {
+		*val = (u64)tsc_hi_1 << 32 | tsc_lo;
+	} else {
+		/* Check if TSC has rolled over */
+		if (tsc_lo & BIT(31))
+			*val = (u64)tsc_hi_1 << 32 | tsc_lo;
+		else
+			*val = (u64)tsc_hi_2 << 32 | tsc_lo;
+	}
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_read, INTEL_IPU6);
+
+u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp)
+{
+	u64 ns = ticks * 10000;
+
+	/*
+	 * converting TSC tick count to ns is calculated by:
+	 * Example (TSC clock frequency is 19.2MHz):
+	 * ns = ticks * 1000 000 000 / 19.2Mhz
+	 *    = ticks * 1000 000 000 / 19200000Hz
+	 *    = ticks * 10000 / 192 ns
+	 */
+	return div_u64(ns, isp->buttress.ref_clk);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_ticks_to_ns, INTEL_IPU6);
+
+void ipu6_buttress_restore(struct ipu6_device *isp)
+{
+	struct ipu6_buttress *b = &isp->buttress;
+
+	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR);
+	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);
+	writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_WDT);
+}
+
+int ipu6_buttress_init(struct ipu6_device *isp)
+{
+	int ret, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY;
+	struct ipu6_buttress *b = &isp->buttress;
+	u32 val;
+
+	mutex_init(&b->power_mutex);
+	mutex_init(&b->auth_mutex);
+	mutex_init(&b->cons_mutex);
+	mutex_init(&b->ipc_mutex);
+	init_completion(&b->ish.send_complete);
+	init_completion(&b->cse.send_complete);
+	init_completion(&b->ish.recv_complete);
+	init_completion(&b->cse.recv_complete);
+
+	b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK;
+	b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK;
+	b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR;
+	b->cse.csr_out = BUTTRESS_REG_IU2CSECSR;
+	b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0;
+	b->cse.db0_out = BUTTRESS_REG_IU2CSEDB0;
+	b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0;
+	b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0;
+
+	/* no ISH on IPU6 */
+	memset(&b->ish, 0, sizeof(b->ish));
+	INIT_LIST_HEAD(&b->constraints);
+
+	isp->secure_mode = ipu6_buttress_get_secure_mode(isp);
+	dev_info(&isp->pdev->dev, "IPU6 in %s mode touch 0x%x mask 0x%x\n",
+		 isp->secure_mode ? "secure" : "non-secure",
+		 readl(isp->base + BUTTRESS_REG_SECURITY_TOUCH),
+		 readl(isp->base + BUTTRESS_REG_CAMERA_MASK));
+
+	b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_WDT);
+	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR);
+	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+	/* get ref_clk frequency by reading the indication in btrs control */
+	val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL);
+	val = FIELD_GET(BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND, val);
+
+	switch (val) {
+	case 0x0:
+		b->ref_clk = 240;
+		break;
+	case 0x1:
+		b->ref_clk = 192;
+		break;
+	case 0x2:
+		b->ref_clk = 384;
+		break;
+	default:
+		dev_warn(&isp->pdev->dev,
+			 "Unsupported ref clock, use 19.2Mhz by default.\n");
+		b->ref_clk = 192;
+		break;
+	}
+
+	/* Retry a couple of times in case CSE initialization is delayed */
+	do {
+		ret = ipu6_buttress_ipc_reset(isp, &b->cse);
+		if (ret) {
+			dev_warn(&isp->pdev->dev,
+				 "IPC reset protocol failed, retrying\n");
+		} else {
+			dev_dbg(&isp->pdev->dev, "IPC reset done\n");
+			return 0;
+		}
+	} while (ipc_reset_retry--);
+
+	dev_err(&isp->pdev->dev, "IPC reset protocol failed\n");
+
+	mutex_destroy(&b->power_mutex);
+	mutex_destroy(&b->auth_mutex);
+	mutex_destroy(&b->cons_mutex);
+	mutex_destroy(&b->ipc_mutex);
+
+	return ret;
+}
+
+void ipu6_buttress_exit(struct ipu6_device *isp)
+{
+	struct ipu6_buttress *b = &isp->buttress;
+
+	writel(0, isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+	mutex_destroy(&b->power_mutex);
+	mutex_destroy(&b->auth_mutex);
+	mutex_destroy(&b->cons_mutex);
+	mutex_destroy(&b->ipc_mutex);
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/drivers/media/pci/intel/ipu6/ipu6-buttress.h
new file mode 100644
index 000000000000..558e1d70f4af
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-buttress.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013 - 2023 Intel Corporation */
+
+#ifndef IPU6_BUTTRESS_H
+#define IPU6_BUTTRESS_H
+
+#include <linux/completion.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+struct device;
+struct firmware;
+struct ipu6_device;
+struct ipu6_bus_device;
+
+#define BUTTRESS_PS_FREQ_STEP		25U
+#define BUTTRESS_MIN_FORCE_PS_FREQ	(BUTTRESS_PS_FREQ_STEP * 8)
+#define BUTTRESS_MAX_FORCE_PS_FREQ	(BUTTRESS_PS_FREQ_STEP * 32)
+
+#define BUTTRESS_IS_FREQ_STEP		25U
+#define BUTTRESS_MIN_FORCE_IS_FREQ	(BUTTRESS_IS_FREQ_STEP * 8)
+#define BUTTRESS_MAX_FORCE_IS_FREQ	(BUTTRESS_IS_FREQ_STEP * 22)
+
+struct ipu6_buttress_ctrl {
+	u32 freq_ctl, pwr_sts_shift, pwr_sts_mask, pwr_sts_on, pwr_sts_off;
+	unsigned int ratio;
+	unsigned int qos_floor;
+	bool started;
+};
+
+struct ipu6_buttress_ipc {
+	struct completion send_complete;
+	struct completion recv_complete;
+	u32 nack;
+	u32 nack_mask;
+	u32 recv_data;
+	u32 csr_out;
+	u32 csr_in;
+	u32 db0_in;
+	u32 db0_out;
+	u32 data0_out;
+	u32 data0_in;
+};
+
+struct ipu6_buttress {
+	struct mutex power_mutex, auth_mutex, cons_mutex, ipc_mutex;
+	struct ipu6_buttress_ipc cse;
+	struct ipu6_buttress_ipc ish;
+	struct list_head constraints;
+	u32 wdt_cached_value;
+	bool force_suspend;
+	u32 ref_clk;
+};
+
+struct ipu6_buttress_sensor_clk_freq {
+	unsigned int rate;
+	unsigned int val;
+};
+
+enum ipu6_buttress_ipc_domain {
+	IPU6_BUTTRESS_IPC_CSE,
+	IPU6_BUTTRESS_IPC_ISH,
+};
+
+struct ipu6_buttress_constraint {
+	struct list_head list;
+	unsigned int min_freq;
+};
+
+struct ipu6_ipc_buttress_bulk_msg {
+	u32 cmd;
+	u32 expected_resp;
+	bool require_resp;
+	u8 cmd_size;
+};
+
+int ipu6_buttress_ipc_reset(struct ipu6_device *isp,
+			    struct ipu6_buttress_ipc *ipc);
+int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys,
+			       const struct firmware *fw,
+			       struct sg_table *sgt);
+void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys,
+				  struct sg_table *sgt);
+int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl,
+			bool on);
+bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp);
+int ipu6_buttress_authenticate(struct ipu6_device *isp);
+int ipu6_buttress_reset_authentication(struct ipu6_device *isp);
+bool ipu6_buttress_auth_done(struct ipu6_device *isp);
+int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp);
+void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val);
+u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp);
+
+irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr);
+irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr);
+int ipu6_buttress_init(struct ipu6_device *isp);
+void ipu6_buttress_exit(struct ipu6_device *isp);
+void ipu6_buttress_csi_port_config(struct ipu6_device *isp,
+				   u32 legacy, u32 combo);
+void ipu6_buttress_restore(struct ipu6_device *isp);
+#endif /* IPU6_BUTTRESS_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h b/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h
new file mode 100644
index 000000000000..87239af96502
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef IPU6_PLATFORM_BUTTRESS_REGS_H
+#define IPU6_PLATFORM_BUTTRESS_REGS_H
+
+#include <linux/bits.h>
+
+/* IS_WORKPOINT_REQ */
+#define IPU6_BUTTRESS_REG_IS_FREQ_CTL		0x34
+/* PS_WORKPOINT_REQ */
+#define IPU6_BUTTRESS_REG_PS_FREQ_CTL		0x38
+
+#define IPU6_IS_FREQ_MAX		533
+#define IPU6_IS_FREQ_MIN		200
+#define IPU6_PS_FREQ_MAX		450
+#define IPU6_IS_FREQ_RATIO_BASE		25
+#define IPU6_PS_FREQ_RATIO_BASE		25
+
+/* should be tuned for real silicon */
+#define IPU6_IS_FREQ_CTL_DEFAULT_RATIO		0x08
+#define IPU6SE_IS_FREQ_CTL_DEFAULT_RATIO	0x0a
+#define IPU6_PS_FREQ_CTL_DEFAULT_RATIO		0x0d
+
+#define IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO	0x10
+#define IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO	0x0708
+
+#define IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT	3
+#define IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK	GENMASK(4, 3)
+
+#define IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT	6
+#define IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK	GENMASK(7, 6)
+
+#define IPU6_BUTTRESS_PWR_STATE_DN_DONE		0x0
+#define IPU6_BUTTRESS_PWR_STATE_UP_PROCESS	0x1
+#define IPU6_BUTTRESS_PWR_STATE_DN_PROCESS	0x2
+#define IPU6_BUTTRESS_PWR_STATE_UP_DONE		0x3
+
+#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_0	0x270
+#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_1	0x274
+#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_2	0x278
+#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_3	0x27c
+#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_4	0x280
+#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_5	0x284
+#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_6	0x288
+#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_7	0x28c
+
+#define BUTTRESS_REG_WDT			0x8
+#define BUTTRESS_REG_BTRS_CTRL			0xc
+#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0	BIT(0)
+#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1	BIT(1)
+#define BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND	GENMASK(9, 8)
+
+#define BUTTRESS_REG_FW_RESET_CTL	0x30
+#define BUTTRESS_FW_RESET_CTL_START	BIT(0)
+#define BUTTRESS_FW_RESET_CTL_DONE	BIT(1)
+
+#define BUTTRESS_REG_IS_FREQ_CTL	0x34
+#define BUTTRESS_REG_PS_FREQ_CTL	0x38
+
+#define BUTTRESS_FREQ_CTL_START		BIT(31)
+#define BUTTRESS_FREQ_CTL_ICCMAX_LEVEL		GENMASK(19, 16)
+#define BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK	GENMASK(15, 8)
+#define BUTTRESS_FREQ_CTL_RATIO_MASK	GENMASK(7, 0)
+
+#define BUTTRESS_REG_PWR_STATE	0x5c
+
+#define BUTTRESS_PWR_STATE_RESET		0x0
+#define BUTTRESS_PWR_STATE_PWR_ON_DONE		0x1
+#define BUTTRESS_PWR_STATE_PWR_RDY		0x3
+#define BUTTRESS_PWR_STATE_PWR_IDLE		0x4
+
+#define BUTTRESS_PWR_STATE_HH_STATUS_MASK	GENMASK(12, 11)
+
+enum {
+	BUTTRESS_PWR_STATE_HH_STATE_IDLE,
+	BUTTRESS_PWR_STATE_HH_STATE_IN_PRGS,
+	BUTTRESS_PWR_STATE_HH_STATE_DONE,
+	BUTTRESS_PWR_STATE_HH_STATE_ERR,
+};
+
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK	GENMASK(23, 19)
+
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE			0x0
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PLL_CMP		0x1
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK		0x2
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PG_ACK		0x3
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_ASSRT_CYCLES		0x4
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES1		0x5
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES2		0x6
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DEASSRT_CYCLES	0x7
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_FUSE_WR_CMP	0x8
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_BRK_POINT			0x9
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY			0xa
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_HALT_HALTED		0xb
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DURATION_CNT3		0xc
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK_PD		0xd
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_PD_BRK_POINT		0xe
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PD_PG_ACK0		0xf
+
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK	GENMASK(28, 24)
+
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE			0x0
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_IP_RDY	0x1
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_PRE_CNT_EXH	0x2
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_VGI_PWRGOOD	0x3
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_POST_CNT_EXH	0x4
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WR_PLL_RATIO		0x5
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_CMP		0x6
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_CLKACK		0x7
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_ASSRT_CYCLES		0x8
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES1		0x9
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES2		0xa
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_DEASSRT_CYCLES	0xb
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PU_BRK_PNT		0xc
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_FUSE_ACCPT		0xd
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP			0xf
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_4_HALTED		0x10
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RESET_CNT3		0x11
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_CLKACK		0x12
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_OFF_IND		0x13
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PH4		0x14
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PLL_CMP		0x15
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_CLKACK		0x16
+
+#define BUTTRESS_REG_SECURITY_CTL	0x300
+#define BUTTRESS_REG_SKU		0x314
+#define BUTTRESS_REG_SECURITY_TOUCH	0x318
+#define BUTTRESS_REG_CAMERA_MASK	0x84
+
+#define BUTTRESS_SECURITY_CTL_FW_SECURE_MODE	BIT(16)
+#define BUTTRESS_SECURITY_CTL_FW_SETUP_MASK	GENMASK(4, 0)
+
+#define BUTTRESS_SECURITY_CTL_FW_SETUP_DONE		BIT(0)
+#define BUTTRESS_SECURITY_CTL_AUTH_DONE			BIT(1)
+#define BUTTRESS_SECURITY_CTL_AUTH_FAILED		BIT(3)
+
+#define BUTTRESS_REG_FW_SOURCE_BASE_LO	0x78
+#define BUTTRESS_REG_FW_SOURCE_BASE_HI	0x7C
+#define BUTTRESS_REG_FW_SOURCE_SIZE	0x80
+
+#define BUTTRESS_REG_ISR_STATUS		0x90
+#define BUTTRESS_REG_ISR_ENABLED_STATUS	0x94
+#define BUTTRESS_REG_ISR_ENABLE		0x98
+#define BUTTRESS_REG_ISR_CLEAR		0x9C
+
+#define BUTTRESS_ISR_IS_IRQ			BIT(0)
+#define BUTTRESS_ISR_PS_IRQ			BIT(1)
+#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE	BIT(2)
+#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH	BIT(3)
+#define BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING	BIT(4)
+#define BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING	BIT(5)
+#define BUTTRESS_ISR_CSE_CSR_SET		BIT(6)
+#define BUTTRESS_ISR_ISH_CSR_SET		BIT(7)
+#define BUTTRESS_ISR_SPURIOUS_CMP		BIT(8)
+#define BUTTRESS_ISR_WATCHDOG_EXPIRED		BIT(9)
+#define BUTTRESS_ISR_PUNIT_2_IUNIT_IRQ		BIT(10)
+#define BUTTRESS_ISR_SAI_VIOLATION		BIT(11)
+#define BUTTRESS_ISR_HW_ASSERTION		BIT(12)
+#define BUTTRESS_ISR_IS_CORRECTABLE_MEM_ERR	BIT(13)
+#define BUTTRESS_ISR_IS_FATAL_MEM_ERR		BIT(14)
+#define BUTTRESS_ISR_IS_NON_FATAL_MEM_ERR	BIT(15)
+#define BUTTRESS_ISR_PS_CORRECTABLE_MEM_ERR	BIT(16)
+#define BUTTRESS_ISR_PS_FATAL_MEM_ERR		BIT(17)
+#define BUTTRESS_ISR_PS_NON_FATAL_MEM_ERR	BIT(18)
+#define BUTTRESS_ISR_PS_FAST_THROTTLE		BIT(19)
+#define BUTTRESS_ISR_UFI_ERROR			BIT(20)
+
+#define BUTTRESS_REG_IU2CSEDB0	0x100
+
+#define BUTTRESS_IU2CSEDB0_BUSY		BIT(31)
+#define BUTTRESS_IU2CSEDB0_IPC_CLIENT_ID_VAL	2
+
+#define BUTTRESS_REG_IU2CSEDATA0	0x104
+
+#define BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD		1
+#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN		2
+#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_REPLACE		3
+#define BUTTRESS_IU2CSEDATA0_IPC_UPDATE_SECURE_TOUCH	16
+
+#define BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE			BIT(0)
+#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE			BIT(1)
+#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_REPLACE_DONE		BIT(2)
+#define BUTTRESS_CSE2IUDATA0_IPC_UPDATE_SECURE_TOUCH_DONE	BIT(4)
+
+#define BUTTRESS_REG_IU2CSECSR		0x108
+
+#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1		BIT(0)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2		BIT(1)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE	BIT(2)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ		BIT(3)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID			BIT(4)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ		BIT(5)
+
+#define BUTTRESS_REG_CSE2IUDB0		0x304
+#define BUTTRESS_REG_CSE2IUCSR		0x30C
+#define BUTTRESS_REG_CSE2IUDATA0	0x308
+
+/* 0x20 == NACK, 0xf == unknown command */
+#define BUTTRESS_CSE2IUDATA0_IPC_NACK      0xf20
+#define BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK GENMASK(15, 0)
+
+#define BUTTRESS_REG_ISH2IUCSR		0x50
+#define BUTTRESS_REG_ISH2IUDB0		0x54
+#define BUTTRESS_REG_ISH2IUDATA0	0x58
+
+#define BUTTRESS_REG_IU2ISHDB0		0x10C
+#define BUTTRESS_REG_IU2ISHDATA0	0x110
+#define BUTTRESS_REG_IU2ISHDATA1	0x114
+#define BUTTRESS_REG_IU2ISHCSR		0x118
+
+#define BUTTRESS_REG_FABRIC_CMD		0x88
+
+#define BUTTRESS_FABRIC_CMD_START_TSC_SYNC	BIT(0)
+#define BUTTRESS_FABRIC_CMD_IS_DRAIN		BIT(4)
+
+#define BUTTRESS_REG_TSW_CTL		0x120
+#define BUTTRESS_TSW_CTL_SOFT_RESET	BIT(8)
+
+#define BUTTRESS_REG_TSC_LO	0x164
+#define BUTTRESS_REG_TSC_HI	0x168
+
+#define BUTTRESS_IRQS		(BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \
+				 BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE |    \
+				 BUTTRESS_ISR_IS_IRQ | BUTTRESS_ISR_PS_IRQ)
+
+#define BUTTRESS_EVENT		 (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \
+				  BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING | \
+				  BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE |    \
+				  BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH |    \
+				  BUTTRESS_ISR_SAI_VIOLATION)
+#endif /* IPU6_PLATFORM_BUTTRESS_REGS_H */
-- 
2.43.0

From 4e01328e611be4ba5a2c9fe2baedfeb239db9d99 Mon Sep 17 00:00:00 2001
From: Bingbu Cao <bingbu.cao@intel.com>
Date: Thu, 11 Jan 2024 14:55:18 +0800
Subject: [PATCH 04/31] media: intel/ipu6: CPD parsing to get firmware
 components

For IPU6, the firmware is generated and released as a signed
Code Partition Directory (CPD) format file, which is aligned with
the SPI flash code partition definition. The CPD format includes a CPD
header, manifest, metadata and module data. The driver parses them
according to the CPD layout to acquire each component.
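
As a rough sketch of the layout described above (mirroring the
ipu6_cpd_get_entry() helper added in this patch, with field names
shortened for illustration): the entry array starts right after the
variable-length CPD header, so a component descriptor is looked up by
index like this.

  struct cpd_hdr {                /* cf. struct ipu6_cpd_hdr below */
          u32 hdr_mark;           /* "$CPD" */
          u32 ent_cnt;
          u8 hdr_ver;
          u8 ent_ver;
          u8 hdr_len;
  } __packed;

  struct cpd_ent {                /* cf. struct ipu6_cpd_ent below */
          u8 name[12];
          u32 offset;             /* component offset within the CPD file */
          u32 len;
          u8 rsvd[4];
  } __packed;

  static const struct cpd_ent *cpd_entry(const void *cpd, u8 idx)
  {
          const struct cpd_hdr *hdr = cpd;

          /* entries immediately follow the header at hdr->hdr_len */
          return (const struct cpd_ent *)((const u8 *)cpd + hdr->hdr_len) + idx;
  }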

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Link: https://lore.kernel.org/r/20240111065531.2418836-5-bingbu.cao@intel.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 drivers/media/pci/intel/ipu6/ipu6-cpd.c | 362 ++++++++++++++++++++++++
 drivers/media/pci/intel/ipu6/ipu6-cpd.h | 105 +++++++
 2 files changed, 467 insertions(+)
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-cpd.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-cpd.h

diff --git a/drivers/media/pci/intel/ipu6/ipu6-cpd.c b/drivers/media/pci/intel/ipu6/ipu6-cpd.c
new file mode 100644
index 000000000000..b0ffd04c4cd3
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-cpd.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2023 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp_types.h>
+#include <linux/math64.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include "ipu6.h"
+#include "ipu6-bus.h"
+#include "ipu6-cpd.h"
+
+/* 15 entries + header */
+#define MAX_PKG_DIR_ENT_CNT		16
+/* 2 qword per entry/header */
+#define PKG_DIR_ENT_LEN			2
+/* PKG_DIR size in bytes */
+#define PKG_DIR_SIZE			((MAX_PKG_DIR_ENT_CNT) *	\
+					 (PKG_DIR_ENT_LEN) * sizeof(u64))
+/* _IUPKDR_ */
+#define PKG_DIR_HDR_MARK		0x5f4955504b44525fULL
+
+/* $CPD */
+#define CPD_HDR_MARK			0x44504324
+
+#define MAX_MANIFEST_SIZE		(SZ_2K * sizeof(u32))
+#define MAX_METADATA_SIZE		SZ_64K
+
+#define MAX_COMPONENT_ID		127
+#define MAX_COMPONENT_VERSION		0xffff
+
+#define MANIFEST_IDX	0
+#define METADATA_IDX	1
+#define MODULEDATA_IDX	2
+/*
+ * PKG_DIR Entry (type == id)
+ * 63:56        55      54:48   47:32   31:24   23:0
+ * Rsvd         Rsvd    Type    Version Rsvd    Size
+ */
+#define PKG_DIR_SIZE_MASK	GENMASK(23, 0)
+#define PKG_DIR_VERSION_MASK	GENMASK(47, 32)
+#define PKG_DIR_TYPE_MASK	GENMASK(54, 48)
+
+static inline const struct ipu6_cpd_ent *ipu6_cpd_get_entry(const void *cpd,
+							    u8 idx)
+{
+	const struct ipu6_cpd_hdr *cpd_hdr = cpd;
+	const struct ipu6_cpd_ent *ent;
+
+	ent = (const struct ipu6_cpd_ent *)((const u8 *)cpd + cpd_hdr->hdr_len);
+	return ent + idx;
+}
+
+#define ipu6_cpd_get_manifest(cpd) ipu6_cpd_get_entry(cpd, MANIFEST_IDX)
+#define ipu6_cpd_get_metadata(cpd) ipu6_cpd_get_entry(cpd, METADATA_IDX)
+#define ipu6_cpd_get_moduledata(cpd) ipu6_cpd_get_entry(cpd, MODULEDATA_IDX)
+
+static const struct ipu6_cpd_metadata_cmpnt_hdr *
+ipu6_cpd_metadata_get_cmpnt(struct ipu6_device *isp, const void *metadata,
+			    unsigned int metadata_size, u8 idx)
+{
+	size_t extn_size = sizeof(struct ipu6_cpd_metadata_extn);
+	size_t cmpnt_count = metadata_size - extn_size;
+
+	cmpnt_count = div_u64(cmpnt_count, isp->cpd_metadata_cmpnt_size);
+
+	if (idx > MAX_COMPONENT_ID || idx >= cmpnt_count) {
+		dev_err(&isp->pdev->dev, "Component index out of range (%d)\n",
+			idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return metadata + extn_size + idx * isp->cpd_metadata_cmpnt_size;
+}
+
+static u32 ipu6_cpd_metadata_cmpnt_version(struct ipu6_device *isp,
+					   const void *metadata,
+					   unsigned int metadata_size, u8 idx)
+{
+	const struct ipu6_cpd_metadata_cmpnt_hdr *cmpnt;
+
+	cmpnt = ipu6_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx);
+	if (IS_ERR(cmpnt))
+		return PTR_ERR(cmpnt);
+
+	return cmpnt->ver;
+}
+
+static int ipu6_cpd_metadata_get_cmpnt_id(struct ipu6_device *isp,
+					  const void *metadata,
+					  unsigned int metadata_size, u8 idx)
+{
+	const struct ipu6_cpd_metadata_cmpnt_hdr *cmpnt;
+
+	cmpnt = ipu6_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx);
+	if (IS_ERR(cmpnt))
+		return PTR_ERR(cmpnt);
+
+	return cmpnt->id;
+}
+
+static int ipu6_cpd_parse_module_data(struct ipu6_device *isp,
+				      const void *module_data,
+				      unsigned int module_data_size,
+				      dma_addr_t dma_addr_module_data,
+				      u64 *pkg_dir, const void *metadata,
+				      unsigned int metadata_size)
+{
+	const struct ipu6_cpd_module_data_hdr *module_data_hdr;
+	const struct ipu6_cpd_hdr *dir_hdr;
+	const struct ipu6_cpd_ent *dir_ent;
+	unsigned int i;
+	u8 len;
+
+	if (!module_data)
+		return -EINVAL;
+
+	module_data_hdr = module_data;
+	dir_hdr = module_data + module_data_hdr->hdr_len;
+	len = dir_hdr->hdr_len;
+	dir_ent = (const struct ipu6_cpd_ent *)(((u8 *)dir_hdr) + len);
+
+	pkg_dir[0] = PKG_DIR_HDR_MARK;
+	/* pkg_dir entry count = component count + pkg_dir header */
+	pkg_dir[1] = dir_hdr->ent_cnt + 1;
+
+	for (i = 0; i < dir_hdr->ent_cnt; i++, dir_ent++) {
+		u64 *p = &pkg_dir[PKG_DIR_ENT_LEN *  (1 + i)];
+		int ver, id;
+
+		*p++ = dma_addr_module_data + dir_ent->offset;
+		id = ipu6_cpd_metadata_get_cmpnt_id(isp, metadata,
+						    metadata_size, i);
+		if (id < 0 || id > MAX_COMPONENT_ID) {
+			dev_err(&isp->pdev->dev, "Invalid CPD component id\n");
+			return -EINVAL;
+		}
+
+		ver = ipu6_cpd_metadata_cmpnt_version(isp, metadata,
+						      metadata_size, i);
+		if (ver < 0 || ver > MAX_COMPONENT_VERSION) {
+			dev_err(&isp->pdev->dev,
+				"Invalid CPD component version\n");
+			return -EINVAL;
+		}
+
+		*p = FIELD_PREP(PKG_DIR_SIZE_MASK, dir_ent->len) |
+			FIELD_PREP(PKG_DIR_TYPE_MASK, id) |
+			FIELD_PREP(PKG_DIR_VERSION_MASK, ver);
+	}
+
+	return 0;
+}
+
+int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src)
+{
+	dma_addr_t dma_addr_src = sg_dma_address(adev->fw_sgt.sgl);
+	const struct ipu6_cpd_ent *ent, *man_ent, *met_ent;
+	struct device *dev = &adev->auxdev.dev;
+	struct ipu6_device *isp = adev->isp;
+	unsigned int man_sz, met_sz;
+	void *pkg_dir_pos;
+	int ret;
+
+	man_ent = ipu6_cpd_get_manifest(src);
+	man_sz = man_ent->len;
+
+	met_ent = ipu6_cpd_get_metadata(src);
+	met_sz = met_ent->len;
+
+	adev->pkg_dir_size = PKG_DIR_SIZE + man_sz + met_sz;
+	adev->pkg_dir = dma_alloc_attrs(dev, adev->pkg_dir_size,
+					&adev->pkg_dir_dma_addr, GFP_KERNEL, 0);
+	if (!adev->pkg_dir)
+		return -ENOMEM;
+
+	/*
+	 * pkg_dir entry/header:
+	 * qword | 63:56 | 55   | 54:48 | 47:32 | 31:24 | 23:0
+	 * N         Address/Offset/"_IUPKDR_"
+	 * N + 1 | rsvd  | rsvd | type  | ver   | rsvd  | size
+	 *
+	 * We can ignore the fields other than size in the N + 1 qword as
+	 * they are 0 anyway. Just set the size for now.
+	 */
+
+	ent = ipu6_cpd_get_moduledata(src);
+
+	ret = ipu6_cpd_parse_module_data(isp, src + ent->offset,
+					 ent->len, dma_addr_src + ent->offset,
+					 adev->pkg_dir, src + met_ent->offset,
+					 met_ent->len);
+	if (ret) {
+		dev_err(&isp->pdev->dev, "Failed to parse module data\n");
+		dma_free_attrs(dev, adev->pkg_dir_size,
+			       adev->pkg_dir, adev->pkg_dir_dma_addr, 0);
+		return ret;
+	}
+
+	/* Copy manifest after pkg_dir */
+	pkg_dir_pos = adev->pkg_dir + PKG_DIR_ENT_LEN * MAX_PKG_DIR_ENT_CNT;
+	memcpy(pkg_dir_pos, src + man_ent->offset, man_sz);
+
+	/* Copy metadata after manifest */
+	pkg_dir_pos += man_sz;
+	memcpy(pkg_dir_pos, src + met_ent->offset, met_sz);
+
+	dma_sync_single_range_for_device(dev, adev->pkg_dir_dma_addr,
+					 0, adev->pkg_dir_size, DMA_TO_DEVICE);
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_cpd_create_pkg_dir, INTEL_IPU6);
+
+void ipu6_cpd_free_pkg_dir(struct ipu6_bus_device *adev)
+{
+	dma_free_attrs(&adev->auxdev.dev, adev->pkg_dir_size, adev->pkg_dir,
+		       adev->pkg_dir_dma_addr, 0);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_cpd_free_pkg_dir, INTEL_IPU6);
+
+static int ipu6_cpd_validate_cpd(struct ipu6_device *isp, const void *cpd,
+				 unsigned long cpd_size,
+				 unsigned long data_size)
+{
+	const struct ipu6_cpd_hdr *cpd_hdr = cpd;
+	const struct ipu6_cpd_ent *ent;
+	unsigned int i;
+	u8 len;
+
+	len = cpd_hdr->hdr_len;
+
+	/* Ensure cpd hdr is within moduledata */
+	if (cpd_size < len) {
+		dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n");
+		return -EINVAL;
+	}
+
+	/* Sanity check for CPD header */
+	if ((cpd_size - len) / sizeof(*ent) < cpd_hdr->ent_cnt) {
+		dev_err(&isp->pdev->dev, "Invalid CPD header\n");
+		return -EINVAL;
+	}
+
+	/* Ensure that all entries are within moduledata */
+	ent = (const struct ipu6_cpd_ent *)(((const u8 *)cpd_hdr) + len);
+	for (i = 0; i < cpd_hdr->ent_cnt; i++, ent++) {
+		if (data_size < ent->offset ||
+		    data_size - ent->offset < ent->len) {
+			dev_err(&isp->pdev->dev, "Invalid CPD entry (%d)\n", i);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int ipu6_cpd_validate_moduledata(struct ipu6_device *isp,
+					const void *moduledata,
+					u32 moduledata_size)
+{
+	const struct ipu6_cpd_module_data_hdr *mod_hdr = moduledata;
+	int ret;
+
+	/* Ensure moduledata hdr is within moduledata */
+	if (moduledata_size < sizeof(*mod_hdr) ||
+	    moduledata_size < mod_hdr->hdr_len) {
+		dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n");
+		return -EINVAL;
+	}
+
+	dev_info(&isp->pdev->dev, "FW version: %x\n", mod_hdr->fw_pkg_date);
+	ret = ipu6_cpd_validate_cpd(isp, moduledata + mod_hdr->hdr_len,
+				    moduledata_size - mod_hdr->hdr_len,
+				    moduledata_size);
+	if (ret) {
+		dev_err(&isp->pdev->dev, "Invalid CPD in moduledata\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ipu6_cpd_validate_metadata(struct ipu6_device *isp,
+				      const void *metadata, u32 meta_size)
+{
+	const struct ipu6_cpd_metadata_extn *extn = metadata;
+
+	/* Sanity check for metadata size */
+	if (meta_size < sizeof(*extn) || meta_size > MAX_METADATA_SIZE) {
+		dev_err(&isp->pdev->dev, "Invalid CPD metadata\n");
+		return -EINVAL;
+	}
+
+	/* Validate extension and image types */
+	if (extn->extn_type != IPU6_CPD_METADATA_EXTN_TYPE_IUNIT ||
+	    extn->img_type != IPU6_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE) {
+		dev_err(&isp->pdev->dev,
+			"Invalid CPD metadata descriptor img_type (%d)\n",
+			extn->img_type);
+		return -EINVAL;
+	}
+
+	/* Validate metadata size multiple of metadata components */
+	if ((meta_size - sizeof(*extn)) % isp->cpd_metadata_cmpnt_size) {
+		dev_err(&isp->pdev->dev, "Invalid CPD metadata size\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int ipu6_cpd_validate_cpd_file(struct ipu6_device *isp, const void *cpd_file,
+			       unsigned long cpd_file_size)
+{
+	const struct ipu6_cpd_hdr *hdr = cpd_file;
+	const struct ipu6_cpd_ent *ent;
+	int ret;
+
+	ret = ipu6_cpd_validate_cpd(isp, cpd_file, cpd_file_size,
+				    cpd_file_size);
+	if (ret) {
+		dev_err(&isp->pdev->dev, "Invalid CPD in file\n");
+		return ret;
+	}
+
+	/* Check for CPD file marker */
+	if (hdr->hdr_mark != CPD_HDR_MARK) {
+		dev_err(&isp->pdev->dev, "Invalid CPD header\n");
+		return -EINVAL;
+	}
+
+	/* Sanity check for manifest size */
+	ent = ipu6_cpd_get_manifest(cpd_file);
+	if (ent->len > MAX_MANIFEST_SIZE) {
+		dev_err(&isp->pdev->dev, "Invalid CPD manifest size\n");
+		return -EINVAL;
+	}
+
+	/* Validate metadata */
+	ent = ipu6_cpd_get_metadata(cpd_file);
+	ret = ipu6_cpd_validate_metadata(isp, cpd_file + ent->offset, ent->len);
+	if (ret) {
+		dev_err(&isp->pdev->dev, "Invalid CPD metadata\n");
+		return ret;
+	}
+
+	/* Validate moduledata */
+	ent = ipu6_cpd_get_moduledata(cpd_file);
+	ret = ipu6_cpd_validate_moduledata(isp, cpd_file + ent->offset,
+					   ent->len);
+	if (ret)
+		dev_err(&isp->pdev->dev, "Invalid CPD moduledata\n");
+
+	return ret;
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-cpd.h b/drivers/media/pci/intel/ipu6/ipu6-cpd.h
new file mode 100644
index 000000000000..37465d507386
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-cpd.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2015 - 2023 Intel Corporation */
+
+#ifndef IPU6_CPD_H
+#define IPU6_CPD_H
+
+struct ipu6_device;
+struct ipu6_bus_device;
+
+#define IPU6_CPD_SIZE_OF_FW_ARCH_VERSION	7
+#define IPU6_CPD_SIZE_OF_SYSTEM_VERSION		11
+#define IPU6_CPD_SIZE_OF_COMPONENT_NAME		12
+
+#define IPU6_CPD_METADATA_EXTN_TYPE_IUNIT	0x10
+
+#define IPU6_CPD_METADATA_IMAGE_TYPE_RESERVED		0
+#define IPU6_CPD_METADATA_IMAGE_TYPE_BOOTLOADER		1
+#define IPU6_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE	2
+
+#define IPU6_CPD_PKG_DIR_PSYS_SERVER_IDX	0
+#define IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX	1
+
+#define IPU6_CPD_PKG_DIR_CLIENT_PG_TYPE		3
+
+#define IPU6_CPD_METADATA_HASH_KEY_SIZE		48
+#define IPU6SE_CPD_METADATA_HASH_KEY_SIZE	32
+
+struct ipu6_cpd_module_data_hdr {
+	u32 hdr_len;
+	u32 endian;
+	u32 fw_pkg_date;
+	u32 hive_sdk_date;
+	u32 compiler_date;
+	u32 target_platform_type;
+	u8 sys_ver[IPU6_CPD_SIZE_OF_SYSTEM_VERSION];
+	u8 fw_arch_ver[IPU6_CPD_SIZE_OF_FW_ARCH_VERSION];
+	u8 rsvd[2];
+} __packed;
+
+/*
+ * ipu6_cpd_hdr structure updated as the chksum and
+ * sub_partition_name are unused on the host side
+ * CSE layout version 1.6 for IPU6SE (hdr_len = 0x10)
+ * CSE layout version 1.7 for IPU6 (hdr_len = 0x14)
+ */
+struct ipu6_cpd_hdr {
+	u32 hdr_mark;
+	u32 ent_cnt;
+	u8 hdr_ver;
+	u8 ent_ver;
+	u8 hdr_len;
+} __packed;
+
+struct ipu6_cpd_ent {
+	u8 name[IPU6_CPD_SIZE_OF_COMPONENT_NAME];
+	u32 offset;
+	u32 len;
+	u8 rsvd[4];
+} __packed;
+
+struct ipu6_cpd_metadata_cmpnt_hdr {
+	u32 id;
+	u32 size;
+	u32 ver;
+} __packed;
+
+struct ipu6_cpd_metadata_cmpnt {
+	struct ipu6_cpd_metadata_cmpnt_hdr hdr;
+	u8 sha2_hash[IPU6_CPD_METADATA_HASH_KEY_SIZE];
+	u32 entry_point;
+	u32 icache_base_offs;
+	u8 attrs[16];
+} __packed;
+
+struct ipu6se_cpd_metadata_cmpnt {
+	struct ipu6_cpd_metadata_cmpnt_hdr hdr;
+	u8 sha2_hash[IPU6SE_CPD_METADATA_HASH_KEY_SIZE];
+	u32 entry_point;
+	u32 icache_base_offs;
+	u8 attrs[16];
+} __packed;
+
+struct ipu6_cpd_metadata_extn {
+	u32 extn_type;
+	u32 len;
+	u32 img_type;
+	u8 rsvd[16];
+} __packed;
+
+struct ipu6_cpd_client_pkg_hdr {
+	u32 prog_list_offs;
+	u32 prog_list_size;
+	u32 prog_desc_offs;
+	u32 prog_desc_size;
+	u32 pg_manifest_offs;
+	u32 pg_manifest_size;
+	u32 prog_bin_offs;
+	u32 prog_bin_size;
+} __packed;
+
+int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src);
+void ipu6_cpd_free_pkg_dir(struct ipu6_bus_device *adev);
+int ipu6_cpd_validate_cpd_file(struct ipu6_device *isp, const void *cpd_file,
+			       unsigned long cpd_file_size);
+#endif /* IPU6_CPD_H */
-- 
2.43.0

From 05c36b7a6306ed7ef15b9fd6039920a7d08626cc Mon Sep 17 00:00:00 2001
From: Bingbu Cao <bingbu.cao@intel.com>
Date: Thu, 11 Jan 2024 14:55:19 +0800
Subject: [PATCH 05/31] media: intel/ipu6: add IPU6 DMA mapping API and MMU
 table

The Intel IPU6 has an internal microcontroller (scalar processor, SP) which
is used to execute the firmware. The SP can access IPU internal memory and
map system DRAM into its internal 32-bit virtual address space.

This patch adds a driver for the IPU MMU and a DMA mapping implementation
using the internal MMU. The system IOMMU may be used besides the IPU MMU.
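
Each buffer page therefore ends up mapped twice: once through the
regular PCI DMA API to obtain a system DMA address, and once through
the IPU MMU so the SP sees it at a 32-bit IPU virtual address. A
condensed sketch of the per-page step in ipu6_dma_alloc() below (error
handling omitted):

  /* map one page for the PCI device, then expose it through the IPU MMU */
  pci_dma_addr = dma_map_page_attrs(&pdev->dev, pages[i], 0, PAGE_SIZE,
                                    DMA_BIDIRECTIONAL, attrs);
  ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo + i),
                     pci_dma_addr, PAGE_SIZE);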

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Link: https://lore.kernel.org/r/20240111065531.2418836-6-bingbu.cao@intel.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 drivers/media/pci/intel/ipu6/ipu6-dma.c | 502 ++++++++++++++
 drivers/media/pci/intel/ipu6/ipu6-dma.h |  19 +
 drivers/media/pci/intel/ipu6/ipu6-mmu.c | 845 ++++++++++++++++++++++++
 drivers/media/pci/intel/ipu6/ipu6-mmu.h |  73 ++
 4 files changed, 1439 insertions(+)
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-dma.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-dma.h
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-mmu.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-mmu.h

diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.c b/drivers/media/pci/intel/ipu6/ipu6-dma.c
new file mode 100644
index 000000000000..3d77c6e5a45e
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-dma.c
@@ -0,0 +1,502 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2023 Intel Corporation
+ */
+
+#include <linux/cacheflush.h>
+#include <linux/dma-mapping.h>
+#include <linux/iova.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "ipu6.h"
+#include "ipu6-bus.h"
+#include "ipu6-dma.h"
+#include "ipu6-mmu.h"
+
+struct vm_info {
+	struct list_head list;
+	struct page **pages;
+	dma_addr_t ipu6_iova;
+	void *vaddr;
+	unsigned long size;
+};
+
+static struct vm_info *get_vm_info(struct ipu6_mmu *mmu, dma_addr_t iova)
+{
+	struct vm_info *info, *save;
+
+	list_for_each_entry_safe(info, save, &mmu->vma_list, list) {
+		if (iova >= info->ipu6_iova &&
+		    iova < (info->ipu6_iova + info->size))
+			return info;
+	}
+
+	return NULL;
+}
+
+static void __dma_clear_buffer(struct page *page, size_t size,
+			       unsigned long attrs)
+{
+	void *ptr;
+
+	if (!page)
+		return;
+	/*
+	 * Ensure that the allocated pages are zeroed, and that any data
+	 * lurking in the kernel direct-mapped region is invalidated.
+	 */
+	ptr = page_address(page);
+	memset(ptr, 0, size);
+	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		clflush_cache_range(ptr, size);
+}
+
+static struct page **__dma_alloc_buffer(struct device *dev, size_t size,
+					gfp_t gfp, unsigned long attrs)
+{
+	int count = PHYS_PFN(size);
+	int array_size = count * sizeof(struct page *);
+	struct page **pages;
+	int i = 0;
+
+	pages = kvzalloc(array_size, GFP_KERNEL);
+	if (!pages)
+		return NULL;
+
+	gfp |= __GFP_NOWARN;
+
+	while (count) {
+		int j, order = __fls(count);
+
+		pages[i] = alloc_pages(gfp, order);
+		while (!pages[i] && order)
+			pages[i] = alloc_pages(gfp, --order);
+		if (!pages[i])
+			goto error;
+
+		if (order) {
+			split_page(pages[i], order);
+			j = 1 << order;
+			while (j--)
+				pages[i + j] = pages[i] + j;
+		}
+
+		__dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs);
+		i += 1 << order;
+		count -= 1 << order;
+	}
+
+	return pages;
+error:
+	while (i--)
+		if (pages[i])
+			__free_pages(pages[i], 0);
+	kvfree(pages);
+	return NULL;
+}
+
+static void __dma_free_buffer(struct device *dev, struct page **pages,
+			      size_t size, unsigned long attrs)
+{
+	int count = PHYS_PFN(size);
+	unsigned int i;
+
+	for (i = 0; i < count && pages[i]; i++) {
+		__dma_clear_buffer(pages[i], PAGE_SIZE, attrs);
+		__free_pages(pages[i], 0);
+	}
+
+	kvfree(pages);
+}
+
+static void ipu6_dma_sync_single_for_cpu(struct device *dev,
+					 dma_addr_t dma_handle,
+					 size_t size,
+					 enum dma_data_direction dir)
+{
+	void *vaddr;
+	u32 offset;
+	struct vm_info *info;
+	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+
+	info = get_vm_info(mmu, dma_handle);
+	if (WARN_ON(!info))
+		return;
+
+	offset = dma_handle - info->ipu6_iova;
+	if (WARN_ON(size > (info->size - offset)))
+		return;
+
+	vaddr = info->vaddr + offset;
+	clflush_cache_range(vaddr, size);
+}
+
+static void ipu6_dma_sync_sg_for_cpu(struct device *dev,
+				     struct scatterlist *sglist,
+				     int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sglist, sg, nents, i)
+		clflush_cache_range(page_to_virt(sg_page(sg)), sg->length);
+}
+
+static void *ipu6_dma_alloc(struct device *dev, size_t size,
+			    dma_addr_t *dma_handle, gfp_t gfp,
+			    unsigned long attrs)
+{
+	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+	struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
+	dma_addr_t pci_dma_addr, ipu6_iova;
+	struct vm_info *info;
+	unsigned long count;
+	struct page **pages;
+	struct iova *iova;
+	unsigned int i;
+	int ret;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return NULL;
+
+	size = PAGE_ALIGN(size);
+	count = PHYS_PFN(size);
+
+	iova = alloc_iova(&mmu->dmap->iovad, count,
+			  PHYS_PFN(dma_get_mask(dev)), 0);
+	if (!iova)
+		goto out_kfree;
+
+	pages = __dma_alloc_buffer(dev, size, gfp, attrs);
+	if (!pages)
+		goto out_free_iova;
+
+	dev_dbg(dev, "dma_alloc: size %zu iova low pfn %lu, high pfn %lu\n",
+		size, iova->pfn_lo, iova->pfn_hi);
+	for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) {
+		pci_dma_addr = dma_map_page_attrs(&pdev->dev, pages[i], 0,
+						  PAGE_SIZE, DMA_BIDIRECTIONAL,
+						  attrs);
+		dev_dbg(dev, "dma_alloc: mapped pci_dma_addr %pad\n",
+			&pci_dma_addr);
+		if (dma_mapping_error(&pdev->dev, pci_dma_addr)) {
+			dev_err(dev, "pci_dma_mapping for page[%d] failed", i);
+			goto out_unmap;
+		}
+
+		ret = ipu6_mmu_map(mmu->dmap->mmu_info,
+				   PFN_PHYS(iova->pfn_lo + i), pci_dma_addr,
+				   PAGE_SIZE);
+		if (ret) {
+			dev_err(dev, "ipu6_mmu_map for pci_dma[%d] %pad failed",
+				i, &pci_dma_addr);
+			dma_unmap_page_attrs(&pdev->dev, pci_dma_addr,
+					     PAGE_SIZE, DMA_BIDIRECTIONAL,
+					     attrs);
+			goto out_unmap;
+		}
+	}
+
+	info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
+	if (!info->vaddr)
+		goto out_unmap;
+
+	*dma_handle = PFN_PHYS(iova->pfn_lo);
+
+	info->pages = pages;
+	info->ipu6_iova = *dma_handle;
+	info->size = size;
+	list_add(&info->list, &mmu->vma_list);
+
+	return info->vaddr;
+
+out_unmap:
+	while (i--) {
+		ipu6_iova = PFN_PHYS(iova->pfn_lo + i);
+		pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
+						     ipu6_iova);
+		dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
+				     DMA_BIDIRECTIONAL, attrs);
+
+		ipu6_mmu_unmap(mmu->dmap->mmu_info, ipu6_iova, PAGE_SIZE);
+	}
+
+	__dma_free_buffer(dev, pages, size, attrs);
+
+out_free_iova:
+	__free_iova(&mmu->dmap->iovad, iova);
+out_kfree:
+	kfree(info);
+
+	return NULL;
+}
+
+static void ipu6_dma_free(struct device *dev, size_t size, void *vaddr,
+			  dma_addr_t dma_handle,
+			  unsigned long attrs)
+{
+	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+	struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
+	struct iova *iova = find_iova(&mmu->dmap->iovad, PHYS_PFN(dma_handle));
+	dma_addr_t pci_dma_addr, ipu6_iova;
+	struct vm_info *info;
+	struct page **pages;
+	unsigned int i;
+
+	if (WARN_ON(!iova))
+		return;
+
+	info = get_vm_info(mmu, dma_handle);
+	if (WARN_ON(!info))
+		return;
+
+	if (WARN_ON(!info->vaddr))
+		return;
+
+	if (WARN_ON(!info->pages))
+		return;
+
+	list_del(&info->list);
+
+	size = PAGE_ALIGN(size);
+
+	pages = info->pages;
+
+	vunmap(vaddr);
+
+	for (i = 0; i < PHYS_PFN(size); i++) {
+		ipu6_iova = PFN_PHYS(iova->pfn_lo + i);
+		pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
+						     ipu6_iova);
+		dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
+				     DMA_BIDIRECTIONAL, attrs);
+	}
+
+	ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
+		       PFN_PHYS(iova_size(iova)));
+
+	__dma_free_buffer(dev, pages, size, attrs);
+
+	mmu->tlb_invalidate(mmu);
+
+	__free_iova(&mmu->dmap->iovad, iova);
+
+	kfree(info);
+}
+
+static int ipu6_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+			 void *addr, dma_addr_t iova, size_t size,
+			 unsigned long attrs)
+{
+	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+	size_t count = PHYS_PFN(PAGE_ALIGN(size));
+	struct vm_info *info;
+	size_t i;
+	int ret;
+
+	info = get_vm_info(mmu, iova);
+	if (!info)
+		return -EFAULT;
+
+	if (!info->vaddr)
+		return -EFAULT;
+
+	if (vma->vm_start & ~PAGE_MASK)
+		return -EINVAL;
+
+	if (size > info->size)
+		return -EFAULT;
+
+	for (i = 0; i < count; i++) {
+		ret = vm_insert_page(vma, vma->vm_start + PFN_PHYS(i),
+				     info->pages[i]);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void ipu6_dma_unmap_sg(struct device *dev,
+			      struct scatterlist *sglist,
+			      int nents, enum dma_data_direction dir,
+			      unsigned long attrs)
+{
+	struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
+	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+	struct iova *iova = find_iova(&mmu->dmap->iovad,
+				      PHYS_PFN(sg_dma_address(sglist)));
+	int i, npages, count;
+	struct scatterlist *sg;
+	dma_addr_t pci_dma_addr;
+
+	if (!nents)
+		return;
+
+	if (WARN_ON(!iova))
+		return;
+
+	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		ipu6_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);
+
+	/* recover the mapped entry count from the caller's orig_nents */
+	count = 0;
+	npages = iova_size(iova);
+	for_each_sg(sglist, sg, nents, i) {
+		if (sg_dma_len(sg) == 0 ||
+		    sg_dma_address(sg) == DMA_MAPPING_ERROR)
+			break;
+
+		npages -= PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));
+		count++;
+		if (npages <= 0)
+			break;
+	}
+
+	/*
+	 * Before the IPU6 MMU unmap, return the PCI DMA addresses back to
+	 * the sg list. Assume nents is less than orig_nents, as the
+	 * smallest granule is one SZ_4K page.
+	 */
+	dev_dbg(dev, "trying to unmap concatenated %u ents\n", count);
+	for_each_sg(sglist, sg, count, i) {
+		dev_dbg(dev, "ipu unmap sg[%d] %pad\n", i, &sg_dma_address(sg));
+		pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
+						     sg_dma_address(sg));
+		dev_dbg(dev, "return pci_dma_addr %pad back to sg[%d]\n",
+			&pci_dma_addr, i);
+		sg_dma_address(sg) = pci_dma_addr;
+	}
+
+	dev_dbg(dev, "ipu6_mmu_unmap low pfn %lu high pfn %lu\n",
+		iova->pfn_lo, iova->pfn_hi);
+	ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
+		       PFN_PHYS(iova_size(iova)));
+
+	mmu->tlb_invalidate(mmu);
+
+	dma_unmap_sg_attrs(&pdev->dev, sglist, nents, dir, attrs);
+
+	__free_iova(&mmu->dmap->iovad, iova);
+}
+
+static int ipu6_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+			   int nents, enum dma_data_direction dir,
+			   unsigned long attrs)
+{
+	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+	struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
+	struct scatterlist *sg;
+	struct iova *iova;
+	size_t npages = 0;
+	unsigned long iova_addr;
+	int i, count;
+
+	for_each_sg(sglist, sg, nents, i) {
+		if (sg->offset) {
+			dev_err(dev, "Unsupported non-zero sg[%d].offset %x\n",
+				i, sg->offset);
+			return -EFAULT;
+		}
+	}
+
+	dev_dbg(dev, "pci_dma_map_sg trying to map %d ents\n", nents);
+	count  = dma_map_sg_attrs(&pdev->dev, sglist, nents, dir, attrs);
+	if (count <= 0) {
+		dev_err(dev, "pci_dma_map_sg %d ents failed\n", nents);
+		return 0;
+	}
+
+	dev_dbg(dev, "pci_dma_map_sg %d ents mapped\n", count);
+
+	for_each_sg(sglist, sg, count, i)
+		npages += PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));
+
+	iova = alloc_iova(&mmu->dmap->iovad, npages,
+			  PHYS_PFN(dma_get_mask(dev)), 0);
+	if (!iova)
+		return 0;
+
+	dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo,
+		iova->pfn_hi);
+
+	iova_addr = iova->pfn_lo;
+	for_each_sg(sglist, sg, count, i) {
+		int ret;
+
+		dev_dbg(dev, "mapping entry %d: iova 0x%llx phy %pad size %d\n",
+			i, PFN_PHYS(iova_addr), &sg_dma_address(sg),
+			sg_dma_len(sg));
+
+		ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
+				   sg_dma_address(sg),
+				   PAGE_ALIGN(sg_dma_len(sg)));
+		if (ret)
+			goto out_fail;
+
+		sg_dma_address(sg) = PFN_PHYS(iova_addr);
+
+		iova_addr += PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));
+	}
+
+	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		ipu6_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);
+
+	return count;
+
+out_fail:
+	ipu6_dma_unmap_sg(dev, sglist, i, dir, attrs);
+
+	return 0;
+}
+
+/*
+ * Create scatter-list for the already allocated DMA buffer
+ */
+static int ipu6_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+				void *cpu_addr, dma_addr_t handle, size_t size,
+				unsigned long attrs)
+{
+	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+	struct vm_info *info;
+	int n_pages;
+	int ret = 0;
+
+	info = get_vm_info(mmu, handle);
+	if (!info)
+		return -EFAULT;
+
+	if (!info->vaddr)
+		return -EFAULT;
+
+	if (WARN_ON(!info->pages))
+		return -ENOMEM;
+
+	n_pages = PHYS_PFN(PAGE_ALIGN(size));
+
+	ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size,
+					GFP_KERNEL);
+	if (ret)
+		dev_warn(dev, "IPU6 get sgt table failed\n");
+
+	return ret;
+}
+
+const struct dma_map_ops ipu6_dma_ops = {
+	.alloc = ipu6_dma_alloc,
+	.free = ipu6_dma_free,
+	.mmap = ipu6_dma_mmap,
+	.map_sg = ipu6_dma_map_sg,
+	.unmap_sg = ipu6_dma_unmap_sg,
+	.sync_single_for_cpu = ipu6_dma_sync_single_for_cpu,
+	.sync_single_for_device = ipu6_dma_sync_single_for_cpu,
+	.sync_sg_for_cpu = ipu6_dma_sync_sg_for_cpu,
+	.sync_sg_for_device = ipu6_dma_sync_sg_for_cpu,
+	.get_sgtable = ipu6_dma_get_sgtable,
+};
diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.h b/drivers/media/pci/intel/ipu6/ipu6-dma.h
new file mode 100644
index 000000000000..c75ad2462368
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-dma.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013 - 2023 Intel Corporation */
+
+#ifndef IPU6_DMA_H
+#define IPU6_DMA_H
+
+#include <linux/dma-map-ops.h>
+#include <linux/iova.h>
+
+struct ipu6_mmu_info;
+
+struct ipu6_dma_mapping {
+	struct ipu6_mmu_info *mmu_info;
+	struct iova_domain iovad;
+};
+
+extern const struct dma_map_ops ipu6_dma_ops;
+
+#endif /* IPU6_DMA_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-mmu.c b/drivers/media/pci/intel/ipu6/ipu6-mmu.c
new file mode 100644
index 000000000000..dc16d45187a8
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-mmu.c
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2023 Intel Corporation
+ */
+#include <asm/barrier.h>
+
+#include <linux/align.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/cacheflush.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/iova.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/mm.h>
+#include <linux/pfn.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "ipu6.h"
+#include "ipu6-dma.h"
+#include "ipu6-mmu.h"
+#include "ipu6-platform-regs.h"
+
+#define ISP_PAGE_SHIFT		12
+#define ISP_PAGE_SIZE		BIT(ISP_PAGE_SHIFT)
+#define ISP_PAGE_MASK		(~(ISP_PAGE_SIZE - 1))
+
+#define ISP_L1PT_SHIFT		22
+#define ISP_L1PT_MASK		(~((1U << ISP_L1PT_SHIFT) - 1))
+
+#define ISP_L2PT_SHIFT		12
+#define ISP_L2PT_MASK		(~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))
+
+#define ISP_L1PT_PTES           1024
+#define ISP_L2PT_PTES           1024
+
+#define ISP_PADDR_SHIFT		12
+
+#define REG_TLB_INVALIDATE	0x0000
+
+#define REG_L1_PHYS		0x0004	/* 27-bit pfn */
+#define REG_INFO		0x0008
+
+#define TBL_PHYS_ADDR(a)	((phys_addr_t)(a) << ISP_PADDR_SHIFT)
+
+static void tlb_invalidate(struct ipu6_mmu *mmu)
+{
+	unsigned long flags;
+	unsigned int i;
+
+	spin_lock_irqsave(&mmu->ready_lock, flags);
+	if (!mmu->ready) {
+		spin_unlock_irqrestore(&mmu->ready_lock, flags);
+		return;
+	}
+
+	for (i = 0; i < mmu->nr_mmus; i++) {
+		/*
+		 * To avoid a HW-bug-induced deadlock in some of the IPU6 MMUs
+		 * on successive invalidate calls, we need to first do a read
+		 * of the page table base before writing the invalidate
+		 * register. MMUs which need this workaround have the
+		 * insert_read_before_invalidate flag set to true.
+		 * Disregard the return value of the read.
+		 */
+		if (mmu->mmu_hw[i].insert_read_before_invalidate)
+			readl(mmu->mmu_hw[i].base + REG_L1_PHYS);
+
+		writel(0xffffffff, mmu->mmu_hw[i].base +
+		       REG_TLB_INVALIDATE);
+		/*
+		 * The TLB invalidation is a "single cycle" operation (in IOMMU
+		 * clock cycles). When the actual MMIO write reaches the IPU6
+		 * TLB Invalidate register, wmb() forces the TLB invalidate out
+		 * by the time the CPU updates the IOMMU page table (or sooner).
+		 */
+		wmb();
+	}
+	spin_unlock_irqrestore(&mmu->ready_lock, flags);
+}
+
+#ifdef DEBUG
+static void page_table_dump(struct ipu6_mmu_info *mmu_info)
+{
+	u32 l1_idx;
+
+	dev_dbg(mmu_info->dev, "begin IOMMU page table dump\n");
+
+	for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
+		u32 l2_idx;
+		u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT;
+
+		if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval)
+			continue;
+		dev_dbg(mmu_info->dev,
+			"l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %pa\n",
+			l1_idx, iova, iova + ISP_PAGE_SIZE,
+			TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]));
+
+		for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) {
+			u32 *l2_pt = mmu_info->l2_pts[l1_idx];
+			u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT);
+
+			if (l2_pt[l2_idx] == mmu_info->dummy_page_pteval)
+				continue;
+
+			dev_dbg(mmu_info->dev,
+				"\tl2 entry %u; iova 0x%8.8x, phys %pa\n",
+				l2_idx, iova2,
+				TBL_PHYS_ADDR(l2_pt[l2_idx]));
+		}
+	}
+
+	dev_dbg(mmu_info->dev, "end IOMMU page table dump\n");
+}
+#endif /* DEBUG */
+
+static dma_addr_t map_single(struct ipu6_mmu_info *mmu_info, void *ptr)
+{
+	dma_addr_t dma;
+
+	dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(mmu_info->dev, dma))
+		return 0;
+
+	return dma;
+}
+
+static int get_dummy_page(struct ipu6_mmu_info *mmu_info)
+{
+	void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+	dma_addr_t dma;
+
+	if (!pt)
+		return -ENOMEM;
+
+	dev_dbg(mmu_info->dev, "dummy_page: get_zeroed_page() == %p\n", pt);
+
+	dma = map_single(mmu_info, pt);
+	if (!dma) {
+		dev_err(mmu_info->dev, "Failed to map dummy page\n");
+		goto err_free_page;
+	}
+
+	mmu_info->dummy_page = pt;
+	mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT;
+
+	return 0;
+
+err_free_page:
+	free_page((unsigned long)pt);
+	return -ENOMEM;
+}
+
+static void free_dummy_page(struct ipu6_mmu_info *mmu_info)
+{
+	dma_unmap_single(mmu_info->dev,
+			 TBL_PHYS_ADDR(mmu_info->dummy_page_pteval),
+			 PAGE_SIZE, DMA_BIDIRECTIONAL);
+	free_page((unsigned long)mmu_info->dummy_page);
+}
+
+static int alloc_dummy_l2_pt(struct ipu6_mmu_info *mmu_info)
+{
+	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+	dma_addr_t dma;
+	unsigned int i;
+
+	if (!pt)
+		return -ENOMEM;
+
+	dev_dbg(mmu_info->dev, "dummy_l2: get_zeroed_page() = %p\n", pt);
+
+	dma = map_single(mmu_info, pt);
+	if (!dma) {
+		dev_err(mmu_info->dev, "Failed to map l2pt page\n");
+		goto err_free_page;
+	}
+
+	for (i = 0; i < ISP_L2PT_PTES; i++)
+		pt[i] = mmu_info->dummy_page_pteval;
+
+	mmu_info->dummy_l2_pt = pt;
+	mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT;
+
+	return 0;
+
+err_free_page:
+	free_page((unsigned long)pt);
+	return -ENOMEM;
+}
+
+static void free_dummy_l2_pt(struct ipu6_mmu_info *mmu_info)
+{
+	dma_unmap_single(mmu_info->dev,
+			 TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval),
+			 PAGE_SIZE, DMA_BIDIRECTIONAL);
+	free_page((unsigned long)mmu_info->dummy_l2_pt);
+}
+
+static u32 *alloc_l1_pt(struct ipu6_mmu_info *mmu_info)
+{
+	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+	dma_addr_t dma;
+	unsigned int i;
+
+	if (!pt)
+		return NULL;
+
+	dev_dbg(mmu_info->dev, "alloc_l1: get_zeroed_page() = %p\n", pt);
+
+	for (i = 0; i < ISP_L1PT_PTES; i++)
+		pt[i] = mmu_info->dummy_l2_pteval;
+
+	dma = map_single(mmu_info, pt);
+	if (!dma) {
+		dev_err(mmu_info->dev, "Failed to map l1pt page\n");
+		goto err_free_page;
+	}
+
+	mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT;
+	dev_dbg(mmu_info->dev, "l1 pt %p mapped at %llx\n", pt, dma);
+
+	return pt;
+
+err_free_page:
+	free_page((unsigned long)pt);
+	return NULL;
+}
+
+static u32 *alloc_l2_pt(struct ipu6_mmu_info *mmu_info)
+{
+	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+	unsigned int i;
+
+	if (!pt)
+		return NULL;
+
+	dev_dbg(mmu_info->dev, "alloc_l2: get_zeroed_page() = %p\n", pt);
+
+	for (i = 0; i < ISP_L1PT_PTES; i++)
+		pt[i] = mmu_info->dummy_page_pteval;
+
+	return pt;
+}
+
+static int l2_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+		  phys_addr_t paddr, size_t size)
+{
+	u32 l1_idx = iova >> ISP_L1PT_SHIFT;
+	u32 iova_start = iova;
+	u32 *l2_pt, *l2_virt;
+	unsigned int l2_idx;
+	unsigned long flags;
+	dma_addr_t dma;
+	u32 l1_entry;
+
+	dev_dbg(mmu_info->dev,
+		"mapping l2 page table for l1 index %u (iova %8.8x)\n",
+		l1_idx, (u32)iova);
+
+	spin_lock_irqsave(&mmu_info->lock, flags);
+	l1_entry = mmu_info->l1_pt[l1_idx];
+	if (l1_entry == mmu_info->dummy_l2_pteval) {
+		l2_virt = mmu_info->l2_pts[l1_idx];
+		if (likely(!l2_virt)) {
+			l2_virt = alloc_l2_pt(mmu_info);
+			if (!l2_virt) {
+				spin_unlock_irqrestore(&mmu_info->lock, flags);
+				return -ENOMEM;
+			}
+		}
+
+		dma = map_single(mmu_info, l2_virt);
+		if (!dma) {
+			dev_err(mmu_info->dev, "Failed to map l2pt page\n");
+			free_page((unsigned long)l2_virt);
+			spin_unlock_irqrestore(&mmu_info->lock, flags);
+			return -EINVAL;
+		}
+
+		l1_entry = dma >> ISP_PADDR_SHIFT;
+
+		dev_dbg(mmu_info->dev, "page for l1_idx %u %p allocated\n",
+			l1_idx, l2_virt);
+		mmu_info->l1_pt[l1_idx] = l1_entry;
+		mmu_info->l2_pts[l1_idx] = l2_virt;
+		clflush_cache_range((void *)&mmu_info->l1_pt[l1_idx],
+				    sizeof(mmu_info->l1_pt[l1_idx]));
+	}
+
+	l2_pt = mmu_info->l2_pts[l1_idx];
+
+	dev_dbg(mmu_info->dev, "l2_pt at %p with dma 0x%x\n", l2_pt, l1_entry);
+
+	paddr = ALIGN(paddr, ISP_PAGE_SIZE);
+
+	l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+
+	dev_dbg(mmu_info->dev, "l2_idx %u, phys 0x%8.8x\n", l2_idx,
+		l2_pt[l2_idx]);
+	if (l2_pt[l2_idx] != mmu_info->dummy_page_pteval) {
+		spin_unlock_irqrestore(&mmu_info->lock, flags);
+		return -EINVAL;
+	}
+
+	l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT;
+
+	clflush_cache_range((void *)&l2_pt[l2_idx], sizeof(l2_pt[l2_idx]));
+	spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+	dev_dbg(mmu_info->dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx,
+		l2_pt[l2_idx]);
+
+	return 0;
+}
+
+static int __ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+			  phys_addr_t paddr, size_t size)
+{
+	u32 iova_start = round_down(iova, ISP_PAGE_SIZE);
+	u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE);
+
+	dev_dbg(mmu_info->dev,
+		"mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr 0x%10.10llx\n",
+		iova_start, iova_end, size, paddr);
+
+	return l2_map(mmu_info, iova_start, paddr, size);
+}
+
+static size_t l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+		       phys_addr_t dummy, size_t size)
+{
+	u32 l1_idx = iova >> ISP_L1PT_SHIFT;
+	u32 iova_start = iova;
+	unsigned int l2_idx;
+	size_t unmapped = 0;
+	unsigned long flags;
+	u32 *l2_pt;
+
+	dev_dbg(mmu_info->dev, "unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n",
+		l1_idx, iova);
+
+	spin_lock_irqsave(&mmu_info->lock, flags);
+	if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
+		spin_unlock_irqrestore(&mmu_info->lock, flags);
+		dev_err(mmu_info->dev,
+			"unmap iova 0x%8.8lx l1 idx %u which was not mapped\n",
+			iova, l1_idx);
+		return 0;
+	}
+
+	for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+	     (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT)
+		     < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
+		l2_pt = mmu_info->l2_pts[l1_idx];
+		dev_dbg(mmu_info->dev,
+			"unmap l2 index %u with pteval 0x%10.10llx\n",
+			l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx]));
+		l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
+
+		clflush_cache_range((void *)&l2_pt[l2_idx],
+				    sizeof(l2_pt[l2_idx]));
+		unmapped++;
+	}
+	spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+	return unmapped << ISP_PAGE_SHIFT;
+}
+
+static size_t __ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info,
+			       unsigned long iova, size_t size)
+{
+	return l2_unmap(mmu_info, iova, 0, size);
+}
+
+static int allocate_trash_buffer(struct ipu6_mmu *mmu)
+{
+	unsigned int n_pages = PHYS_PFN(PAGE_ALIGN(IPU6_MMUV2_TRASH_RANGE));
+	struct iova *iova;
+	unsigned int i;
+	dma_addr_t dma;
+	unsigned long iova_addr;
+	int ret;
+
+	/* Allocate 8MB in iova range */
+	iova = alloc_iova(&mmu->dmap->iovad, n_pages,
+			  PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
+	if (!iova) {
+		dev_err(mmu->dev, "cannot allocate iova range for trash\n");
+		return -ENOMEM;
+	}
+
+	dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0,
+			   PAGE_SIZE, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) {
+		dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n");
+		ret = -ENOMEM;
+		goto out_free_iova;
+	}
+
+	mmu->pci_trash_page = dma;
+
+	/*
+	 * Map the 8 MB iova address range to the same physical trash page
+	 * mmu->trash_page, which was already reserved at probe time.
+	 */
+	iova_addr = iova->pfn_lo;
+	for (i = 0; i < n_pages; i++) {
+		ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
+				   mmu->pci_trash_page, PAGE_SIZE);
+		if (ret) {
+			dev_err(mmu->dev,
+				"mapping trash buffer range failed\n");
+			goto out_unmap;
+		}
+
+		iova_addr++;
+	}
+
+	mmu->iova_trash_page = PFN_PHYS(iova->pfn_lo);
+	dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n",
+		mmu->mmid, (unsigned int)mmu->iova_trash_page);
+	return 0;
+
+out_unmap:
+	ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
+		       PFN_PHYS(iova_size(iova)));
+	dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page,
+		       PAGE_SIZE, DMA_BIDIRECTIONAL);
+out_free_iova:
+	__free_iova(&mmu->dmap->iovad, iova);
+	return ret;
+}
+
+int ipu6_mmu_hw_init(struct ipu6_mmu *mmu)
+{
+	struct ipu6_mmu_info *mmu_info;
+	unsigned long flags;
+	unsigned int i;
+
+	mmu_info = mmu->dmap->mmu_info;
+
+	/* Initialise each MMU HW block */
+	for (i = 0; i < mmu->nr_mmus; i++) {
+		struct ipu6_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
+		unsigned int j;
+		u16 block_addr;
+
+		/* Write page table address per MMU */
+		writel((phys_addr_t)mmu_info->l1_pt_dma,
+		       mmu->mmu_hw[i].base + REG_L1_PHYS);
+
+		/* Set info bits per MMU */
+		writel(mmu->mmu_hw[i].info_bits,
+		       mmu->mmu_hw[i].base + REG_INFO);
+
+		/* Configure MMU TLB stream configuration for L1 */
+		for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams;
+		     block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) {
+			if (block_addr > IPU6_MAX_LI_BLOCK_ADDR) {
+				dev_err(mmu->dev, "invalid L1 configuration\n");
+				return -EINVAL;
+			}
+
+			/* Write block start address for each stream */
+			writel(block_addr, mmu_hw->base +
+			       mmu_hw->l1_stream_id_reg_offset + 4 * j);
+		}
+
+		/* Configure MMU TLB stream configuration for L2 */
+		for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams;
+		     block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) {
+			if (block_addr > IPU6_MAX_L2_BLOCK_ADDR) {
+				dev_err(mmu->dev, "invalid L2 configuration\n");
+				return -EINVAL;
+			}
+
+			writel(block_addr, mmu_hw->base +
+			       mmu_hw->l2_stream_id_reg_offset + 4 * j);
+		}
+	}
+
+	if (!mmu->trash_page) {
+		int ret;
+
+		mmu->trash_page = alloc_page(GFP_KERNEL);
+		if (!mmu->trash_page) {
+			dev_err(mmu->dev, "insufficient memory for trash buffer\n");
+			return -ENOMEM;
+		}
+
+		ret = allocate_trash_buffer(mmu);
+		if (ret) {
+			__free_page(mmu->trash_page);
+			mmu->trash_page = NULL;
+			dev_err(mmu->dev, "trash buffer allocation failed\n");
+			return ret;
+		}
+	}
+
+	spin_lock_irqsave(&mmu->ready_lock, flags);
+	mmu->ready = true;
+	spin_unlock_irqrestore(&mmu->ready_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_init, INTEL_IPU6);
+
+static struct ipu6_mmu_info *ipu6_mmu_alloc(struct ipu6_device *isp)
+{
+	struct ipu6_mmu_info *mmu_info;
+	int ret;
+
+	mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL);
+	if (!mmu_info)
+		return NULL;
+
+	mmu_info->aperture_start = 0;
+	mmu_info->aperture_end = DMA_BIT_MASK(isp->secure_mode ?
+					      IPU6_MMU_ADDR_BITS :
+					      IPU6_MMU_ADDR_BITS_NON_SECURE);
+	mmu_info->pgsize_bitmap = SZ_4K;
+	mmu_info->dev = &isp->pdev->dev;
+
+	ret = get_dummy_page(mmu_info);
+	if (ret)
+		goto err_free_info;
+
+	ret = alloc_dummy_l2_pt(mmu_info);
+	if (ret)
+		goto err_free_dummy_page;
+
+	mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts));
+	if (!mmu_info->l2_pts)
+		goto err_free_dummy_l2_pt;
+
+	/*
+	 * We always map the L1 page table (a single page), as well as
+	 * the L2 page tables.
+	 */
+	mmu_info->l1_pt = alloc_l1_pt(mmu_info);
+	if (!mmu_info->l1_pt)
+		goto err_free_l2_pts;
+
+	spin_lock_init(&mmu_info->lock);
+
+	dev_dbg(mmu_info->dev, "domain initialised\n");
+
+	return mmu_info;
+
+err_free_l2_pts:
+	vfree(mmu_info->l2_pts);
+err_free_dummy_l2_pt:
+	free_dummy_l2_pt(mmu_info);
+err_free_dummy_page:
+	free_dummy_page(mmu_info);
+err_free_info:
+	kfree(mmu_info);
+
+	return NULL;
+}
+
+void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&mmu->ready_lock, flags);
+	mmu->ready = false;
+	spin_unlock_irqrestore(&mmu->ready_lock, flags);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_cleanup, INTEL_IPU6);
+
+static struct ipu6_dma_mapping *alloc_dma_mapping(struct ipu6_device *isp)
+{
+	struct ipu6_dma_mapping *dmap;
+
+	dmap = kzalloc(sizeof(*dmap), GFP_KERNEL);
+	if (!dmap)
+		return NULL;
+
+	dmap->mmu_info = ipu6_mmu_alloc(isp);
+	if (!dmap->mmu_info) {
+		kfree(dmap);
+		return NULL;
+	}
+
+	init_iova_domain(&dmap->iovad, SZ_4K, 1);
+	dmap->mmu_info->dmap = dmap;
+
+	dev_dbg(&isp->pdev->dev, "alloc mapping\n");
+
+	iova_cache_get();
+
+	return dmap;
+}
+
+phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info,
+				  dma_addr_t iova)
+{
+	phys_addr_t phy_addr;
+	unsigned long flags;
+	u32 *l2_pt;
+
+	spin_lock_irqsave(&mmu_info->lock, flags);
+	l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT];
+	phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT];
+	phy_addr <<= ISP_PAGE_SHIFT;
+	spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+	return phy_addr;
+}
+
+static size_t ipu6_mmu_pgsize(unsigned long pgsize_bitmap,
+			      unsigned long addr_merge, size_t size)
+{
+	unsigned int pgsize_idx;
+	size_t pgsize;
+
+	/* Max page size that still fits into 'size' */
+	pgsize_idx = __fls(size);
+
+	if (likely(addr_merge)) {
+		/* Max page size allowed by address */
+		unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+	}
+
+	pgsize = (1UL << (pgsize_idx + 1)) - 1;
+	pgsize &= pgsize_bitmap;
+
+	WARN_ON(!pgsize);
+
+	/* pick the biggest page */
+	pgsize_idx = __fls(pgsize);
+	pgsize = 1UL << pgsize_idx;
+
+	return pgsize;
+}
+
+size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+		      size_t size)
+{
+	size_t unmapped_page, unmapped = 0;
+	unsigned int min_pagesz;
+
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);
+
+	/*
+	 * The virtual address and the size of the mapping must be
+	 * aligned (at least) to the size of the smallest page supported
+	 * by the hardware
+	 */
+	if (!IS_ALIGNED(iova | size, min_pagesz)) {
+		dev_err(mmu_info->dev, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+			iova, size, min_pagesz);
+		return -EINVAL;
+	}
+
+	/*
+	 * Keep iterating until we either unmap 'size' bytes (or more)
+	 * or we hit an area that isn't mapped.
+	 */
+	while (unmapped < size) {
+		size_t pgsize = ipu6_mmu_pgsize(mmu_info->pgsize_bitmap,
+						iova, size - unmapped);
+
+		unmapped_page = __ipu6_mmu_unmap(mmu_info, iova, pgsize);
+		if (!unmapped_page)
+			break;
+
+		dev_dbg(mmu_info->dev, "unmapped: iova 0x%lx size 0x%zx\n",
+			iova, unmapped_page);
+
+		iova += unmapped_page;
+		unmapped += unmapped_page;
+	}
+
+	return unmapped;
+}
+
+int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+		 phys_addr_t paddr, size_t size)
+{
+	unsigned long orig_iova = iova;
+	unsigned int min_pagesz;
+	size_t orig_size = size;
+	int ret = 0;
+
+	if (mmu_info->pgsize_bitmap == 0UL)
+		return -ENODEV;
+
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);
+
+	/*
+	 * both the virtual address and the physical one, as well as
+	 * the size of the mapping, must be aligned (at least) to the
+	 * size of the smallest page supported by the hardware
+	 */
+	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+		dev_err(mmu_info->dev,
+			"unaligned: iova %lx pa %pa size %zx min_pagesz %x\n",
+			iova, &paddr, size, min_pagesz);
+		return -EINVAL;
+	}
+
+	dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
+		iova, &paddr, size);
+
+	while (size) {
+		size_t pgsize = ipu6_mmu_pgsize(mmu_info->pgsize_bitmap,
+						iova | paddr, size);
+
+		dev_dbg(mmu_info->dev,
+			"mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
+			iova, &paddr, pgsize);
+
+		ret = __ipu6_mmu_map(mmu_info, iova, paddr, pgsize);
+		if (ret)
+			break;
+
+		iova += pgsize;
+		paddr += pgsize;
+		size -= pgsize;
+	}
+
+	/* unroll mapping in case something went wrong */
+	if (ret)
+		ipu6_mmu_unmap(mmu_info, orig_iova, orig_size - size);
+
+	return ret;
+}
+
+static void ipu6_mmu_destroy(struct ipu6_mmu *mmu)
+{
+	struct ipu6_dma_mapping *dmap = mmu->dmap;
+	struct ipu6_mmu_info *mmu_info = dmap->mmu_info;
+	struct iova *iova;
+	u32 l1_idx;
+
+	if (mmu->iova_trash_page) {
+		iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page));
+		if (iova) {
+			/* unmap and free the trash buffer iova */
+			ipu6_mmu_unmap(mmu_info, PFN_PHYS(iova->pfn_lo),
+				       PFN_PHYS(iova_size(iova)));
+			__free_iova(&dmap->iovad, iova);
+		} else {
+			dev_err(mmu->dev, "trash buffer iova not found.\n");
+		}
+
+		mmu->iova_trash_page = 0;
+		dma_unmap_page(mmu_info->dev, mmu->pci_trash_page,
+			       PAGE_SIZE, DMA_BIDIRECTIONAL);
+		mmu->pci_trash_page = 0;
+		__free_page(mmu->trash_page);
+	}
+
+	for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
+		if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) {
+			dma_unmap_single(mmu_info->dev,
+					 TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]),
+					 PAGE_SIZE, DMA_BIDIRECTIONAL);
+			free_page((unsigned long)mmu_info->l2_pts[l1_idx]);
+		}
+	}
+
+	vfree(mmu_info->l2_pts);
+	free_dummy_page(mmu_info);
+	dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->l1_pt_dma),
+			 PAGE_SIZE, DMA_BIDIRECTIONAL);
+	free_page((unsigned long)mmu_info->dummy_l2_pt);
+	free_page((unsigned long)mmu_info->l1_pt);
+	kfree(mmu_info);
+}
+
+struct ipu6_mmu *ipu6_mmu_init(struct device *dev,
+			       void __iomem *base, int mmid,
+			       const struct ipu6_hw_variants *hw)
+{
+	struct ipu6_device *isp = pci_get_drvdata(to_pci_dev(dev));
+	struct ipu6_mmu_pdata *pdata;
+	struct ipu6_mmu *mmu;
+	unsigned int i;
+
+	if (hw->nr_mmus > IPU6_MMU_MAX_DEVICES)
+		return ERR_PTR(-EINVAL);
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < hw->nr_mmus; i++) {
+		struct ipu6_mmu_hw *pdata_mmu = &pdata->mmu_hw[i];
+		const struct ipu6_mmu_hw *src_mmu = &hw->mmu_hw[i];
+
+		if (src_mmu->nr_l1streams > IPU6_MMU_MAX_TLB_L1_STREAMS ||
+		    src_mmu->nr_l2streams > IPU6_MMU_MAX_TLB_L2_STREAMS)
+			return ERR_PTR(-EINVAL);
+
+		*pdata_mmu = *src_mmu;
+		pdata_mmu->base = base + src_mmu->offset;
+	}
+
+	mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL);
+	if (!mmu)
+		return ERR_PTR(-ENOMEM);
+
+	mmu->mmid = mmid;
+	mmu->mmu_hw = pdata->mmu_hw;
+	mmu->nr_mmus = hw->nr_mmus;
+	mmu->tlb_invalidate = tlb_invalidate;
+	mmu->ready = false;
+	INIT_LIST_HEAD(&mmu->vma_list);
+	spin_lock_init(&mmu->ready_lock);
+
+	mmu->dmap = alloc_dma_mapping(isp);
+	if (!mmu->dmap) {
+		dev_err(dev, "can't alloc dma mapping\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return mmu;
+}
+
+void ipu6_mmu_cleanup(struct ipu6_mmu *mmu)
+{
+	struct ipu6_dma_mapping *dmap = mmu->dmap;
+
+	ipu6_mmu_destroy(mmu);
+	mmu->dmap = NULL;
+	iova_cache_put();
+	put_iova_domain(&dmap->iovad);
+	kfree(dmap);
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-mmu.h b/drivers/media/pci/intel/ipu6/ipu6-mmu.h
new file mode 100644
index 000000000000..95df7931a2e5
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-mmu.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013 - 2023 Intel Corporation */
+
+#ifndef IPU6_MMU_H
+#define IPU6_MMU_H
+
+#define ISYS_MMID 1
+#define PSYS_MMID 0
+
+#include <linux/list.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+struct device;
+struct page;
+struct ipu6_hw_variants;
+
+struct ipu6_mmu_info {
+	struct device *dev;
+
+	u32 *l1_pt;
+	u32 l1_pt_dma;
+	u32 **l2_pts;
+
+	u32 *dummy_l2_pt;
+	u32 dummy_l2_pteval;
+	void *dummy_page;
+	u32 dummy_page_pteval;
+
+	dma_addr_t aperture_start;
+	dma_addr_t aperture_end;
+	unsigned long pgsize_bitmap;
+
+	spinlock_t lock;	/* Serialize access to users */
+	struct ipu6_dma_mapping *dmap;
+};
+
+struct ipu6_mmu {
+	struct list_head node;
+
+	struct ipu6_mmu_hw *mmu_hw;
+	unsigned int nr_mmus;
+	unsigned int mmid;
+
+	phys_addr_t pgtbl;
+	struct device *dev;
+
+	struct ipu6_dma_mapping *dmap;
+	struct list_head vma_list;
+
+	struct page *trash_page;
+	dma_addr_t pci_trash_page; /* IOVA from PCI DMA services (parent) */
+	dma_addr_t iova_trash_page; /* IOVA for IPU6 child nodes to use */
+
+	bool ready;
+	spinlock_t ready_lock;	/* Serialize access to bool ready */
+
+	void (*tlb_invalidate)(struct ipu6_mmu *mmu);
+};
+
+struct ipu6_mmu *ipu6_mmu_init(struct device *dev,
+			       void __iomem *base, int mmid,
+			       const struct ipu6_hw_variants *hw);
+void ipu6_mmu_cleanup(struct ipu6_mmu *mmu);
+int ipu6_mmu_hw_init(struct ipu6_mmu *mmu);
+void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu);
+int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+		 phys_addr_t paddr, size_t size);
+size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+		      size_t size);
+phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info,
+				  dma_addr_t iova);
+#endif
-- 
2.43.0

From 6e686c2ff23611004d6e4f9a543116d65cbcf5f3 Mon Sep 17 00:00:00 2001
From: Bingbu Cao <bingbu.cao@intel.com>
Date: Thu, 11 Jan 2024 14:55:20 +0800
Subject: [PATCH 06/31] media: intel/ipu6: add syscom interfaces between
 firmware and driver

Syscom is an inter-process(or) communication mechanism between an IPU
and the host. Syscom uses message queues for message exchange between
the IPU and the host. Each message queue has a consumer and a producer:
the host queues messages to the firmware as the producer and the
firmware dequeues them as the consumer, and vice versa. The IPU and the
host use shared registers or memory to hold the read and write indices,
which are updated by the consumer and the producer.
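
Each queue keeps one slot empty so that a full queue can be told apart
from an empty one. A minimal model of the index arithmetic, for
illustration only (the helper names below are not part of the driver):

  /* size is queue_size + 1 slots; wr and rd wrap at size */
  static unsigned int tokens_free(unsigned int wr, unsigned int rd,
                                  unsigned int size)
  {
          /* slots the producer may still fill */
          return wr < rd ? rd - wr - 1 : size - (wr - rd + 1);
  }

  static unsigned int tokens_pending(unsigned int wr, unsigned int rd,
                                     unsigned int size)
  {
          /* tokens the consumer still has to dequeue */
          if (wr < rd)
                  wr += size;
          return wr - rd;
  }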

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Link: https://lore.kernel.org/r/20240111065531.2418836-7-bingbu.cao@intel.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 drivers/media/pci/intel/ipu6/ipu6-fw-com.c | 413 +++++++++++++++++++++
 drivers/media/pci/intel/ipu6/ipu6-fw-com.h |  47 +++
 2 files changed, 460 insertions(+)
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-fw-com.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-fw-com.h

diff --git a/drivers/media/pci/intel/ipu6/ipu6-fw-com.c b/drivers/media/pci/intel/ipu6/ipu6-fw-com.c
new file mode 100644
index 000000000000..0f893f44e04c
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-fw-com.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2023 Intel Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/math.h>
+#include <linux/overflow.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "ipu6-bus.h"
+#include "ipu6-fw-com.h"
+
+/*
+ * The FWCOM layer is a shared resource between the FW and the driver. It
+ * consists of token queues in both the send and receive directions. A
+ * queue is simply an array of structures with read and write indexes
+ * into the queue. There are 1...n queues in each direction. The queues
+ * are located in system RAM and are mapped through the ISP MMU so that
+ * both the CPU and the ISP see the same buffer. The indexes are located
+ * in ISP DMEM so that the FW code can poll them with very low latency
+ * and cost. CPU access to the indexes is more costly, but it happens
+ * only when sending a message and in interrupt-triggered message
+ * handling; the CPU doesn't need to poll the indexes. wr_reg / rd_reg
+ * are offsets to those DMEM locations, not the indexes themselves.
+ */
+
+/* Shared structure between driver and FW - do not modify */
+struct ipu6_fw_sys_queue {
+	u64 host_address;
+	u32 vied_address;
+	u32 size;
+	u32 token_size;
+	u32 wr_reg;	/* reg number in subsystem's regmem */
+	u32 rd_reg;
+	u32 _align;
+} __packed;
+
+struct ipu6_fw_sys_queue_res {
+	u64 host_address;
+	u32 vied_address;
+	u32 reg;
+} __packed;
+
+enum syscom_state {
+	/* Program load or explicit host setting should init to this */
+	SYSCOM_STATE_UNINIT = 0x57a7e000,
+	/* SP Syscom sets this when it is ready for use */
+	SYSCOM_STATE_READY = 0x57a7e001,
+	/* SP Syscom sets this when no more syscom accesses will happen */
+	SYSCOM_STATE_INACTIVE = 0x57a7e002,
+};
+
+enum syscom_cmd {
+	/* Program load or explicit host setting should init to this */
+	SYSCOM_COMMAND_UNINIT = 0x57a7f000,
+	/* Host Syscom requests syscom to become inactive */
+	SYSCOM_COMMAND_INACTIVE = 0x57a7f001,
+};
+
+/* firmware config: data that is sent from the host to the SP via DDR */
+/* Cell copies data into a context */
+
+struct ipu6_fw_syscom_config {
+	u32 firmware_address;
+
+	u32 num_input_queues;
+	u32 num_output_queues;
+
+	/* ISP pointers to an array of ipu6_fw_sys_queue structures */
+	u32 input_queue;
+	u32 output_queue;
+
+	/* ISYS / PSYS private data */
+	u32 specific_addr;
+	u32 specific_size;
+};
+
+struct ipu6_fw_com_context {
+	struct ipu6_bus_device *adev;
+	void __iomem *dmem_addr;
+	int (*cell_ready)(struct ipu6_bus_device *adev);
+	void (*cell_start)(struct ipu6_bus_device *adev);
+
+	void *dma_buffer;
+	dma_addr_t dma_addr;
+	unsigned int dma_size;
+	unsigned long attrs;
+
+	struct ipu6_fw_sys_queue *input_queue;	/* array of host to SP queues */
+	struct ipu6_fw_sys_queue *output_queue;	/* array of SP to host */
+
+	u32 config_vied_addr;
+
+	unsigned int buttress_boot_offset;
+	void __iomem *base_addr;
+};
+
+#define FW_COM_WR_REG 0
+#define FW_COM_RD_REG 4
+
+#define REGMEM_OFFSET 0
+#define TUNIT_MAGIC_PATTERN 0x5a5a5a5a
+
+enum regmem_id {
+	/* pass pkg_dir address to SPC in non-secure mode */
+	PKG_DIR_ADDR_REG = 0,
+	/* Tunit CFG blob for secure mode - provided by the host. */
+	TUNIT_CFG_DWR_REG = 1,
+	/* syscom commands - modified by the host */
+	SYSCOM_COMMAND_REG = 2,
+	/* Store interrupt status - updated by SP */
+	SYSCOM_IRQ_REG = 3,
+	/* first syscom queue pointer register */
+	SYSCOM_QPR_BASE_REG = 4
+};
+
+#define BUTTRESS_FW_BOOT_PARAMS_0 0x4000
+#define BUTTRESS_FW_BOOT_PARAM_REG(base, offset, id)			\
+	((base) + BUTTRESS_FW_BOOT_PARAMS_0 + ((offset) + (id)) * 4)
+
+enum buttress_syscom_id {
+	/* pass syscom configuration to SPC */
+	SYSCOM_CONFIG_ID		= 0,
+	/* syscom state - modified by SP */
+	SYSCOM_STATE_ID			= 1,
+	/* syscom vtl0 addr mask */
+	SYSCOM_VTL0_ADDR_MASK_ID	= 2,
+	SYSCOM_ID_MAX
+};
+
+static void ipu6_sys_queue_init(struct ipu6_fw_sys_queue *q, unsigned int size,
+				unsigned int token_size,
+				struct ipu6_fw_sys_queue_res *res)
+{
+	unsigned int buf_size = (size + 1) * token_size;
+
+	q->size = size + 1;
+	q->token_size = token_size;
+
+	/* acquire the shared buffer space */
+	q->host_address = res->host_address;
+	res->host_address += buf_size;
+	q->vied_address = res->vied_address;
+	res->vied_address += buf_size;
+
+	/* acquire the shared read and write pointers */
+	q->wr_reg = res->reg;
+	res->reg++;
+	q->rd_reg = res->reg;
+	res->reg++;
+}
+
+void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg,
+			  struct ipu6_bus_device *adev, void __iomem *base)
+{
+	size_t conf_size, inq_size, outq_size, specific_size;
+	struct ipu6_fw_syscom_config *config_host_addr;
+	unsigned int sizeinput = 0, sizeoutput = 0;
+	struct ipu6_fw_sys_queue_res res;
+	struct ipu6_fw_com_context *ctx;
+	struct device *dev = &adev->auxdev.dev;
+	size_t sizeall, offset;
+	unsigned long attrs = 0;
+	void *specific_host_addr;
+	unsigned int i;
+
+	if (!cfg || !cfg->cell_start || !cfg->cell_ready)
+		return NULL;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return NULL;
+	ctx->dmem_addr = base + cfg->dmem_addr + REGMEM_OFFSET;
+	ctx->adev = adev;
+	ctx->cell_start = cfg->cell_start;
+	ctx->cell_ready = cfg->cell_ready;
+	ctx->buttress_boot_offset = cfg->buttress_boot_offset;
+	ctx->base_addr  = base;
+
+	/*
+	 * Allocate DMA mapped memory. Allocate one big chunk.
+	 */
+	/* Base cfg for FW */
+	conf_size = roundup(sizeof(struct ipu6_fw_syscom_config), 8);
+	/* Descriptions of the queues */
+	inq_size = size_mul(cfg->num_input_queues,
+			    sizeof(struct ipu6_fw_sys_queue));
+	outq_size = size_mul(cfg->num_output_queues,
+			     sizeof(struct ipu6_fw_sys_queue));
+	/* FW specific information structure */
+	specific_size = roundup(cfg->specific_size, 8);
+
+	sizeall = conf_size + inq_size + outq_size + specific_size;
+
+	for (i = 0; i < cfg->num_input_queues; i++)
+		sizeinput += size_mul(cfg->input[i].queue_size + 1,
+				      cfg->input[i].token_size);
+
+	for (i = 0; i < cfg->num_output_queues; i++)
+		sizeoutput += size_mul(cfg->output[i].queue_size + 1,
+				       cfg->output[i].token_size);
+
+	sizeall += sizeinput + sizeoutput;
+
+	ctx->dma_buffer = dma_alloc_attrs(dev, sizeall, &ctx->dma_addr,
+					  GFP_KERNEL, attrs);
+	ctx->attrs = attrs;
+	if (!ctx->dma_buffer) {
+		dev_err(dev, "failed to allocate dma memory\n");
+		kfree(ctx);
+		return NULL;
+	}
+
+	ctx->dma_size = sizeall;
+
+	config_host_addr = ctx->dma_buffer;
+	ctx->config_vied_addr = ctx->dma_addr;
+
+	offset = conf_size;
+	ctx->input_queue = ctx->dma_buffer + offset;
+	config_host_addr->input_queue = ctx->dma_addr + offset;
+	config_host_addr->num_input_queues = cfg->num_input_queues;
+
+	offset += inq_size;
+	ctx->output_queue = ctx->dma_buffer + offset;
+	config_host_addr->output_queue = ctx->dma_addr + offset;
+	config_host_addr->num_output_queues = cfg->num_output_queues;
+
+	/* copy firmware specific data */
+	offset += outq_size;
+	specific_host_addr = ctx->dma_buffer + offset;
+	config_host_addr->specific_addr = ctx->dma_addr + offset;
+	config_host_addr->specific_size = cfg->specific_size;
+	if (cfg->specific_addr && cfg->specific_size)
+		memcpy(specific_host_addr, cfg->specific_addr,
+		       cfg->specific_size);
+
+	/* initialize input queues */
+	offset += specific_size;
+	res.reg = SYSCOM_QPR_BASE_REG;
+	res.host_address = (u64)(ctx->dma_buffer + offset);
+	res.vied_address = ctx->dma_addr + offset;
+	for (i = 0; i < cfg->num_input_queues; i++)
+		ipu6_sys_queue_init(ctx->input_queue + i,
+				    cfg->input[i].queue_size,
+				    cfg->input[i].token_size, &res);
+
+	/* initialize output queues */
+	offset += sizeinput;
+	res.host_address = (u64)(ctx->dma_buffer + offset);
+	res.vied_address = ctx->dma_addr + offset;
+	for (i = 0; i < cfg->num_output_queues; i++) {
+		ipu6_sys_queue_init(ctx->output_queue + i,
+				    cfg->output[i].queue_size,
+				    cfg->output[i].token_size, &res);
+	}
+
+	return ctx;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_prepare, INTEL_IPU6);
+
+int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx)
+{
+	/* write magic pattern to disable the tunit trace */
+	writel(TUNIT_MAGIC_PATTERN, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4);
+	/* Check if SP is in valid state */
+	if (!ctx->cell_ready(ctx->adev))
+		return -EIO;
+
+	/* store syscom uninitialized command */
+	writel(SYSCOM_COMMAND_UNINIT, ctx->dmem_addr + SYSCOM_COMMAND_REG * 4);
+
+	/* store syscom uninitialized state */
+	writel(SYSCOM_STATE_UNINIT,
+	       BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
+					  ctx->buttress_boot_offset,
+					  SYSCOM_STATE_ID));
+
+	/* store firmware configuration address */
+	writel(ctx->config_vied_addr,
+	       BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
+					  ctx->buttress_boot_offset,
+					  SYSCOM_CONFIG_ID));
+	ctx->cell_start(ctx->adev);
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_open, INTEL_IPU6);
+
+int ipu6_fw_com_close(struct ipu6_fw_com_context *ctx)
+{
+	int state;
+
+	state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
+						 ctx->buttress_boot_offset,
+						 SYSCOM_STATE_ID));
+	if (state != SYSCOM_STATE_READY)
+		return -EBUSY;
+
+	/* set close request flag */
+	writel(SYSCOM_COMMAND_INACTIVE, ctx->dmem_addr +
+	       SYSCOM_COMMAND_REG * 4);
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_close, INTEL_IPU6);
+
+int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force)
+{
+	/* check if release is forced, and verify the cell state if it is not */
+	if (!force && !ctx->cell_ready(ctx->adev))
+		return -EBUSY;
+
+	dma_free_attrs(&ctx->adev->auxdev.dev, ctx->dma_size,
+		       ctx->dma_buffer, ctx->dma_addr, ctx->attrs);
+	kfree(ctx);
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_release, INTEL_IPU6);
+
+bool ipu6_fw_com_ready(struct ipu6_fw_com_context *ctx)
+{
+	int state;
+
+	state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
+						 ctx->buttress_boot_offset,
+						 SYSCOM_STATE_ID));
+
+	return state == SYSCOM_STATE_READY;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_ready, INTEL_IPU6);
+
+void *ipu6_send_get_token(struct ipu6_fw_com_context *ctx, int q_nbr)
+{
+	struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr];
+	void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
+	unsigned int wr, rd;
+	unsigned int packets;
+	unsigned int index;
+
+	wr = readl(q_dmem + FW_COM_WR_REG);
+	rd = readl(q_dmem + FW_COM_RD_REG);
+
+	if (WARN_ON_ONCE(wr >= q->size || rd >= q->size))
+		return NULL;
+
+	if (wr < rd)
+		packets = rd - wr - 1;
+	else
+		packets = q->size - (wr - rd + 1);
+
+	if (!packets)
+		return NULL;
+
+	index = readl(q_dmem + FW_COM_WR_REG);
+
+	return (void *)(q->host_address + index * q->token_size);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_send_get_token, INTEL_IPU6);
+
+void ipu6_send_put_token(struct ipu6_fw_com_context *ctx, int q_nbr)
+{
+	struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr];
+	void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
+	unsigned int wr = readl(q_dmem + FW_COM_WR_REG) + 1;
+
+	if (wr >= q->size)
+		wr = 0;
+
+	writel(wr, q_dmem + FW_COM_WR_REG);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_send_put_token, INTEL_IPU6);
+
+void *ipu6_recv_get_token(struct ipu6_fw_com_context *ctx, int q_nbr)
+{
+	struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr];
+	void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
+	unsigned int wr, rd;
+	unsigned int packets;
+
+	wr = readl(q_dmem + FW_COM_WR_REG);
+	rd = readl(q_dmem + FW_COM_RD_REG);
+
+	if (WARN_ON_ONCE(wr >= q->size || rd >= q->size))
+		return NULL;
+
+	if (wr < rd)
+		wr += q->size;
+
+	packets = wr - rd;
+	if (!packets)
+		return NULL;
+
+	return (void *)(q->host_address + rd * q->token_size);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_recv_get_token, INTEL_IPU6);
+
+void ipu6_recv_put_token(struct ipu6_fw_com_context *ctx, int q_nbr)
+{
+	struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr];
+	void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
+	unsigned int rd = readl(q_dmem + FW_COM_RD_REG) + 1;
+
+	if (rd >= q->size)
+		rd = 0;
+
+	writel(rd, q_dmem + FW_COM_RD_REG);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_recv_put_token, INTEL_IPU6);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-fw-com.h b/drivers/media/pci/intel/ipu6/ipu6-fw-com.h
new file mode 100644
index 000000000000..660c406b3ac9
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-fw-com.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013 - 2023 Intel Corporation */
+
+#ifndef IPU6_FW_COM_H
+#define IPU6_FW_COM_H
+
+struct ipu6_fw_com_context;
+struct ipu6_bus_device;
+
+struct ipu6_fw_syscom_queue_config {
+	unsigned int queue_size;	/* tokens per queue */
+	unsigned int token_size;	/* bytes per token */
+};
+
+#define SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET	0
+
+struct ipu6_fw_com_cfg {
+	unsigned int num_input_queues;
+	unsigned int num_output_queues;
+	struct ipu6_fw_syscom_queue_config *input;
+	struct ipu6_fw_syscom_queue_config *output;
+
+	unsigned int dmem_addr;
+
+	/* firmware-specific configuration data */
+	void *specific_addr;
+	unsigned int specific_size;
+	int (*cell_ready)(struct ipu6_bus_device *adev);
+	void (*cell_start)(struct ipu6_bus_device *adev);
+
+	unsigned int buttress_boot_offset;
+};
+
+void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg,
+			  struct ipu6_bus_device *adev, void __iomem *base);
+
+int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx);
+bool ipu6_fw_com_ready(struct ipu6_fw_com_context *ctx);
+int ipu6_fw_com_close(struct ipu6_fw_com_context *ctx);
+int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force);
+
+void *ipu6_recv_get_token(struct ipu6_fw_com_context *ctx, int q_nbr);
+void ipu6_recv_put_token(struct ipu6_fw_com_context *ctx, int q_nbr);
+void *ipu6_send_get_token(struct ipu6_fw_com_context *ctx, int q_nbr);
+void ipu6_send_put_token(struct ipu6_fw_com_context *ctx, int q_nbr);
+
+#endif
-- 
2.43.0

From 6784d60f5fe68eca8bfc92785b5badd0bf415048 Mon Sep 17 00:00:00 2001
From: Bingbu Cao <bingbu.cao@intel.com>
Date: Thu, 11 Jan 2024 14:55:21 +0800
Subject: [PATCH 07/31] media: intel/ipu6: input system ABI between firmware
 and driver

Implement the input system firmware ABIs between the firmware and the
driver - including stream configuration, control commands, capture
requests and responses, etc.
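
Each command is delivered to the firmware as a token over the syscom
queues added in the previous patch. A rough sketch of the send path
(mirroring the command helpers below; not a verbatim excerpt, error
handling trimmed):

  struct ipu6_fw_send_queue_token *token;

  /* reserve a slot in the per-stream message send queue */
  token = ipu6_send_get_token(ctx, stream_handle + IPU6_BASE_MSG_SEND_QUEUES);
  if (!token)
          return -EBUSY;                   /* queue full */

  token->payload = dma_mapped_buf;         /* 0 for "simple" commands */
  token->buf_handle = (unsigned long)cpu_mapped_buf;
  token->send_type = send_type;

  /* advance the write index so the firmware sees the token */
  ipu6_send_put_token(ctx, stream_handle + IPU6_BASE_MSG_SEND_QUEUES);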

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Link: https://lore.kernel.org/r/20240111065531.2418836-8-bingbu.cao@intel.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 drivers/media/pci/intel/ipu6/ipu6-fw-isys.c | 487 +++++++++++++++++
 drivers/media/pci/intel/ipu6/ipu6-fw-isys.h | 573 ++++++++++++++++++++
 2 files changed, 1060 insertions(+)
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-fw-isys.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-fw-isys.h

diff --git a/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c b/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c
new file mode 100644
index 000000000000..e06c1c955d38
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-fw-isys.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2023 Intel Corporation
+ */
+
+#include <linux/cacheflush.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "ipu6-bus.h"
+#include "ipu6-fw-com.h"
+#include "ipu6-isys.h"
+#include "ipu6-platform-isys-csi2-reg.h"
+#include "ipu6-platform-regs.h"
+
+static const char send_msg_types[N_IPU6_FW_ISYS_SEND_TYPE][32] = {
+	"STREAM_OPEN",
+	"STREAM_START",
+	"STREAM_START_AND_CAPTURE",
+	"STREAM_CAPTURE",
+	"STREAM_STOP",
+	"STREAM_FLUSH",
+	"STREAM_CLOSE"
+};
+
+static int handle_proxy_response(struct ipu6_isys *isys, unsigned int req_id)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	struct ipu6_fw_isys_proxy_resp_info_abi *resp;
+	int ret;
+
+	resp = ipu6_recv_get_token(isys->fwcom, IPU6_BASE_PROXY_RECV_QUEUES);
+	if (!resp)
+		return 1;
+
+	dev_dbg(dev, "Proxy response: id %u, error %u, details %u\n",
+		resp->request_id, resp->error_info.error,
+		resp->error_info.error_details);
+
+	ret = req_id == resp->request_id ? 0 : -EIO;
+
+	ipu6_recv_put_token(isys->fwcom, IPU6_BASE_PROXY_RECV_QUEUES);
+
+	return ret;
+}
+
+int ipu6_fw_isys_send_proxy_token(struct ipu6_isys *isys,
+				  unsigned int req_id,
+				  unsigned int index,
+				  unsigned int offset, u32 value)
+{
+	struct ipu6_fw_com_context *ctx = isys->fwcom;
+	struct device *dev = &isys->adev->auxdev.dev;
+	struct ipu6_fw_proxy_send_queue_token *token;
+	unsigned int timeout = 1000;
+	int ret;
+
+	dev_dbg(dev,
+		"proxy send: req_id 0x%x, index %d, offset 0x%x, value 0x%x\n",
+		req_id, index, offset, value);
+
+	token = ipu6_send_get_token(ctx, IPU6_BASE_PROXY_SEND_QUEUES);
+	if (!token)
+		return -EBUSY;
+
+	token->request_id = req_id;
+	token->region_index = index;
+	token->offset = offset;
+	token->value = value;
+	ipu6_send_put_token(ctx, IPU6_BASE_PROXY_SEND_QUEUES);
+
+	do {
+		usleep_range(100, 110);
+		ret = handle_proxy_response(isys, req_id);
+		if (!ret)
+			break;
+		if (ret == -EIO) {
+			dev_err(dev, "Proxy respond with unexpected id\n");
+			break;
+		}
+		timeout--;
+	} while (ret && timeout);
+
+	if (!timeout)
+		dev_err(dev, "Proxy response timed out\n");
+
+	return ret;
+}
+
+int ipu6_fw_isys_complex_cmd(struct ipu6_isys *isys,
+			     const unsigned int stream_handle,
+			     void *cpu_mapped_buf,
+			     dma_addr_t dma_mapped_buf,
+			     size_t size, u16 send_type)
+{
+	struct ipu6_fw_com_context *ctx = isys->fwcom;
+	struct device *dev = &isys->adev->auxdev.dev;
+	struct ipu6_fw_send_queue_token *token;
+
+	if (send_type >= N_IPU6_FW_ISYS_SEND_TYPE)
+		return -EINVAL;
+
+	dev_dbg(dev, "send_token: %s\n", send_msg_types[send_type]);
+
+	/*
+	 * Time to flush the cache in case we have a payload. Not all
+	 * messages have one.
+	 */
+	if (cpu_mapped_buf)
+		clflush_cache_range(cpu_mapped_buf, size);
+
+	token = ipu6_send_get_token(ctx,
+				    stream_handle + IPU6_BASE_MSG_SEND_QUEUES);
+	if (!token)
+		return -EBUSY;
+
+	token->payload = dma_mapped_buf;
+	token->buf_handle = (unsigned long)cpu_mapped_buf;
+	token->send_type = send_type;
+
+	ipu6_send_put_token(ctx, stream_handle + IPU6_BASE_MSG_SEND_QUEUES);
+
+	return 0;
+}
+
+int ipu6_fw_isys_simple_cmd(struct ipu6_isys *isys,
+			    const unsigned int stream_handle, u16 send_type)
+{
+	return ipu6_fw_isys_complex_cmd(isys, stream_handle, NULL, 0, 0,
+					send_type);
+}
+
+int ipu6_fw_isys_close(struct ipu6_isys *isys)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	int retry = IPU6_ISYS_CLOSE_RETRY;
+	unsigned long flags;
+	void *fwcom;
+	int ret;
+
+	/*
+	 * Stop the isys fw. The actual close takes
+	 * some time as the FW must stop its actions, including code
+	 * fetches to the SP icache.
+	 * Take the spinlock to wait for the interrupt handler to finish.
+	 */
+	spin_lock_irqsave(&isys->power_lock, flags);
+	ret = ipu6_fw_com_close(isys->fwcom);
+	fwcom = isys->fwcom;
+	isys->fwcom = NULL;
+	spin_unlock_irqrestore(&isys->power_lock, flags);
+	if (ret)
+		dev_err(dev, "Device close failure: %d\n", ret);
+
+	/* release probably fails if the close failed. Let's try anyway. */
+	do {
+		usleep_range(400, 500);
+		ret = ipu6_fw_com_release(fwcom, 0);
+		retry--;
+	} while (ret && retry);
+
+	if (ret) {
+		dev_err(dev, "Device release time out %d\n", ret);
+		spin_lock_irqsave(&isys->power_lock, flags);
+		isys->fwcom = fwcom;
+		spin_unlock_irqrestore(&isys->power_lock, flags);
+	}
+
+	return ret;
+}
+
+void ipu6_fw_isys_cleanup(struct ipu6_isys *isys)
+{
+	int ret;
+
+	ret = ipu6_fw_com_release(isys->fwcom, 1);
+	if (ret < 0)
+		dev_warn(&isys->adev->auxdev.dev,
+			 "Device busy, fw_com release failed.");
+	isys->fwcom = NULL;
+}
+
+static void start_sp(struct ipu6_bus_device *adev)
+{
+	struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev);
+	void __iomem *spc_regs_base = isys->pdata->base +
+		isys->pdata->ipdata->hw_variant.spc_offset;
+	u32 val = IPU6_ISYS_SPC_STATUS_START |
+		IPU6_ISYS_SPC_STATUS_RUN |
+		IPU6_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE;
+
+	val |= isys->icache_prefetch ? IPU6_ISYS_SPC_STATUS_ICACHE_PREFETCH : 0;
+
+	writel(val, spc_regs_base + IPU6_ISYS_REG_SPC_STATUS_CTRL);
+}
+
+static int query_sp(struct ipu6_bus_device *adev)
+{
+	struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev);
+	void __iomem *spc_regs_base = isys->pdata->base +
+		isys->pdata->ipdata->hw_variant.spc_offset;
+	u32 val;
+
+	val = readl(spc_regs_base + IPU6_ISYS_REG_SPC_STATUS_CTRL);
+	/* return true when READY == 1, START == 0 */
+	val &= IPU6_ISYS_SPC_STATUS_READY | IPU6_ISYS_SPC_STATUS_START;
+
+	return val == IPU6_ISYS_SPC_STATUS_READY;
+}
+
+static int ipu6_isys_fwcom_cfg_init(struct ipu6_isys *isys,
+				    struct ipu6_fw_com_cfg *fwcom,
+				    unsigned int num_streams)
+{
+	unsigned int max_send_queues, max_sram_blocks, max_devq_size;
+	struct ipu6_fw_syscom_queue_config *input_queue_cfg;
+	struct ipu6_fw_syscom_queue_config *output_queue_cfg;
+	struct device *dev = &isys->adev->auxdev.dev;
+	int type_proxy = IPU6_FW_ISYS_QUEUE_TYPE_PROXY;
+	int type_dev = IPU6_FW_ISYS_QUEUE_TYPE_DEV;
+	int type_msg = IPU6_FW_ISYS_QUEUE_TYPE_MSG;
+	int base_dev_send = IPU6_BASE_DEV_SEND_QUEUES;
+	int base_msg_send = IPU6_BASE_MSG_SEND_QUEUES;
+	int base_msg_recv = IPU6_BASE_MSG_RECV_QUEUES;
+	struct ipu6_fw_isys_fw_config *isys_fw_cfg;
+	u32 num_in_message_queues;
+	unsigned int max_streams;
+	unsigned int size;
+	unsigned int i;
+
+	max_streams = isys->pdata->ipdata->max_streams;
+	max_send_queues = isys->pdata->ipdata->max_send_queues;
+	max_sram_blocks = isys->pdata->ipdata->max_sram_blocks;
+	max_devq_size = isys->pdata->ipdata->max_devq_size;
+	num_in_message_queues = clamp(num_streams, 1U, max_streams);
+	isys_fw_cfg = devm_kzalloc(dev, sizeof(*isys_fw_cfg), GFP_KERNEL);
+	if (!isys_fw_cfg)
+		return -ENOMEM;
+
+	isys_fw_cfg->num_send_queues[type_proxy] = IPU6_N_MAX_PROXY_SEND_QUEUES;
+	isys_fw_cfg->num_send_queues[type_dev] = IPU6_N_MAX_DEV_SEND_QUEUES;
+	isys_fw_cfg->num_send_queues[type_msg] = num_in_message_queues;
+	isys_fw_cfg->num_recv_queues[type_proxy] = IPU6_N_MAX_PROXY_RECV_QUEUES;
+	/* Common msg/dev return queue */
+	isys_fw_cfg->num_recv_queues[type_dev] = 0;
+	isys_fw_cfg->num_recv_queues[type_msg] = 1;
+
+	size = sizeof(*input_queue_cfg) * max_send_queues;
+	input_queue_cfg = devm_kzalloc(dev, size, GFP_KERNEL);
+	if (!input_queue_cfg)
+		return -ENOMEM;
+
+	size = sizeof(*output_queue_cfg) * IPU6_N_MAX_RECV_QUEUES;
+	output_queue_cfg = devm_kzalloc(dev, size, GFP_KERNEL);
+	if (!output_queue_cfg)
+		return -ENOMEM;
+
+	fwcom->input = input_queue_cfg;
+	fwcom->output = output_queue_cfg;
+
+	fwcom->num_input_queues = isys_fw_cfg->num_send_queues[type_proxy] +
+		isys_fw_cfg->num_send_queues[type_dev] +
+		isys_fw_cfg->num_send_queues[type_msg];
+
+	fwcom->num_output_queues = isys_fw_cfg->num_recv_queues[type_proxy] +
+		isys_fw_cfg->num_recv_queues[type_dev] +
+		isys_fw_cfg->num_recv_queues[type_msg];
+
+	/* SRAM partitioning. Equal partitioning is set. */
+	for (i = 0; i < max_sram_blocks; i++) {
+		if (i < num_in_message_queues)
+			isys_fw_cfg->buffer_partition.num_gda_pages[i] =
+				(IPU6_DEVICE_GDA_NR_PAGES *
+				 IPU6_DEVICE_GDA_VIRT_FACTOR) /
+				num_in_message_queues;
+		else
+			isys_fw_cfg->buffer_partition.num_gda_pages[i] = 0;
+	}
+
+	/* FW assumes proxy interface at fwcom queue 0 */
+	for (i = 0; i < isys_fw_cfg->num_send_queues[type_proxy]; i++) {
+		input_queue_cfg[i].token_size =
+			sizeof(struct ipu6_fw_proxy_send_queue_token);
+		input_queue_cfg[i].queue_size = IPU6_ISYS_SIZE_PROXY_SEND_QUEUE;
+	}
+
+	for (i = 0; i < isys_fw_cfg->num_send_queues[type_dev]; i++) {
+		input_queue_cfg[base_dev_send + i].token_size =
+			sizeof(struct ipu6_fw_send_queue_token);
+		input_queue_cfg[base_dev_send + i].queue_size = max_devq_size;
+	}
+
+	for (i = 0; i < isys_fw_cfg->num_send_queues[type_msg]; i++) {
+		input_queue_cfg[base_msg_send + i].token_size =
+			sizeof(struct ipu6_fw_send_queue_token);
+		input_queue_cfg[base_msg_send + i].queue_size =
+			IPU6_ISYS_SIZE_SEND_QUEUE;
+	}
+
+	for (i = 0; i < isys_fw_cfg->num_recv_queues[type_proxy]; i++) {
+		output_queue_cfg[i].token_size =
+			sizeof(struct ipu6_fw_proxy_resp_queue_token);
+		output_queue_cfg[i].queue_size =
+			IPU6_ISYS_SIZE_PROXY_RECV_QUEUE;
+	}
+	/* There is no recv DEV queue */
+	for (i = 0; i < isys_fw_cfg->num_recv_queues[type_msg]; i++) {
+		output_queue_cfg[base_msg_recv + i].token_size =
+			sizeof(struct ipu6_fw_resp_queue_token);
+		output_queue_cfg[base_msg_recv + i].queue_size =
+			IPU6_ISYS_SIZE_RECV_QUEUE;
+	}
+
+	fwcom->dmem_addr = isys->pdata->ipdata->hw_variant.dmem_offset;
+	fwcom->specific_addr = isys_fw_cfg;
+	fwcom->specific_size = sizeof(*isys_fw_cfg);
+
+	return 0;
+}
+
+int ipu6_fw_isys_init(struct ipu6_isys *isys, unsigned int num_streams)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	int retry = IPU6_ISYS_OPEN_RETRY;
+	struct ipu6_fw_com_cfg fwcom = {
+		.cell_start = start_sp,
+		.cell_ready = query_sp,
+		.buttress_boot_offset = SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET,
+	};
+	int ret;
+
+	ipu6_isys_fwcom_cfg_init(isys, &fwcom, num_streams);
+
+	isys->fwcom = ipu6_fw_com_prepare(&fwcom, isys->adev,
+					  isys->pdata->base);
+	if (!isys->fwcom) {
+		dev_err(dev, "isys fw com prepare failed\n");
+		return -EIO;
+	}
+
+	ret = ipu6_fw_com_open(isys->fwcom);
+	if (ret) {
+		dev_err(dev, "isys fw com open failed %d\n", ret);
+		return ret;
+	}
+
+	do {
+		usleep_range(400, 500);
+		if (ipu6_fw_com_ready(isys->fwcom))
+			break;
+		retry--;
+	} while (retry > 0);
+
+	if (!retry) {
+		dev_err(dev, "isys port open ready failed %d\n", ret);
+		ipu6_fw_isys_close(isys);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+struct ipu6_fw_isys_resp_info_abi *
+ipu6_fw_isys_get_resp(void *context, unsigned int queue)
+{
+	return ipu6_recv_get_token(context, queue);
+}
+
+void ipu6_fw_isys_put_resp(void *context, unsigned int queue)
+{
+	ipu6_recv_put_token(context, queue);
+}
+
+void ipu6_fw_isys_dump_stream_cfg(struct device *dev,
+				  struct ipu6_fw_isys_stream_cfg_data_abi *cfg)
+{
+	unsigned int i;
+
+	dev_dbg(dev, "-----------------------------------------------------\n");
+	dev_dbg(dev, "IPU6_FW_ISYS_STREAM_CFG_DATA\n");
+
+	dev_dbg(dev, "compfmt = %d\n", cfg->compfmt);
+	dev_dbg(dev, "src = %d\n", cfg->src);
+	dev_dbg(dev, "vc = %d\n", cfg->vc);
+	dev_dbg(dev, "isl_use = %d\n", cfg->isl_use);
+	dev_dbg(dev, "sensor_type = %d\n", cfg->sensor_type);
+
+	dev_dbg(dev, "send_irq_sof_discarded = %d\n",
+		cfg->send_irq_sof_discarded);
+	dev_dbg(dev, "send_irq_eof_discarded = %d\n",
+		cfg->send_irq_eof_discarded);
+	dev_dbg(dev, "send_resp_sof_discarded = %d\n",
+		cfg->send_resp_sof_discarded);
+	dev_dbg(dev, "send_resp_eof_discarded = %d\n",
+		cfg->send_resp_eof_discarded);
+
+	dev_dbg(dev, "crop:\n");
+	dev_dbg(dev, "\t.left_top = [%d, %d]\n", cfg->crop.left_offset,
+		cfg->crop.top_offset);
+	dev_dbg(dev, "\t.right_bottom = [%d, %d]\n", cfg->crop.right_offset,
+		cfg->crop.bottom_offset);
+
+	dev_dbg(dev, "nof_input_pins = %d\n", cfg->nof_input_pins);
+	for (i = 0; i < cfg->nof_input_pins; i++) {
+		dev_dbg(dev, "input pin[%d]:\n", i);
+		dev_dbg(dev, "\t.dt = 0x%0x\n", cfg->input_pins[i].dt);
+		dev_dbg(dev, "\t.mipi_store_mode = %d\n",
+			cfg->input_pins[i].mipi_store_mode);
+		dev_dbg(dev, "\t.bits_per_pix = %d\n",
+			cfg->input_pins[i].bits_per_pix);
+		dev_dbg(dev, "\t.mapped_dt = 0x%0x\n",
+			cfg->input_pins[i].mapped_dt);
+		dev_dbg(dev, "\t.input_res = %dx%d\n",
+			cfg->input_pins[i].input_res.width,
+			cfg->input_pins[i].input_res.height);
+		dev_dbg(dev, "\t.mipi_decompression = %d\n",
+			cfg->input_pins[i].mipi_decompression);
+		dev_dbg(dev, "\t.capture_mode = %d\n",
+			cfg->input_pins[i].capture_mode);
+	}
+
+	dev_dbg(dev, "nof_output_pins = %d\n", cfg->nof_output_pins);
+	for (i = 0; i < cfg->nof_output_pins; i++) {
+		dev_dbg(dev, "output_pin[%d]:\n", i);
+		dev_dbg(dev, "\t.input_pin_id = %d\n",
+			cfg->output_pins[i].input_pin_id);
+		dev_dbg(dev, "\t.output_res = %dx%d\n",
+			cfg->output_pins[i].output_res.width,
+			cfg->output_pins[i].output_res.height);
+		dev_dbg(dev, "\t.stride = %d\n", cfg->output_pins[i].stride);
+		dev_dbg(dev, "\t.pt = %d\n", cfg->output_pins[i].pt);
+		dev_dbg(dev, "\t.payload_buf_size = %d\n",
+			cfg->output_pins[i].payload_buf_size);
+		dev_dbg(dev, "\t.ft = %d\n", cfg->output_pins[i].ft);
+		dev_dbg(dev, "\t.watermark_in_lines = %d\n",
+			cfg->output_pins[i].watermark_in_lines);
+		dev_dbg(dev, "\t.send_irq = %d\n",
+			cfg->output_pins[i].send_irq);
+		dev_dbg(dev, "\t.reserve_compression = %d\n",
+			cfg->output_pins[i].reserve_compression);
+		dev_dbg(dev, "\t.snoopable = %d\n",
+			cfg->output_pins[i].snoopable);
+		dev_dbg(dev, "\t.error_handling_enable = %d\n",
+			cfg->output_pins[i].error_handling_enable);
+		dev_dbg(dev, "\t.sensor_type = %d\n",
+			cfg->output_pins[i].sensor_type);
+	}
+	dev_dbg(dev, "-----------------------------------------------------\n");
+}
+
+void
+ipu6_fw_isys_dump_frame_buff_set(struct device *dev,
+				 struct ipu6_fw_isys_frame_buff_set_abi *buf,
+				 unsigned int outputs)
+{
+	unsigned int i;
+
+	dev_dbg(dev, "-----------------------------------------------------\n");
+	dev_dbg(dev, "IPU6_FW_ISYS_FRAME_BUFF_SET\n");
+
+	for (i = 0; i < outputs; i++) {
+		dev_dbg(dev, "output_pin[%d]:\n", i);
+		dev_dbg(dev, "\t.out_buf_id = %llu\n",
+			buf->output_pins[i].out_buf_id);
+		dev_dbg(dev, "\t.addr = 0x%x\n", buf->output_pins[i].addr);
+		dev_dbg(dev, "\t.compress = %d\n",
+			buf->output_pins[i].compress);
+	}
+
+	dev_dbg(dev, "send_irq_sof = 0x%x\n", buf->send_irq_sof);
+	dev_dbg(dev, "send_irq_eof = 0x%x\n", buf->send_irq_eof);
+	dev_dbg(dev, "send_resp_sof = 0x%x\n", buf->send_resp_sof);
+	dev_dbg(dev, "send_resp_eof = 0x%x\n", buf->send_resp_eof);
+	dev_dbg(dev, "send_irq_capture_ack = 0x%x\n",
+		buf->send_irq_capture_ack);
+	dev_dbg(dev, "send_irq_capture_done = 0x%x\n",
+		buf->send_irq_capture_done);
+	dev_dbg(dev, "send_resp_capture_ack = 0x%x\n",
+		buf->send_resp_capture_ack);
+	dev_dbg(dev, "send_resp_capture_done = 0x%x\n",
+		buf->send_resp_capture_done);
+
+	dev_dbg(dev, "-----------------------------------------------------\n");
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h b/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h
new file mode 100644
index 000000000000..a7ffa0e22bf0
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-fw-isys.h
@@ -0,0 +1,573 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013 - 2023 Intel Corporation */
+
+#ifndef IPU6_FW_ISYS_H
+#define IPU6_FW_ISYS_H
+
+#include <linux/types.h>
+
+struct device;
+struct ipu6_isys;
+
+/* Max number of Input/Output Pins */
+#define IPU6_MAX_IPINS 4
+
+#define IPU6_MAX_OPINS ((IPU6_MAX_IPINS) + 1)
+
+#define IPU6_STREAM_ID_MAX 16
+#define IPU6_NONSECURE_STREAM_ID_MAX 12
+#define IPU6_DEV_SEND_QUEUE_SIZE (IPU6_STREAM_ID_MAX)
+#define IPU6_NOF_SRAM_BLOCKS_MAX (IPU6_STREAM_ID_MAX)
+#define IPU6_N_MAX_MSG_SEND_QUEUES (IPU6_STREAM_ID_MAX)
+#define IPU6SE_STREAM_ID_MAX 8
+#define IPU6SE_NONSECURE_STREAM_ID_MAX 4
+#define IPU6SE_DEV_SEND_QUEUE_SIZE (IPU6SE_STREAM_ID_MAX)
+#define IPU6SE_NOF_SRAM_BLOCKS_MAX (IPU6SE_STREAM_ID_MAX)
+#define IPU6SE_N_MAX_MSG_SEND_QUEUES (IPU6SE_STREAM_ID_MAX)
+
+/* Single return queue for all streams/commands type */
+#define IPU6_N_MAX_MSG_RECV_QUEUES 1
+/* Single device queue for high priority commands (bypass in-order queue) */
+#define IPU6_N_MAX_DEV_SEND_QUEUES 1
+/* Single dedicated send queue for proxy interface */
+#define IPU6_N_MAX_PROXY_SEND_QUEUES 1
+/* Single dedicated recv queue for proxy interface */
+#define IPU6_N_MAX_PROXY_RECV_QUEUES 1
+/* Send queues layout */
+#define IPU6_BASE_PROXY_SEND_QUEUES 0
+#define IPU6_BASE_DEV_SEND_QUEUES \
+	(IPU6_BASE_PROXY_SEND_QUEUES + IPU6_N_MAX_PROXY_SEND_QUEUES)
+#define IPU6_BASE_MSG_SEND_QUEUES \
+	(IPU6_BASE_DEV_SEND_QUEUES + IPU6_N_MAX_DEV_SEND_QUEUES)
+/* Recv queues layout */
+#define IPU6_BASE_PROXY_RECV_QUEUES 0
+#define IPU6_BASE_MSG_RECV_QUEUES \
+	(IPU6_BASE_PROXY_RECV_QUEUES + IPU6_N_MAX_PROXY_RECV_QUEUES)
+#define IPU6_N_MAX_RECV_QUEUES \
+	(IPU6_BASE_MSG_RECV_QUEUES + IPU6_N_MAX_MSG_RECV_QUEUES)
+
+#define IPU6_N_MAX_SEND_QUEUES \
+	(IPU6_BASE_MSG_SEND_QUEUES + IPU6_N_MAX_MSG_SEND_QUEUES)
+#define IPU6SE_N_MAX_SEND_QUEUES \
+	(IPU6_BASE_MSG_SEND_QUEUES + IPU6SE_N_MAX_MSG_SEND_QUEUES)
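+
+/*
+ * With the values above the send queue indices are: 0 = proxy, 1 = device,
+ * 2.. = per-stream message queues; the receive queue indices are: 0 = proxy,
+ * 1 = the single message/device return queue.
+ */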
+
+/* Max number of supported input pins routed in ISL */
+#define IPU6_MAX_IPINS_IN_ISL 2
+
+/* Max number of planes for frame formats supported by the FW */
+#define IPU6_PIN_PLANES_MAX 4
+
+#define IPU6_FW_ISYS_SENSOR_TYPE_START 14
+#define IPU6_FW_ISYS_SENSOR_TYPE_END 19
+#define IPU6SE_FW_ISYS_SENSOR_TYPE_START 6
+#define IPU6SE_FW_ISYS_SENSOR_TYPE_END 11
+/*
+ * Device close takes some time from last ack message to actual stopping
+ * of the SP processor. As long as the SP processor runs we can't proceed with
+ * clean up of resources.
+ */
+#define IPU6_ISYS_OPEN_RETRY			2000
+#define IPU6_ISYS_CLOSE_RETRY			2000
+#define IPU6_FW_CALL_TIMEOUT_JIFFIES		msecs_to_jiffies(2000)
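+/*
+ * Together with the 400-500 us poll interval in the open/close paths, the
+ * retry counts above allow roughly 0.8-1 s for the SP to become ready or
+ * to stop.
+ */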
+
+enum ipu6_fw_isys_resp_type {
+	IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0,
+	IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK,
+	IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK,
+	IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK,
+	IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK,
+	IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK,
+	IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK,
+	IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY,
+	IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_WATERMARK,
+	IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF,
+	IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF,
+	IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE,
+	IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE,
+	IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_SKIPPED,
+	IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED,
+	IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED,
+	IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED,
+	IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY,
+	N_IPU6_FW_ISYS_RESP_TYPE
+};
+
+enum ipu6_fw_isys_send_type {
+	IPU6_FW_ISYS_SEND_TYPE_STREAM_OPEN = 0,
+	IPU6_FW_ISYS_SEND_TYPE_STREAM_START,
+	IPU6_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE,
+	IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE,
+	IPU6_FW_ISYS_SEND_TYPE_STREAM_STOP,
+	IPU6_FW_ISYS_SEND_TYPE_STREAM_FLUSH,
+	IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE,
+	N_IPU6_FW_ISYS_SEND_TYPE
+};
+
+enum ipu6_fw_isys_queue_type {
+	IPU6_FW_ISYS_QUEUE_TYPE_PROXY = 0,
+	IPU6_FW_ISYS_QUEUE_TYPE_DEV,
+	IPU6_FW_ISYS_QUEUE_TYPE_MSG,
+	N_IPU6_FW_ISYS_QUEUE_TYPE
+};
+
+enum ipu6_fw_isys_stream_source {
+	IPU6_FW_ISYS_STREAM_SRC_PORT_0 = 0,
+	IPU6_FW_ISYS_STREAM_SRC_PORT_1,
+	IPU6_FW_ISYS_STREAM_SRC_PORT_2,
+	IPU6_FW_ISYS_STREAM_SRC_PORT_3,
+	IPU6_FW_ISYS_STREAM_SRC_PORT_4,
+	IPU6_FW_ISYS_STREAM_SRC_PORT_5,
+	IPU6_FW_ISYS_STREAM_SRC_PORT_6,
+	IPU6_FW_ISYS_STREAM_SRC_PORT_7,
+	IPU6_FW_ISYS_STREAM_SRC_PORT_8,
+	IPU6_FW_ISYS_STREAM_SRC_PORT_9,
+	IPU6_FW_ISYS_STREAM_SRC_PORT_10,
+	IPU6_FW_ISYS_STREAM_SRC_PORT_11,
+	IPU6_FW_ISYS_STREAM_SRC_PORT_12,
+	IPU6_FW_ISYS_STREAM_SRC_PORT_13,
+	IPU6_FW_ISYS_STREAM_SRC_PORT_14,
+	IPU6_FW_ISYS_STREAM_SRC_PORT_15,
+	IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_0,
+	IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_1,
+	IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_2,
+	IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_3,
+	IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_4,
+	IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_5,
+	IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_6,
+	IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_7,
+	IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_8,
+	IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_9,
+	N_IPU6_FW_ISYS_STREAM_SRC
+};
+
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT0 IPU6_FW_ISYS_STREAM_SRC_PORT_0
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT1 IPU6_FW_ISYS_STREAM_SRC_PORT_1
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT2 IPU6_FW_ISYS_STREAM_SRC_PORT_2
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT3 IPU6_FW_ISYS_STREAM_SRC_PORT_3
+
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTA IPU6_FW_ISYS_STREAM_SRC_PORT_4
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTB IPU6_FW_ISYS_STREAM_SRC_PORT_5
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT0 \
+	IPU6_FW_ISYS_STREAM_SRC_PORT_6
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT1 \
+	IPU6_FW_ISYS_STREAM_SRC_PORT_7
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT2 \
+	IPU6_FW_ISYS_STREAM_SRC_PORT_8
+#define IPU6_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT3 \
+	IPU6_FW_ISYS_STREAM_SRC_PORT_9
+
+#define IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_PORT0 IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_0
+#define IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_PORT1 IPU6_FW_ISYS_STREAM_SRC_MIPIGEN_1
+
+/**
+ * enum ipu6_fw_isys_mipi_vc - the MIPI CSI-2 spec supports up to
+ * 4 virtual channels per physical channel
+ */
+enum ipu6_fw_isys_mipi_vc {
+	IPU6_FW_ISYS_MIPI_VC_0 = 0,
+	IPU6_FW_ISYS_MIPI_VC_1,
+	IPU6_FW_ISYS_MIPI_VC_2,
+	IPU6_FW_ISYS_MIPI_VC_3,
+	N_IPU6_FW_ISYS_MIPI_VC
+};
+
+enum ipu6_fw_isys_frame_format_type {
+	IPU6_FW_ISYS_FRAME_FORMAT_NV11 = 0, /* 12 bit YUV 411, Y, UV plane */
+	IPU6_FW_ISYS_FRAME_FORMAT_NV12,	/* 12 bit YUV 420, Y, UV plane */
+	IPU6_FW_ISYS_FRAME_FORMAT_NV12_16, /* 16 bit YUV 420, Y, UV plane */
+	/* 12 bit YUV 420, Intel proprietary tiled format */
+	IPU6_FW_ISYS_FRAME_FORMAT_NV12_TILEY,
+
+	IPU6_FW_ISYS_FRAME_FORMAT_NV16,	/* 16 bit YUV 422, Y, UV plane */
+	IPU6_FW_ISYS_FRAME_FORMAT_NV21,	/* 12 bit YUV 420, Y, VU plane */
+	IPU6_FW_ISYS_FRAME_FORMAT_NV61,	/* 16 bit YUV 422, Y, VU plane */
+	IPU6_FW_ISYS_FRAME_FORMAT_YV12,	/* 12 bit YUV 420, Y, V, U plane */
+	IPU6_FW_ISYS_FRAME_FORMAT_YV16,	/* 16 bit YUV 422, Y, V, U plane */
+	IPU6_FW_ISYS_FRAME_FORMAT_YUV420, /* 12 bit YUV 420, Y, U, V plane */
+	IPU6_FW_ISYS_FRAME_FORMAT_YUV420_10, /* yuv420, 10 bits per subpixel */
+	IPU6_FW_ISYS_FRAME_FORMAT_YUV420_12, /* yuv420, 12 bits per subpixel */
+	IPU6_FW_ISYS_FRAME_FORMAT_YUV420_14, /* yuv420, 14 bits per subpixel */
+	IPU6_FW_ISYS_FRAME_FORMAT_YUV420_16, /* yuv420, 16 bits per subpixel */
+	IPU6_FW_ISYS_FRAME_FORMAT_YUV422, /* 16 bit YUV 422, Y, U, V plane */
+	IPU6_FW_ISYS_FRAME_FORMAT_YUV422_16, /* yuv422, 16 bits per subpixel */
+	IPU6_FW_ISYS_FRAME_FORMAT_UYVY,	/* 16 bit YUV 422, UYVY interleaved */
+	IPU6_FW_ISYS_FRAME_FORMAT_YUYV,	/* 16 bit YUV 422, YUYV interleaved */
+	IPU6_FW_ISYS_FRAME_FORMAT_YUV444, /* 24 bit YUV 444, Y, U, V plane */
+	/* Internal format, 2 y lines followed by a uvinterleaved line */
+	IPU6_FW_ISYS_FRAME_FORMAT_YUV_LINE,
+	IPU6_FW_ISYS_FRAME_FORMAT_RAW8,	/* RAW8, 1 plane */
+	IPU6_FW_ISYS_FRAME_FORMAT_RAW10, /* RAW10, 1 plane */
+	IPU6_FW_ISYS_FRAME_FORMAT_RAW12, /* RAW12, 1 plane */
+	IPU6_FW_ISYS_FRAME_FORMAT_RAW14, /* RAW14, 1 plane */
+	IPU6_FW_ISYS_FRAME_FORMAT_RAW16, /* RAW16, 1 plane */
+	/**
+	 * 16 bit RGB, 1 plane. Each 3 sub pixels are packed into one 16 bit
+	 * value, 5 bits for R, 6 bits for G and 5 bits for B.
+	 */
+	IPU6_FW_ISYS_FRAME_FORMAT_RGB565,
+	IPU6_FW_ISYS_FRAME_FORMAT_PLANAR_RGB888, /* 24 bit RGB, 3 planes */
+	IPU6_FW_ISYS_FRAME_FORMAT_RGBA888, /* 32 bit RGBA, 1 plane, A=Alpha */
+	IPU6_FW_ISYS_FRAME_FORMAT_QPLANE6, /* Internal, for advanced ISP */
+	IPU6_FW_ISYS_FRAME_FORMAT_BINARY_8, /* byte stream, used for jpeg. */
+	N_IPU6_FW_ISYS_FRAME_FORMAT
+};
+
+#define IPU6_FW_ISYS_FRAME_FORMAT_RAW	(IPU6_FW_ISYS_FRAME_FORMAT_RAW16)
+
+enum ipu6_fw_isys_pin_type {
+	/* captured as MIPI packets */
+	IPU6_FW_ISYS_PIN_TYPE_MIPI = 0,
+	/* captured through the SoC path */
+	IPU6_FW_ISYS_PIN_TYPE_RAW_SOC = 3,
+};
+
+/**
+ * enum ipu6_fw_isys_mipi_store_mode - describes whether long MIPI packets
+ * reach MIPI SRAM with the long packet header or without it; if the header
+ * is discarded, the only option is to capture the data with pin type MIPI.
+ */
+enum ipu6_fw_isys_mipi_store_mode {
+	IPU6_FW_ISYS_MIPI_STORE_MODE_NORMAL = 0,
+	IPU6_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER,
+	N_IPU6_FW_ISYS_MIPI_STORE_MODE
+};
+
+enum ipu6_fw_isys_capture_mode {
+	IPU6_FW_ISYS_CAPTURE_MODE_REGULAR = 0,
+	IPU6_FW_ISYS_CAPTURE_MODE_BURST,
+	N_IPU6_FW_ISYS_CAPTURE_MODE,
+};
+
+enum ipu6_fw_isys_sensor_mode {
+	IPU6_FW_ISYS_SENSOR_MODE_NORMAL = 0,
+	IPU6_FW_ISYS_SENSOR_MODE_TOBII,
+	N_IPU6_FW_ISYS_SENSOR_MODE,
+};
+
+enum ipu6_fw_isys_error {
+	IPU6_FW_ISYS_ERROR_NONE = 0,
+	IPU6_FW_ISYS_ERROR_FW_INTERNAL_CONSISTENCY,
+	IPU6_FW_ISYS_ERROR_HW_CONSISTENCY,
+	IPU6_FW_ISYS_ERROR_DRIVER_INVALID_COMMAND_SEQUENCE,
+	IPU6_FW_ISYS_ERROR_DRIVER_INVALID_DEVICE_CONFIGURATION,
+	IPU6_FW_ISYS_ERROR_DRIVER_INVALID_STREAM_CONFIGURATION,
+	IPU6_FW_ISYS_ERROR_DRIVER_INVALID_FRAME_CONFIGURATION,
+	IPU6_FW_ISYS_ERROR_INSUFFICIENT_RESOURCES,
+	IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO,
+	IPU6_FW_ISYS_ERROR_HW_REPORTED_SIG2CIO,
+	IPU6_FW_ISYS_ERROR_SENSOR_FW_SYNC,
+	IPU6_FW_ISYS_ERROR_STREAM_IN_SUSPENSION,
+	IPU6_FW_ISYS_ERROR_RESPONSE_QUEUE_FULL,
+	N_IPU6_FW_ISYS_ERROR
+};
+
+enum ipu6_fw_proxy_error {
+	IPU6_FW_PROXY_ERROR_NONE = 0,
+	IPU6_FW_PROXY_ERROR_INVALID_WRITE_REGION,
+	IPU6_FW_PROXY_ERROR_INVALID_WRITE_OFFSET,
+	N_IPU6_FW_PROXY_ERROR
+};
+
+/*
+ * The firmware ABI structures below are aligned in the firmware,
+ * so no explicit packing is needed.
+ */
+struct ipu6_fw_isys_buffer_partition_abi {
+	u32 num_gda_pages[IPU6_STREAM_ID_MAX];
+};
+
+struct ipu6_fw_isys_fw_config {
+	struct ipu6_fw_isys_buffer_partition_abi buffer_partition;
+	u32 num_send_queues[N_IPU6_FW_ISYS_QUEUE_TYPE];
+	u32 num_recv_queues[N_IPU6_FW_ISYS_QUEUE_TYPE];
+};
+
+/**
+ * struct ipu6_fw_isys_resolution_abi - generic resolution structure
+ */
+struct ipu6_fw_isys_resolution_abi {
+	u32 width;
+	u32 height;
+};
+
+/**
+ * struct ipu6_fw_isys_output_pin_payload_abi
+ * @out_buf_id: Points to output pin buffer - buffer identifier
+ * @addr: Points to output pin buffer - CSS Virtual Address
+ * @compress: Request frame compression (1), or not (0)
+ */
+struct ipu6_fw_isys_output_pin_payload_abi {
+	u64 out_buf_id;
+	u32 addr;
+	u32 compress;
+};
+
+/**
+ * struct ipu6_fw_isys_output_pin_info_abi
+ * @output_res: output pin resolution
+ * @stride: output stride in Bytes (not valid for statistics)
+ * @watermark_in_lines: pin watermark level in lines
+ * @payload_buf_size: minimum size in Bytes of all buffers that will be
+ *			supplied for capture on this pin
+ * @send_irq: assert if pin event should trigger irq
+ * @pt: pin type (enum ipu6_fw_isys_pin_type)
+ * @ft: frame format type (enum ipu6_fw_isys_frame_format_type)
+ * @input_pin_id: related input pin id
+ * @reserve_compression: reserve compression resources for pin
+ */
+struct ipu6_fw_isys_output_pin_info_abi {
+	struct ipu6_fw_isys_resolution_abi output_res;
+	u32 stride;
+	u32 watermark_in_lines;
+	u32 payload_buf_size;
+	u32 ts_offsets[IPU6_PIN_PLANES_MAX];
+	u32 s2m_pixel_soc_pixel_remapping;
+	u32 csi_be_soc_pixel_remapping;
+	u8 send_irq;
+	u8 input_pin_id;
+	u8 pt;
+	u8 ft;
+	u8 reserved;
+	u8 reserve_compression;
+	u8 snoopable;
+	u8 error_handling_enable;
+	u32 sensor_type;
+};
+
+/**
+ * struct ipu6_fw_isys_input_pin_info_abi
+ * @input_res: input resolution
+ * @dt: MIPI data type (enum ipu6_fw_isys_mipi_data_type)
+ * @mipi_store_mode: defines if legacy long packet header will be stored or
+ *		     discarded if discarded, output pin type for this
+ *		     input pin can only be MIPI
+ *		     (enum ipu6_fw_isys_mipi_store_mode)
+ * @bits_per_pix: native bits per pixel
+ * @mapped_dt: actual data type from sensor
+ * @mipi_decompression: defines which compression will be in mipi backend
+ * @crop_first_and_last_lines: control whether to crop the first and last
+ *			       lines of the input image (cropping is done
+ *			       by the HW device)
+ * @capture_mode: mode of capture, regular or burst, default value is regular
+ */
+struct ipu6_fw_isys_input_pin_info_abi {
+	struct ipu6_fw_isys_resolution_abi input_res;
+	u8 dt;
+	u8 mipi_store_mode;
+	u8 bits_per_pix;
+	u8 mapped_dt;
+	u8 mipi_decompression;
+	u8 crop_first_and_last_lines;
+	u8 capture_mode;
+	u8 reserved;
+};
+
+/**
+ * struct ipu6_fw_isys_cropping_abi - cropping coordinates
+ */
+struct ipu6_fw_isys_cropping_abi {
+	s32 top_offset;
+	s32 left_offset;
+	s32 bottom_offset;
+	s32 right_offset;
+};
+
+/**
+ * struct ipu6_fw_isys_stream_cfg_data_abi
+ * ISYS stream configuration data structure
+ * @crop: for extended use and is not used in FW currently
+ * @input_pins: input pin descriptors
+ * @output_pins: output pin descriptors
+ * @compfmt: de-compression setting for User Defined Data
+ * @nof_input_pins: number of input pins
+ * @nof_output_pins: number of output pins
+ * @send_irq_sof_discarded: send irq on discarded frame sof response
+ *		- if '1' it will override the send_resp_sof_discarded
+ *		  and send the response
+ *		- if '0' the send_resp_sof_discarded will determine
+ *		  whether to send the response
+ * @send_irq_eof_discarded: send irq on discarded frame eof response
+ *		- if '1' it will override the send_resp_eof_discarded
+ *		  and send the response
+ *		- if '0' the send_resp_eof_discarded will determine
+ *		  whether to send the response
+ * @send_resp_sof_discarded: send response for discarded frame sof detected,
+ *			     used only when send_irq_sof_discarded is '0'
+ * @send_resp_eof_discarded: send response for discarded frame eof detected,
+ *			     used only when send_irq_eof_discarded is '0'
+ * @src: Stream source index e.g. MIPI_generator_0, CSI2-rx_1
+ * @vc: MIPI Virtual Channel (up to 4 virtual per physical channel)
+ * @isl_use: indicates whether stream requires ISL and how
+ * @sensor_type: type of connected sensor, tobii or others, default is 0
+ */
+struct ipu6_fw_isys_stream_cfg_data_abi {
+	struct ipu6_fw_isys_cropping_abi crop;
+	struct ipu6_fw_isys_input_pin_info_abi input_pins[IPU6_MAX_IPINS];
+	struct ipu6_fw_isys_output_pin_info_abi output_pins[IPU6_MAX_OPINS];
+	u32 compfmt;
+	u8 nof_input_pins;
+	u8 nof_output_pins;
+	u8 send_irq_sof_discarded;
+	u8 send_irq_eof_discarded;
+	u8 send_resp_sof_discarded;
+	u8 send_resp_eof_discarded;
+	u8 src;
+	u8 vc;
+	u8 isl_use;
+	u8 sensor_type;
+	u8 reserved;
+	u8 reserved2;
+};
+
+/**
+ * struct ipu6_fw_isys_frame_buff_set_abi - frame buffer set
+ * @output_pins: output pin addresses
+ * @send_irq_sof: send irq on frame sof response
+ *		- if '1' it will override the send_resp_sof and
+ *		  send the response
+ *		- if '0' the send_resp_sof will determine whether to
+ *		  send the response
+ * @send_irq_eof: send irq on frame eof response
+ *		- if '1' it will override the send_resp_eof and
+ *		  send the response
+ *		- if '0' the send_resp_eof will determine whether to
+ *		  send the response
+ * @send_resp_sof: send response for frame sof detected,
+ *		   used only when send_irq_sof is '0'
+ * @send_resp_eof: send response for frame eof detected,
+ *		   used only when send_irq_eof is '0'
+ * @send_resp_capture_ack: send response for capture ack event
+ * @send_resp_capture_done: send response for capture done event
+ */
+struct ipu6_fw_isys_frame_buff_set_abi {
+	struct ipu6_fw_isys_output_pin_payload_abi output_pins[IPU6_MAX_OPINS];
+	u8 send_irq_sof;
+	u8 send_irq_eof;
+	u8 send_irq_capture_ack;
+	u8 send_irq_capture_done;
+	u8 send_resp_sof;
+	u8 send_resp_eof;
+	u8 send_resp_capture_ack;
+	u8 send_resp_capture_done;
+	u8 reserved[8];
+};
+
+/**
+ * struct ipu6_fw_isys_error_info_abi
+ * @error: error code if something went wrong
+ * @error_details: depending on error code, it may contain additional error info
+ */
+struct ipu6_fw_isys_error_info_abi {
+	u32 error;
+	u32 error_details;
+};
+
+/**
+ * struct ipu6_fw_isys_resp_info_abi
+ * @pin: this var is only valid for pin event related responses,
+ *     contains pin addresses
+ * @error_info: error information from the FW
+ * @timestamp: Time information for event if available
+ * @stream_handle: stream id the response corresponds to
+ * @type: response type (enum ipu6_fw_isys_resp_type)
+ * @pin_id: pin id that the pin payload corresponds to
+ */
+struct ipu6_fw_isys_resp_info_abi {
+	u64 buf_id;
+	struct ipu6_fw_isys_output_pin_payload_abi pin;
+	struct ipu6_fw_isys_error_info_abi error_info;
+	u32 timestamp[2];
+	u8 stream_handle;
+	u8 type;
+	u8 pin_id;
+	u8 reserved;
+	u32 reserved2;
+};
+
+/**
+ * struct ipu6_fw_isys_proxy_error_info_abi
+ * @error: error code if something went wrong
+ * @error_details: depending on error code, it may contain additional
+ *		   error info
+ */
+struct ipu6_fw_isys_proxy_error_info_abi {
+	u32 error;
+	u32 error_details;
+};
+
+struct ipu6_fw_isys_proxy_resp_info_abi {
+	u32 request_id;
+	struct ipu6_fw_isys_proxy_error_info_abi error_info;
+};
+
+/**
+ * struct ipu6_fw_proxy_write_queue_token
+ * @request_id: update id for the specific proxy write request
+ * @region_index: Region id for the proxy write request
+ * @offset: Offset of the write request according to the base address
+ *	    of the region
+ * @value: Value that is requested to be written with the proxy write request
+ */
+struct ipu6_fw_proxy_write_queue_token {
+	u32 request_id;
+	u32 region_index;
+	u32 offset;
+	u32 value;
+};
+
+/**
+ * struct ipu6_fw_resp_queue_token
+ */
+struct ipu6_fw_resp_queue_token {
+	struct ipu6_fw_isys_resp_info_abi resp_info;
+};
+
+/**
+ * struct ipu6_fw_send_queue_token
+ */
+struct ipu6_fw_send_queue_token {
+	u64 buf_handle;
+	u32 payload;
+	u16 send_type;
+	u16 stream_id;
+};
+
+/**
+ * struct ipu6_fw_proxy_resp_queue_token
+ */
+struct ipu6_fw_proxy_resp_queue_token {
+	struct ipu6_fw_isys_proxy_resp_info_abi proxy_resp_info;
+};
+
+/**
+ * struct ipu6_fw_proxy_send_queue_token
+ */
+struct ipu6_fw_proxy_send_queue_token {
+	u32 request_id;
+	u32 region_index;
+	u32 offset;
+	u32 value;
+};
+
+void
+ipu6_fw_isys_dump_stream_cfg(struct device *dev,
+			     struct ipu6_fw_isys_stream_cfg_data_abi *cfg);
+void
+ipu6_fw_isys_dump_frame_buff_set(struct device *dev,
+				 struct ipu6_fw_isys_frame_buff_set_abi *buf,
+				 unsigned int outputs);
+int ipu6_fw_isys_init(struct ipu6_isys *isys, unsigned int num_streams);
+int ipu6_fw_isys_close(struct ipu6_isys *isys);
+int ipu6_fw_isys_simple_cmd(struct ipu6_isys *isys,
+			    const unsigned int stream_handle, u16 send_type);
+int ipu6_fw_isys_complex_cmd(struct ipu6_isys *isys,
+			     const unsigned int stream_handle,
+			     void *cpu_mapped_buf, dma_addr_t dma_mapped_buf,
+			     size_t size, u16 send_type);
+int ipu6_fw_isys_send_proxy_token(struct ipu6_isys *isys,
+				  unsigned int req_id,
+				  unsigned int index,
+				  unsigned int offset, u32 value);
+void ipu6_fw_isys_cleanup(struct ipu6_isys *isys);
+struct ipu6_fw_isys_resp_info_abi *
+ipu6_fw_isys_get_resp(void *context, unsigned int queue);
+void ipu6_fw_isys_put_resp(void *context, unsigned int queue);
+#endif
-- 
2.43.0

From 2d13721450bd1d8e778c0727a7c3eac6cc1fd6c0 Mon Sep 17 00:00:00 2001
From: Bingbu Cao <bingbu.cao@intel.com>
Date: Thu, 11 Jan 2024 14:55:22 +0800
Subject: [PATCH 08/31] media: intel/ipu6: add IPU6 CSI2 receiver v4l2
 sub-device

The input system CSI2 receiver is exposed as a v4l2 sub-device.
Each CSI2 sub-device represents a single CSI2 hardware port, which
can be linked with an external sub-device such as a camera sensor
via the ISYS CSI2 sink pad. The CSI2 source pad is linked to the
sink pad of the video capture device.

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Link: https://lore.kernel.org/r/20240111065531.2418836-9-bingbu.cao@intel.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c | 666 ++++++++++++++++++
 drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h |  81 +++
 .../media/pci/intel/ipu6/ipu6-isys-subdev.c   | 381 ++++++++++
 .../media/pci/intel/ipu6/ipu6-isys-subdev.h   |  61 ++
 .../intel/ipu6/ipu6-platform-isys-csi2-reg.h  | 189 +++++
 5 files changed, 1378 insertions(+)
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h

diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
new file mode 100644
index 000000000000..ac9fa3e0d7ab
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2023 Intel Corporation
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/minmax.h>
+#include <linux/sprintf.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+
+#include "ipu6-bus.h"
+#include "ipu6-isys.h"
+#include "ipu6-isys-csi2.h"
+#include "ipu6-isys-subdev.h"
+#include "ipu6-platform-isys-csi2-reg.h"
+
+static const u32 csi2_supported_codes[] = {
+	MEDIA_BUS_FMT_RGB565_1X16,
+	MEDIA_BUS_FMT_RGB888_1X24,
+	MEDIA_BUS_FMT_UYVY8_1X16,
+	MEDIA_BUS_FMT_YUYV8_1X16,
+	MEDIA_BUS_FMT_SBGGR10_1X10,
+	MEDIA_BUS_FMT_SGBRG10_1X10,
+	MEDIA_BUS_FMT_SGRBG10_1X10,
+	MEDIA_BUS_FMT_SRGGB10_1X10,
+	MEDIA_BUS_FMT_SBGGR12_1X12,
+	MEDIA_BUS_FMT_SGBRG12_1X12,
+	MEDIA_BUS_FMT_SGRBG12_1X12,
+	MEDIA_BUS_FMT_SRGGB12_1X12,
+	MEDIA_BUS_FMT_SBGGR8_1X8,
+	MEDIA_BUS_FMT_SGBRG8_1X8,
+	MEDIA_BUS_FMT_SGRBG8_1X8,
+	MEDIA_BUS_FMT_SRGGB8_1X8,
+	0
+};
+
+/*
+ * Strings corresponding to CSI-2 receiver errors are here.
+ * Corresponding macros are defined in the header file.
+ */
+static const struct ipu6_csi2_error dphy_rx_errors[] = {
+	{ "Single packet header error corrected", true },
+	{ "Multiple packet header errors detected", true },
+	{ "Payload checksum (CRC) error", true },
+	{ "Transfer FIFO overflow", false },
+	{ "Reserved short packet data type detected", true },
+	{ "Reserved long packet data type detected", true },
+	{ "Incomplete long packet detected", false },
+	{ "Frame sync error", false },
+	{ "Line sync error", false },
+	{ "DPHY recoverable synchronization error", true },
+	{ "DPHY fatal error", false },
+	{ "DPHY elastic FIFO overflow", false },
+	{ "Inter-frame short packet discarded", true },
+	{ "Inter-frame long packet discarded", true },
+	{ "MIPI pktgen overflow", false },
+	{ "MIPI pktgen data loss", false },
+	{ "FIFO overflow", false },
+	{ "Lane deskew", false },
+	{ "SOT sync error", false },
+	{ "HSIDLE detected", false }
+};
+
+s64 ipu6_isys_csi2_get_link_freq(struct ipu6_isys_csi2 *csi2)
+{
+	struct media_pad *src_pad;
+	struct v4l2_subdev *ext_sd;
+	struct device *dev;
+
+	if (!csi2)
+		return -EINVAL;
+
+	dev = &csi2->isys->adev->auxdev.dev;
+	src_pad = media_entity_remote_source_pad_unique(&csi2->asd.sd.entity);
+	if (IS_ERR_OR_NULL(src_pad)) {
+		dev_err(dev, "can't get source pad of %s\n", csi2->asd.sd.name);
+		return -ENOLINK;
+	}
+
+	ext_sd = media_entity_to_v4l2_subdev(src_pad->entity);
+	if (WARN(!ext_sd, "Failed to get subdev for %s\n", csi2->asd.sd.name))
+		return -ENODEV;
+
+	return v4l2_get_link_freq(ext_sd->ctrl_handler, 0, 0);
+}
+
+static int csi2_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+				struct v4l2_event_subscription *sub)
+{
+	struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd);
+	struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd);
+	struct device *dev = &csi2->isys->adev->auxdev.dev;
+
+	dev_dbg(dev, "csi2 subscribe event(type %u id %u)\n",
+		sub->type, sub->id);
+
+	switch (sub->type) {
+	case V4L2_EVENT_FRAME_SYNC:
+		return v4l2_event_subscribe(fh, sub, 10, NULL);
+	case V4L2_EVENT_CTRL:
+		return v4l2_ctrl_subscribe_event(fh, sub);
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct v4l2_subdev_core_ops csi2_sd_core_ops = {
+	.subscribe_event = csi2_subscribe_event,
+	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+/*
+ * The input system CSI2+ receiver has several
+ * parameters affecting the receiver timings. These depend
+ * on the MIPI bus frequency F in Hz (sensor transmitter rate)
+ * as follows:
+ *	register value = (A/1e9 + B * UI) / COUNT_ACC
+ * where
+ *	UI = 1 / (2 * F) in seconds
+ *	COUNT_ACC = counter accuracy in seconds
+ *	COUNT_ACC = 0.125 ns = 1 / 8 ns, ACCINV = 8.
+ *
+ * A and B are coefficients from the table below, depending on whether
+ * the register minimum or maximum value is calculated.
+ *				       Minimum     Maximum
+ * Clock lane			       A     B     A     B
+ * reg_rx_csi_dly_cnt_termen_clane     0     0    38     0
+ * reg_rx_csi_dly_cnt_settle_clane    95    -8   300   -16
+ * Data lanes
+ * reg_rx_csi_dly_cnt_termen_dlane0    0     0    35     4
+ * reg_rx_csi_dly_cnt_settle_dlane0   85    -2   145    -6
+ * reg_rx_csi_dly_cnt_termen_dlane1    0     0    35     4
+ * reg_rx_csi_dly_cnt_settle_dlane1   85    -2   145    -6
+ * reg_rx_csi_dly_cnt_termen_dlane2    0     0    35     4
+ * reg_rx_csi_dly_cnt_settle_dlane2   85    -2   145    -6
+ * reg_rx_csi_dly_cnt_termen_dlane3    0     0    35     4
+ * reg_rx_csi_dly_cnt_settle_dlane3   85    -2   145    -6
+ *
+ * We use the minimum values of both A and B.
+ */
+
+#define DIV_SHIFT	8
+#define CSI2_ACCINV	8
+
+static u32 calc_timing(s32 a, s32 b, s64 link_freq, s32 accinv)
+{
+	return accinv * a + (accinv * b * (500000000 >> DIV_SHIFT)
+			     / (s32)(link_freq >> DIV_SHIFT));
+}
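+
+/*
+ * Worked example (illustration only): with ACCINV = 8 and a 400 MHz link
+ * frequency, UI = 1 / (2 * 400 MHz) = 1.25 ns, so the clock lane settle
+ * minimum (A = 95, B = -8) becomes
+ * (95 ns - 8 * 1.25 ns) / 0.125 ns = 680, matching
+ * calc_timing(95, -8, 400000000, 8) = 8 * 95 - 64 * 500000000 / 400000000
+ * = 760 - 80 = 680. The DIV_SHIFT scaling keeps the intermediate 32-bit
+ * products from overflowing.
+ */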
+
+static int
+ipu6_isys_csi2_calc_timing(struct ipu6_isys_csi2 *csi2,
+			   struct ipu6_isys_csi2_timing *timing, s32 accinv)
+{
+	struct device *dev = &csi2->isys->adev->auxdev.dev;
+	s64 link_freq;
+
+	link_freq = ipu6_isys_csi2_get_link_freq(csi2);
+	if (link_freq < 0)
+		return link_freq;
+
+	timing->ctermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A,
+				      CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B,
+				      link_freq, accinv);
+	timing->csettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A,
+				      CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B,
+				      link_freq, accinv);
+	timing->dtermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A,
+				      CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B,
+				      link_freq, accinv);
+	timing->dsettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A,
+				      CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B,
+				      link_freq, accinv);
+
+	dev_dbg(dev, "ctermen %u csettle %u dtermen %u dsettle %u\n",
+		timing->ctermen, timing->csettle,
+		timing->dtermen, timing->dsettle);
+
+	return 0;
+}
+
+void ipu6_isys_register_errors(struct ipu6_isys_csi2 *csi2)
+{
+	u32 irq = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+			CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);
+	struct ipu6_isys *isys = csi2->isys;
+	u32 mask;
+
+	mask = isys->pdata->ipdata->csi2.irq_mask;
+	writel(irq & mask, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+	       CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+	csi2->receiver_errors |= irq & mask;
+}
+
+void ipu6_isys_csi2_error(struct ipu6_isys_csi2 *csi2)
+{
+	struct device *dev = &csi2->isys->adev->auxdev.dev;
+	const struct ipu6_csi2_error *errors;
+	u32 status;
+	u32 i;
+
+	/* register errors once more in case interrupts are disabled */
+	ipu6_isys_register_errors(csi2);
+	status = csi2->receiver_errors;
+	csi2->receiver_errors = 0;
+	errors = dphy_rx_errors;
+
+	for (i = 0; i < CSI_RX_NUM_ERRORS_IN_IRQ; i++) {
+		if (status & BIT(i))
+			dev_err_ratelimited(dev, "csi2-%i error: %s\n",
+					    csi2->port, errors[i].error_string);
+	}
+}
+
+static int ipu6_isys_csi2_set_stream(struct v4l2_subdev *sd,
+				     const struct ipu6_isys_csi2_timing *timing,
+				     unsigned int nlanes, int enable)
+{
+	struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd);
+	struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd);
+	struct ipu6_isys *isys = csi2->isys;
+	struct device *dev = &isys->adev->auxdev.dev;
+	struct ipu6_isys_csi2_config cfg;
+	unsigned int nports;
+	int ret = 0;
+	u32 mask = 0;
+	u32 i;
+
+	dev_dbg(dev, "stream %s CSI2-%u with %u lanes\n", enable ? "on" : "off",
+		csi2->port, nlanes);
+
+	cfg.port = csi2->port;
+	cfg.nlanes = nlanes;
+
+	mask = isys->pdata->ipdata->csi2.irq_mask;
+	nports = isys->pdata->ipdata->csi2.nports;
+
+	if (!enable) {
+		writel(0, csi2->base + CSI_REG_CSI_FE_ENABLE);
+		writel(0, csi2->base + CSI_REG_PPI2CSI_ENABLE);
+
+		writel(0,
+		       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+		       CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET);
+		writel(mask,
+		       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+		       CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+		writel(0,
+		       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+		       CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET);
+		writel(0xffffffff,
+		       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+		       CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+
+		isys->phy_set_power(isys, &cfg, timing, false);
+
+		writel(0, isys->pdata->base + CSI_REG_HUB_FW_ACCESS_PORT
+		       (isys->pdata->ipdata->csi2.fw_access_port_ofs,
+			csi2->port));
+		writel(0, isys->pdata->base +
+		       CSI_REG_HUB_DRV_ACCESS_PORT(csi2->port));
+
+		return ret;
+	}
+
+	/* toggle the CSI-2 port reset */
+	writel(0x1, csi2->base + CSI_REG_PORT_GPREG_SRST);
+	usleep_range(100, 200);
+	writel(0x0, csi2->base + CSI_REG_PORT_GPREG_SRST);
+
+	/* enable port clock */
+	for (i = 0; i < nports; i++) {
+		writel(1, isys->pdata->base + CSI_REG_HUB_DRV_ACCESS_PORT(i));
+		writel(1, isys->pdata->base + CSI_REG_HUB_FW_ACCESS_PORT
+		       (isys->pdata->ipdata->csi2.fw_access_port_ofs, i));
+	}
+
+	/* enable all error related irq */
+	writel(mask,
+	       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+	       CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);
+	writel(mask,
+	       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+	       CSI_PORT_REG_BASE_IRQ_MASK_OFFSET);
+	writel(mask,
+	       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+	       CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+	writel(mask,
+	       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+	       CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET);
+	writel(mask,
+	       csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+	       CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET);
+
+	/*
+	 * Use events from the firmware instead of the irq to handle the CSI2
+	 * sync events, which reduces system wakeups. If the CSI2 sync irq is
+	 * enabled, the firmware CSI2 sync event must be disabled to avoid
+	 * duplicate handling.
+	 */
+	writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+	       CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);
+	writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+	       CSI_PORT_REG_BASE_IRQ_MASK_OFFSET);
+	writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+	       CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+	writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+	       CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET);
+	writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+	       CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET);
+
+	/* configure to enable FE and PPI2CSI */
+	writel(0, csi2->base + CSI_REG_CSI_FE_MODE);
+	writel(CSI_SENSOR_INPUT, csi2->base + CSI_REG_CSI_FE_MUX_CTRL);
+	writel(CSI_CNTR_SENSOR_LINE_ID | CSI_CNTR_SENSOR_FRAME_ID,
+	       csi2->base + CSI_REG_CSI_FE_SYNC_CNTR_SEL);
+	writel(FIELD_PREP(PPI_INTF_CONFIG_NOF_ENABLED_DLANES_MASK, nlanes - 1),
+	       csi2->base + CSI_REG_PPI2CSI_CONFIG_PPI_INTF);
+
+	writel(1, csi2->base + CSI_REG_PPI2CSI_ENABLE);
+	writel(1, csi2->base + CSI_REG_CSI_FE_ENABLE);
+
+	ret = isys->phy_set_power(isys, &cfg, timing, true);
+	if (ret)
+		dev_err(dev, "csi-%d phy power up failed %d\n", csi2->port,
+			ret);
+
+	return ret;
+}
+
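+/*
+ * The s_stream callback is refcounted with stream_count: only the first
+ * enable and the last disable actually touch the CSI-2 receiver hardware,
+ * so several streams (e.g. different virtual channels) can share one port.
+ */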
+static int set_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd);
+	struct ipu6_isys_csi2 *csi2 = to_ipu6_isys_csi2(asd);
+	struct device *dev = &csi2->isys->adev->auxdev.dev;
+	struct ipu6_isys_csi2_timing timing = { };
+	unsigned int nlanes;
+	int ret;
+
+	dev_dbg(dev, "csi2 stream %s callback\n", enable ? "on" : "off");
+
+	if (!enable) {
+		csi2->stream_count--;
+		if (csi2->stream_count)
+			return 0;
+
+		ipu6_isys_csi2_set_stream(sd, &timing, 0, enable);
+		return 0;
+	}
+
+	if (csi2->stream_count) {
+		csi2->stream_count++;
+		return 0;
+	}
+
+	nlanes = csi2->nlanes;
+
+	ret = ipu6_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV);
+	if (ret)
+		return ret;
+
+	ret = ipu6_isys_csi2_set_stream(sd, &timing, nlanes, enable);
+	if (ret)
+		return ret;
+
+	csi2->stream_count++;
+
+	return 0;
+}
+
+static int ipu6_isys_csi2_set_sel(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_state *state,
+				  struct v4l2_subdev_selection *sel)
+{
+	struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd);
+	struct device *dev = &asd->isys->adev->auxdev.dev;
+	struct v4l2_mbus_framefmt *sink_ffmt;
+	struct v4l2_mbus_framefmt *src_ffmt;
+	struct v4l2_rect *crop;
+
+	if (sel->pad == CSI2_PAD_SINK || sel->target != V4L2_SEL_TGT_CROP)
+		return -EINVAL;
+
+	sink_ffmt = v4l2_subdev_state_get_opposite_stream_format(state,
+								 sel->pad,
+								 sel->stream);
+	if (!sink_ffmt)
+		return -EINVAL;
+
+	src_ffmt = v4l2_subdev_state_get_stream_format(state, sel->pad,
+						       sel->stream);
+	if (!src_ffmt)
+		return -EINVAL;
+
+	crop = v4l2_subdev_state_get_stream_crop(state, sel->pad, sel->stream);
+	if (!crop)
+		return -EINVAL;
+
+	/* Only vertical cropping is supported */
+	sel->r.left = 0;
+	sel->r.width = sink_ffmt->width;
+	/* Non-bayer formats can't be single line cropped */
+	if (!ipu6_isys_is_bayer_format(sink_ffmt->code))
+		sel->r.top &= ~1;
+	sel->r.height = clamp(sel->r.height & ~1, IPU6_ISYS_MIN_HEIGHT,
+			      sink_ffmt->height - sel->r.top);
+	*crop = sel->r;
+
+	/* update source pad format */
+	src_ffmt->width = sel->r.width;
+	src_ffmt->height = sel->r.height;
+	if (ipu6_isys_is_bayer_format(sink_ffmt->code))
+		src_ffmt->code = ipu6_isys_convert_bayer_order(sink_ffmt->code,
+							       sel->r.left,
+							       sel->r.top);
+	dev_dbg(dev, "set crop for %s sel: %d,%d,%d,%d code: 0x%x\n",
+		sd->name, sel->r.left, sel->r.top, sel->r.width, sel->r.height,
+		src_ffmt->code);
+
+	return 0;
+}
+
+static int ipu6_isys_csi2_get_sel(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_state *state,
+				  struct v4l2_subdev_selection *sel)
+{
+	struct v4l2_mbus_framefmt *sink_ffmt;
+	struct v4l2_rect *crop;
+	int ret = 0;
+
+	if (sd->entity.pads[sel->pad].flags & MEDIA_PAD_FL_SINK)
+		return -EINVAL;
+
+	sink_ffmt = v4l2_subdev_state_get_opposite_stream_format(state,
+								 sel->pad,
+								 sel->stream);
+	if (!sink_ffmt)
+		return -EINVAL;
+
+	crop = v4l2_subdev_state_get_stream_crop(state, sel->pad, sel->stream);
+	if (!crop)
+		return -EINVAL;
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = sink_ffmt->width;
+		sel->r.height = sink_ffmt->height;
+		break;
+	case V4L2_SEL_TGT_CROP:
+		sel->r = *crop;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static const struct v4l2_subdev_video_ops csi2_sd_video_ops = {
+	.s_stream = set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops csi2_sd_pad_ops = {
+	.init_cfg = ipu6_isys_subdev_init_cfg,
+	.get_fmt = v4l2_subdev_get_fmt,
+	.set_fmt = ipu6_isys_subdev_set_fmt,
+	.get_selection = ipu6_isys_csi2_get_sel,
+	.set_selection = ipu6_isys_csi2_set_sel,
+	.enum_mbus_code = ipu6_isys_subdev_enum_mbus_code,
+	.set_routing = ipu6_isys_subdev_set_routing,
+};
+
+static const struct v4l2_subdev_ops csi2_sd_ops = {
+	.core = &csi2_sd_core_ops,
+	.video = &csi2_sd_video_ops,
+	.pad = &csi2_sd_pad_ops,
+};
+
+static const struct media_entity_operations csi2_entity_ops = {
+	.link_validate = v4l2_subdev_link_validate,
+	.has_pad_interdep = v4l2_subdev_has_pad_interdep,
+};
+
+void ipu6_isys_csi2_cleanup(struct ipu6_isys_csi2 *csi2)
+{
+	if (!csi2->isys)
+		return;
+
+	v4l2_device_unregister_subdev(&csi2->asd.sd);
+	v4l2_subdev_cleanup(&csi2->asd.sd);
+	ipu6_isys_subdev_cleanup(&csi2->asd);
+	csi2->isys = NULL;
+}
+
+int ipu6_isys_csi2_init(struct ipu6_isys_csi2 *csi2,
+			struct ipu6_isys *isys,
+			void __iomem *base, unsigned int index)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	int ret;
+
+	csi2->isys = isys;
+	csi2->base = base;
+	csi2->port = index;
+
+	csi2->asd.sd.entity.ops = &csi2_entity_ops;
+	csi2->asd.isys = isys;
+	ret = ipu6_isys_subdev_init(&csi2->asd, &csi2_sd_ops, 0,
+				    NR_OF_CSI2_SINK_PADS, NR_OF_CSI2_SRC_PADS);
+	if (ret)
+		goto fail;
+
+	csi2->asd.source = IPU6_FW_ISYS_STREAM_SRC_CSI2_PORT0 + index;
+	csi2->asd.supported_codes = csi2_supported_codes;
+	snprintf(csi2->asd.sd.name, sizeof(csi2->asd.sd.name),
+		 IPU6_ISYS_ENTITY_PREFIX " CSI2 %u", index);
+	v4l2_set_subdevdata(&csi2->asd.sd, &csi2->asd);
+	ret = v4l2_subdev_init_finalize(&csi2->asd.sd);
+	if (ret) {
+		dev_err(dev, "failed to init v4l2 subdev\n");
+		goto fail;
+	}
+
+	ret = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2->asd.sd);
+	if (ret) {
+		dev_err(dev, "failed to register v4l2 subdev\n");
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	ipu6_isys_csi2_cleanup(csi2);
+
+	return ret;
+}
+
+void ipu6_isys_csi2_sof_event_by_stream(struct ipu6_isys_stream *stream)
+{
+	struct video_device *vdev = stream->asd->sd.devnode;
+	struct device *dev = &stream->isys->adev->auxdev.dev;
+	struct ipu6_isys_csi2 *csi2 = ipu6_isys_subdev_to_csi2(stream->asd);
+	struct v4l2_event ev = {
+		.type = V4L2_EVENT_FRAME_SYNC,
+	};
+
+	ev.u.frame_sync.frame_sequence = atomic_fetch_inc(&stream->sequence);
+	v4l2_event_queue(vdev, &ev);
+
+	dev_dbg(dev, "sof_event::csi2-%i sequence: %i, vc: %d\n",
+		csi2->port, ev.u.frame_sync.frame_sequence, stream->vc);
+}
+
+void ipu6_isys_csi2_eof_event_by_stream(struct ipu6_isys_stream *stream)
+{
+	struct device *dev = &stream->isys->adev->auxdev.dev;
+	struct ipu6_isys_csi2 *csi2 = ipu6_isys_subdev_to_csi2(stream->asd);
+	u32 frame_sequence = atomic_read(&stream->sequence);
+
+	dev_dbg(dev, "eof_event::csi2-%i sequence: %i\n",
+		csi2->port, frame_sequence);
+}
+
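+/*
+ * Look up, in the frame descriptor of the remote (source) sub-device
+ * connected to the CSI-2 sink pad, the entry matching source_stream.
+ * On success *entry holds that entry and *nr_queues the number of entries
+ * sharing its virtual channel.
+ */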
+int ipu6_isys_csi2_get_remote_desc(u32 source_stream,
+				   struct ipu6_isys_csi2 *csi2,
+				   struct media_entity *source_entity,
+				   struct v4l2_mbus_frame_desc_entry *entry,
+				   int *nr_queues)
+{
+	struct v4l2_mbus_frame_desc_entry *desc_entry = NULL;
+	struct device *dev = &csi2->isys->adev->auxdev.dev;
+	struct v4l2_mbus_frame_desc desc;
+	struct v4l2_subdev *source;
+	struct media_pad *pad;
+	unsigned int i;
+	int count = 0;
+	int ret;
+
+	source = media_entity_to_v4l2_subdev(source_entity);
+	if (!source)
+		return -EPIPE;
+
+	pad = media_pad_remote_pad_first(&csi2->asd.pad[CSI2_PAD_SINK]);
+	if (!pad)
+		return -EPIPE;
+
+	ret = v4l2_subdev_call(source, pad, get_frame_desc, pad->index, &desc);
+	if (ret)
+		return ret;
+
+	if (desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
+		dev_err(dev, "Unsupported frame descriptor type\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < desc.num_entries; i++) {
+		if (source_stream == desc.entry[i].stream) {
+			desc_entry = &desc.entry[i];
+			break;
+		}
+	}
+
+	if (!desc_entry) {
+		dev_err(dev, "Failed to find stream %u from remote subdev\n",
+			source_stream);
+		return -EINVAL;
+	}
+
+	if (desc_entry->bus.csi2.vc >= NR_OF_CSI2_VC) {
+		dev_err(dev, "invalid vc %d\n", desc_entry->bus.csi2.vc);
+		return -EINVAL;
+	}
+
+	*entry = *desc_entry;
+	for (i = 0; i < desc.num_entries; i++) {
+		if (entry->bus.csi2.vc == desc.entry[i].bus.csi2.vc)
+			count++;
+	}
+
+	*nr_queues = count;
+	return 0;
+}
+
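+/*
+ * Update the "enabled" flag of the CSI-2 sub-device stream that feeds this
+ * video node in the active sub-device state.
+ */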
+void ipu6_isys_set_csi2_streams_status(struct ipu6_isys_video *av, bool status)
+{
+	struct ipu6_isys_stream *stream = av->stream;
+	struct v4l2_subdev *sd = &stream->asd->sd;
+	struct v4l2_subdev_state *state;
+	struct media_pad *r_pad;
+	unsigned int i;
+	u32 r_stream;
+
+	r_pad = media_pad_remote_pad_first(&av->pad);
+	r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, r_pad->index);
+
+	state = v4l2_subdev_lock_and_get_active_state(sd);
+
+	for (i = 0; i < state->stream_configs.num_configs; i++) {
+		struct v4l2_subdev_stream_config *cfg =
+			&state->stream_configs.configs[i];
+
+		if (cfg->pad == r_pad->index && r_stream == cfg->stream) {
+			dev_dbg(&av->isys->adev->auxdev.dev,
+				"%s: pad:%u, stream:%u, status:%u\n",
+				sd->entity.name, r_pad->index, r_stream,
+				status);
+			cfg->enabled = status;
+		}
+	}
+
+	v4l2_subdev_unlock_state(state);
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h
new file mode 100644
index 000000000000..d4765bae6112
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013 - 2023 Intel Corporation */
+
+#ifndef IPU6_ISYS_CSI2_H
+#define IPU6_ISYS_CSI2_H
+
+#include <linux/container_of.h>
+
+#include "ipu6-isys-subdev.h"
+
+struct media_entity;
+struct v4l2_mbus_frame_desc_entry;
+
+struct ipu6_isys_video;
+struct ipu6_isys;
+struct ipu6_isys_csi2_pdata;
+struct ipu6_isys_stream;
+
+#define NR_OF_CSI2_VC			16
+#define INVALID_VC_ID			-1
+#define NR_OF_CSI2_SINK_PADS		1
+#define CSI2_PAD_SINK			0
+#define NR_OF_CSI2_SRC_PADS		8
+#define CSI2_PAD_SRC			1
+#define NR_OF_CSI2_PADS	(NR_OF_CSI2_SINK_PADS + NR_OF_CSI2_SRC_PADS)
+
+#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A		0
+#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B		0
+#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A		95
+#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B		-8
+
+#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A		0
+#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B		0
+#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A		85
+#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B		-2
+
+struct ipu6_isys_csi2 {
+	struct ipu6_isys_subdev asd;
+	struct ipu6_isys_csi2_pdata *pdata;
+	struct ipu6_isys *isys;
+
+	void __iomem *base;
+	u32 receiver_errors;
+	unsigned int nlanes;
+	unsigned int port;
+	unsigned int stream_count;
+};
+
+struct ipu6_isys_csi2_timing {
+	u32 ctermen;
+	u32 csettle;
+	u32 dtermen;
+	u32 dsettle;
+};
+
+struct ipu6_csi2_error {
+	const char *error_string;
+	bool is_info_only;
+};
+
+#define ipu6_isys_subdev_to_csi2(__sd) \
+	container_of(__sd, struct ipu6_isys_csi2, asd)
+
+#define to_ipu6_isys_csi2(__asd) container_of(__asd, struct ipu6_isys_csi2, asd)
+
+s64 ipu6_isys_csi2_get_link_freq(struct ipu6_isys_csi2 *csi2);
+int ipu6_isys_csi2_init(struct ipu6_isys_csi2 *csi2, struct ipu6_isys *isys,
+			void __iomem *base, unsigned int index);
+void ipu6_isys_csi2_cleanup(struct ipu6_isys_csi2 *csi2);
+void ipu6_isys_csi2_sof_event_by_stream(struct ipu6_isys_stream *stream);
+void ipu6_isys_csi2_eof_event_by_stream(struct ipu6_isys_stream *stream);
+void ipu6_isys_register_errors(struct ipu6_isys_csi2 *csi2);
+void ipu6_isys_csi2_error(struct ipu6_isys_csi2 *csi2);
+int ipu6_isys_csi2_get_remote_desc(u32 source_stream,
+				   struct ipu6_isys_csi2 *csi2,
+				   struct media_entity *source_entity,
+				   struct v4l2_mbus_frame_desc_entry *entry,
+				   int *nr_queues);
+void ipu6_isys_set_csi2_streams_status(struct ipu6_isys_video *av, bool status);
+
+#endif /* IPU6_ISYS_CSI2_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c
new file mode 100644
index 000000000000..510c5ca34f9f
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2023 Intel Corporation
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/minmax.h>
+
+#include <media/media-entity.h>
+#include <media/mipi-csi2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "ipu6-bus.h"
+#include "ipu6-isys.h"
+#include "ipu6-isys-subdev.h"
+
+unsigned int ipu6_isys_mbus_code_to_bpp(u32 code)
+{
+	switch (code) {
+	case MEDIA_BUS_FMT_RGB888_1X24:
+		return 24;
+	case MEDIA_BUS_FMT_RGB565_1X16:
+	case MEDIA_BUS_FMT_UYVY8_1X16:
+	case MEDIA_BUS_FMT_YUYV8_1X16:
+		return 16;
+	case MEDIA_BUS_FMT_SBGGR12_1X12:
+	case MEDIA_BUS_FMT_SGBRG12_1X12:
+	case MEDIA_BUS_FMT_SGRBG12_1X12:
+	case MEDIA_BUS_FMT_SRGGB12_1X12:
+		return 12;
+	case MEDIA_BUS_FMT_SBGGR10_1X10:
+	case MEDIA_BUS_FMT_SGBRG10_1X10:
+	case MEDIA_BUS_FMT_SGRBG10_1X10:
+	case MEDIA_BUS_FMT_SRGGB10_1X10:
+		return 10;
+	case MEDIA_BUS_FMT_SBGGR8_1X8:
+	case MEDIA_BUS_FMT_SGBRG8_1X8:
+	case MEDIA_BUS_FMT_SGRBG8_1X8:
+	case MEDIA_BUS_FMT_SRGGB8_1X8:
+		return 8;
+	default:
+		WARN_ON(1);
+		return 8;
+	}
+}
+
+unsigned int ipu6_isys_mbus_code_to_mipi(u32 code)
+{
+	switch (code) {
+	case MEDIA_BUS_FMT_RGB565_1X16:
+		return MIPI_CSI2_DT_RGB565;
+	case MEDIA_BUS_FMT_RGB888_1X24:
+		return MIPI_CSI2_DT_RGB888;
+	case MEDIA_BUS_FMT_UYVY8_1X16:
+	case MEDIA_BUS_FMT_YUYV8_1X16:
+		return MIPI_CSI2_DT_YUV422_8B;
+	case MEDIA_BUS_FMT_SBGGR12_1X12:
+	case MEDIA_BUS_FMT_SGBRG12_1X12:
+	case MEDIA_BUS_FMT_SGRBG12_1X12:
+	case MEDIA_BUS_FMT_SRGGB12_1X12:
+		return MIPI_CSI2_DT_RAW12;
+	case MEDIA_BUS_FMT_SBGGR10_1X10:
+	case MEDIA_BUS_FMT_SGBRG10_1X10:
+	case MEDIA_BUS_FMT_SGRBG10_1X10:
+	case MEDIA_BUS_FMT_SRGGB10_1X10:
+		return MIPI_CSI2_DT_RAW10;
+	case MEDIA_BUS_FMT_SBGGR8_1X8:
+	case MEDIA_BUS_FMT_SGBRG8_1X8:
+	case MEDIA_BUS_FMT_SGRBG8_1X8:
+	case MEDIA_BUS_FMT_SRGGB8_1X8:
+		return MIPI_CSI2_DT_RAW8;
+	default:
+		/* return unavailable MIPI data type - 0x3f */
+		WARN_ON(1);
+		return 0x3f;
+	}
+}
+
+bool ipu6_isys_is_bayer_format(u32 code)
+{
+	switch (ipu6_isys_mbus_code_to_mipi(code)) {
+	case MIPI_CSI2_DT_RAW8:
+	case MIPI_CSI2_DT_RAW10:
+	case MIPI_CSI2_DT_RAW12:
+		return true;
+	default:
+		return false;
+	}
+}
+
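+/*
+ * Adjust a Bayer mbus code for a crop starting at (x, y). code_map is laid
+ * out in groups of four where toggling bit 0 of the index flips the
+ * horizontal start pixel and toggling bit 1 flips the vertical start line,
+ * so the new order is found by XORing the index with the crop offset
+ * parities.
+ */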
+u32 ipu6_isys_convert_bayer_order(u32 code, int x, int y)
+{
+	static const u32 code_map[] = {
+		MEDIA_BUS_FMT_SRGGB8_1X8,
+		MEDIA_BUS_FMT_SGRBG8_1X8,
+		MEDIA_BUS_FMT_SGBRG8_1X8,
+		MEDIA_BUS_FMT_SBGGR8_1X8,
+		MEDIA_BUS_FMT_SRGGB10_1X10,
+		MEDIA_BUS_FMT_SGRBG10_1X10,
+		MEDIA_BUS_FMT_SGBRG10_1X10,
+		MEDIA_BUS_FMT_SBGGR10_1X10,
+		MEDIA_BUS_FMT_SRGGB12_1X12,
+		MEDIA_BUS_FMT_SGRBG12_1X12,
+		MEDIA_BUS_FMT_SGBRG12_1X12,
+		MEDIA_BUS_FMT_SBGGR12_1X12
+	};
+	u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(code_map); i++)
+		if (code_map[i] == code)
+			break;
+
+	if (WARN_ON(i == ARRAY_SIZE(code_map)))
+		return code;
+
+	return code_map[i ^ (((y & 1) << 1) | (x & 1))];
+}
+
+int ipu6_isys_subdev_set_fmt(struct v4l2_subdev *sd,
+			     struct v4l2_subdev_state *state,
+			     struct v4l2_subdev_format *format)
+{
+	struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd);
+	struct v4l2_mbus_framefmt *fmt;
+	struct v4l2_rect *crop;
+	u32 code = asd->supported_codes[0];
+	u32 other_pad, other_stream;
+	unsigned int i;
+	int ret;
+
+	/* No transcoding, source and sink formats must match. */
+	if ((sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SOURCE) &&
+	    sd->entity.num_pads > 1)
+		return v4l2_subdev_get_fmt(sd, state, format);
+
+	format->format.width = clamp(format->format.width, IPU6_ISYS_MIN_WIDTH,
+				     IPU6_ISYS_MAX_WIDTH);
+	format->format.height = clamp(format->format.height,
+				      IPU6_ISYS_MIN_HEIGHT,
+				      IPU6_ISYS_MAX_HEIGHT);
+
+	for (i = 0; asd->supported_codes[i]; i++) {
+		if (asd->supported_codes[i] == format->format.code) {
+			code = asd->supported_codes[i];
+			break;
+		}
+	}
+	format->format.code = code;
+	format->format.field = V4L2_FIELD_NONE;
+
+	/* Store the format and propagate it to the source pad. */
+	fmt = v4l2_subdev_state_get_stream_format(state, format->pad,
+						  format->stream);
+	if (!fmt)
+		return -EINVAL;
+
+	*fmt = format->format;
+
+	if (!(sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SINK))
+		return 0;
+
+	/* propagate format to following source pad */
+	fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad,
+							   format->stream);
+	if (!fmt)
+		return -EINVAL;
+
+	*fmt = format->format;
+
+	ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
+						    format->pad,
+						    format->stream,
+						    &other_pad,
+						    &other_stream);
+	if (ret)
+		return -EINVAL;
+
+	crop = v4l2_subdev_state_get_stream_crop(state, other_pad,
+						 other_stream);
+	/* reset crop */
+	crop->left = 0;
+	crop->top = 0;
+	crop->width = fmt->width;
+	crop->height = fmt->height;
+
+	return 0;
+}
+
+int ipu6_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+				    struct v4l2_subdev_state *state,
+				    struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct ipu6_isys_subdev *asd = to_ipu6_isys_subdev(sd);
+	const u32 *supported_codes = asd->supported_codes;
+	u32 index;
+
+	for (index = 0; supported_codes[index]; index++) {
+		if (index == code->index) {
+			code->code = supported_codes[index];
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int subdev_set_routing(struct v4l2_subdev *sd,
+			      struct v4l2_subdev_state *state,
+			      struct v4l2_subdev_krouting *routing)
+{
+	static const struct v4l2_mbus_framefmt format = {
+		.width = 4096,
+		.height = 3072,
+		.code = MEDIA_BUS_FMT_SGRBG10_1X10,
+		.field = V4L2_FIELD_NONE,
+	};
+	int ret;
+
+	ret = v4l2_subdev_routing_validate(sd, routing,
+					   V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
+	if (ret)
+		return ret;
+
+	return v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format);
+}
+
+int ipu6_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream,
+				 struct v4l2_mbus_framefmt *format)
+{
+	struct v4l2_mbus_framefmt *fmt;
+	struct v4l2_subdev_state *state;
+
+	if (!sd || !format)
+		return -EINVAL;
+
+	state = v4l2_subdev_lock_and_get_active_state(sd);
+	fmt = v4l2_subdev_state_get_stream_format(state, pad, stream);
+	if (fmt)
+		*format = *fmt;
+	v4l2_subdev_unlock_state(state);
+
+	return fmt ? 0 : -EINVAL;
+}
+
+int ipu6_isys_get_stream_pad_crop(struct v4l2_subdev *sd, u32 pad, u32 stream,
+				  struct v4l2_rect *crop)
+{
+	struct v4l2_subdev_state *state;
+	struct v4l2_rect *rect;
+
+	if (!sd || !crop)
+		return -EINVAL;
+
+	state = v4l2_subdev_lock_and_get_active_state(sd);
+	rect = v4l2_subdev_state_get_stream_crop(state, pad, stream);
+	if (rect)
+		*crop = *rect;
+	v4l2_subdev_unlock_state(state);
+
+	return rect ? 0 : -EINVAL;
+}
+
+u32 ipu6_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad)
+{
+	struct v4l2_subdev_state *state;
+	struct v4l2_subdev_route *routes;
+	unsigned int i;
+	u32 source_stream = 0;
+
+	state = v4l2_subdev_lock_and_get_active_state(sd);
+	if (!state)
+		return 0;
+
+	routes = state->routing.routes;
+	for (i = 0; i < state->routing.num_routes; i++) {
+		if (routes[i].source_pad == pad) {
+			source_stream = routes[i].source_stream;
+			break;
+		}
+	}
+
+	v4l2_subdev_unlock_state(state);
+
+	return source_stream;
+}
+
+int ipu6_isys_subdev_init_cfg(struct v4l2_subdev *sd,
+			      struct v4l2_subdev_state *state)
+{
+	struct v4l2_subdev_route route = {
+		.sink_pad = 0,
+		.sink_stream = 0,
+		.source_pad = 1,
+		.source_stream = 0,
+		.flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
+	};
+	struct v4l2_subdev_krouting routing = {
+		.num_routes = 1,
+		.routes = &route,
+	};
+
+	return subdev_set_routing(sd, state, &routing);
+}
+
+int ipu6_isys_subdev_set_routing(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_state *state,
+				 enum v4l2_subdev_format_whence which,
+				 struct v4l2_subdev_krouting *routing)
+{
+	return subdev_set_routing(sd, state, routing);
+}
+
+int ipu6_isys_subdev_init(struct ipu6_isys_subdev *asd,
+			  const struct v4l2_subdev_ops *ops,
+			  unsigned int nr_ctrls,
+			  unsigned int num_sink_pads,
+			  unsigned int num_source_pads)
+{
+	unsigned int num_pads = num_sink_pads + num_source_pads;
+	unsigned int i;
+	int ret;
+
+	v4l2_subdev_init(&asd->sd, ops);
+
+	asd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+			 V4L2_SUBDEV_FL_HAS_EVENTS |
+			 V4L2_SUBDEV_FL_STREAMS;
+	asd->sd.owner = THIS_MODULE;
+	asd->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+
+	asd->pad = devm_kcalloc(&asd->isys->adev->auxdev.dev, num_pads,
+				sizeof(*asd->pad), GFP_KERNEL);
+
+	if (!asd->pad)
+		return -ENOMEM;
+
+	for (i = 0; i < num_sink_pads; i++)
+		asd->pad[i].flags = MEDIA_PAD_FL_SINK |
+				    MEDIA_PAD_FL_MUST_CONNECT;
+
+	for (i = num_sink_pads; i < num_pads; i++)
+		asd->pad[i].flags = MEDIA_PAD_FL_SOURCE;
+
+	ret = media_entity_pads_init(&asd->sd.entity, num_pads, asd->pad);
+	if (ret)
+		return ret;
+
+	if (asd->ctrl_init) {
+		ret = v4l2_ctrl_handler_init(&asd->ctrl_handler, nr_ctrls);
+		if (ret)
+			goto out_media_entity_cleanup;
+
+		asd->ctrl_init(&asd->sd);
+		if (asd->ctrl_handler.error) {
+			ret = asd->ctrl_handler.error;
+			goto out_v4l2_ctrl_handler_free;
+		}
+
+		asd->sd.ctrl_handler = &asd->ctrl_handler;
+	}
+
+	asd->source = -1;
+
+	return 0;
+
+out_v4l2_ctrl_handler_free:
+	v4l2_ctrl_handler_free(&asd->ctrl_handler);
+
+out_media_entity_cleanup:
+	media_entity_cleanup(&asd->sd.entity);
+
+	return ret;
+}
+
+void ipu6_isys_subdev_cleanup(struct ipu6_isys_subdev *asd)
+{
+	media_entity_cleanup(&asd->sd.entity);
+	v4l2_ctrl_handler_free(&asd->ctrl_handler);
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h
new file mode 100644
index 000000000000..adea2a55761d
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013 - 2023 Intel Corporation */
+
+#ifndef IPU6_ISYS_SUBDEV_H
+#define IPU6_ISYS_SUBDEV_H
+
+#include <linux/container_of.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+struct ipu6_isys;
+
+struct ipu6_isys_subdev {
+	struct v4l2_subdev sd;
+	struct ipu6_isys *isys;
+	u32 const *supported_codes;
+	struct media_pad *pad;
+	struct v4l2_ctrl_handler ctrl_handler;
+	void (*ctrl_init)(struct v4l2_subdev *sd);
+	int source;	/* SSI stream source; -1 if unset */
+};
+
+#define to_ipu6_isys_subdev(__sd) \
+	container_of(__sd, struct ipu6_isys_subdev, sd)
+
+unsigned int ipu6_isys_mbus_code_to_bpp(u32 code);
+unsigned int ipu6_isys_mbus_code_to_mipi(u32 code);
+bool ipu6_isys_is_bayer_format(u32 code);
+u32 ipu6_isys_convert_bayer_order(u32 code, int x, int y);
+
+int ipu6_isys_subdev_set_fmt(struct v4l2_subdev *sd,
+			     struct v4l2_subdev_state *state,
+			     struct v4l2_subdev_format *fmt);
+int ipu6_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+				    struct v4l2_subdev_state *state,
+				    struct v4l2_subdev_mbus_code_enum
+				    *code);
+int ipu6_isys_subdev_link_validate(struct v4l2_subdev *sd,
+				   struct media_link *link,
+				   struct v4l2_subdev_format *source_fmt,
+				   struct v4l2_subdev_format *sink_fmt);
+u32 ipu6_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad);
+int ipu6_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream,
+				 struct v4l2_mbus_framefmt *format);
+int ipu6_isys_get_stream_pad_crop(struct v4l2_subdev *sd, u32 pad, u32 stream,
+				  struct v4l2_rect *crop);
+int ipu6_isys_subdev_init_cfg(struct v4l2_subdev *sd,
+			      struct v4l2_subdev_state *state);
+int ipu6_isys_subdev_set_routing(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_state *state,
+				 enum v4l2_subdev_format_whence which,
+				 struct v4l2_subdev_krouting *routing);
+int ipu6_isys_subdev_init(struct ipu6_isys_subdev *asd,
+			  const struct v4l2_subdev_ops *ops,
+			  unsigned int nr_ctrls,
+			  unsigned int num_sink_pads,
+			  unsigned int num_source_pads);
+void ipu6_isys_subdev_cleanup(struct ipu6_isys_subdev *asd);
+#endif /* IPU6_ISYS_SUBDEV_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h b/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h
new file mode 100644
index 000000000000..2034e1109d98
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef IPU6_PLATFORM_ISYS_CSI2_REG_H
+#define IPU6_PLATFORM_ISYS_CSI2_REG_H
+
+#include <linux/bits.h>
+
+#define CSI_REG_BASE			0x220000
+#define CSI_REG_BASE_PORT(id)		((id) * 0x1000)
+
+#define IPU6_CSI_PORT_A_ADDR_OFFSET	\
+		(CSI_REG_BASE + CSI_REG_BASE_PORT(0))
+#define IPU6_CSI_PORT_B_ADDR_OFFSET	\
+		(CSI_REG_BASE + CSI_REG_BASE_PORT(1))
+#define IPU6_CSI_PORT_C_ADDR_OFFSET	\
+		(CSI_REG_BASE + CSI_REG_BASE_PORT(2))
+#define IPU6_CSI_PORT_D_ADDR_OFFSET	\
+		(CSI_REG_BASE + CSI_REG_BASE_PORT(3))
+#define IPU6_CSI_PORT_E_ADDR_OFFSET	\
+		(CSI_REG_BASE + CSI_REG_BASE_PORT(4))
+#define IPU6_CSI_PORT_F_ADDR_OFFSET	\
+		(CSI_REG_BASE + CSI_REG_BASE_PORT(5))
+#define IPU6_CSI_PORT_G_ADDR_OFFSET	\
+		(CSI_REG_BASE + CSI_REG_BASE_PORT(6))
+#define IPU6_CSI_PORT_H_ADDR_OFFSET	\
+		(CSI_REG_BASE + CSI_REG_BASE_PORT(7))
+
+/* CSI Port General Purpose Registers */
+#define CSI_REG_PORT_GPREG_SRST                 0x0
+#define CSI_REG_PORT_GPREG_CSI2_SLV_REG_SRST    0x4
+#define CSI_REG_PORT_GPREG_CSI2_PORT_CONTROL    0x8
+
+/*
+ * Port IRQs mapping events:
+ * IRQ0 - CSI_FE event
+ * IRQ1 - CSI_SYNC
+ * IRQ2 - S2M_SIDS0TO7
+ * IRQ3 - S2M_SIDS8TO15
+ */
+#define CSI_PORT_REG_BASE_IRQ_CSI               0x80
+#define CSI_PORT_REG_BASE_IRQ_CSI_SYNC          0xA0
+#define CSI_PORT_REG_BASE_IRQ_S2M_SIDS0TOS7     0xC0
+#define CSI_PORT_REG_BASE_IRQ_S2M_SIDS8TOS15    0xE0
+
+#define CSI_PORT_REG_BASE_IRQ_EDGE_OFFSET	0x0
+#define CSI_PORT_REG_BASE_IRQ_MASK_OFFSET	0x4
+#define CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET	0x8
+#define CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET	0xc
+#define CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET	0x10
+#define CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET	0x14
+
+#define IPU6SE_CSI_RX_ERROR_IRQ_MASK		GENMASK(18, 0)
+#define IPU6_CSI_RX_ERROR_IRQ_MASK		GENMASK(19, 0)
+
+#define CSI_RX_NUM_ERRORS_IN_IRQ		20
+#define CSI_RX_NUM_IRQ				32
+
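+/* frame start (FS) and frame end (FE) IRQ bits, 2 bits per virtual channel */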
+#define IPU_CSI_RX_IRQ_FS_VC(chn)	(1 << ((chn) * 2))
+#define IPU_CSI_RX_IRQ_FE_VC(chn)	(2 << ((chn) * 2))
+
+/* PPI2CSI */
+#define CSI_REG_PPI2CSI_ENABLE				0x200
+#define CSI_REG_PPI2CSI_CONFIG_PPI_INTF			0x204
+#define PPI_INTF_CONFIG_NOF_ENABLED_DLANES_MASK		GENMASK(4, 3)
+#define CSI_REG_PPI2CSI_CONFIG_CSI_FEATURE		0x208
+
+enum CSI_PPI2CSI_CTRL {
+	CSI_PPI2CSI_DISABLE = 0,
+	CSI_PPI2CSI_ENABLE = 1,
+};
+
+/* CSI_FE */
+#define CSI_REG_CSI_FE_ENABLE                   0x280
+#define CSI_REG_CSI_FE_MODE                     0x284
+#define CSI_REG_CSI_FE_MUX_CTRL                 0x288
+#define CSI_REG_CSI_FE_SYNC_CNTR_SEL            0x290
+
+enum CSI_FE_ENABLE_TYPE {
+	CSI_FE_DISABLE = 0,
+	CSI_FE_ENABLE = 1,
+};
+
+enum CSI_FE_MODE_TYPE {
+	CSI_FE_DPHY_MODE = 0,
+	CSI_FE_CPHY_MODE = 1,
+};
+
+enum CSI_FE_INPUT_SELECTOR {
+	CSI_SENSOR_INPUT = 0,
+	CSI_MIPIGEN_INPUT = 1,
+};
+
+enum CSI_FE_SYNC_CNTR_SEL_TYPE {
+	CSI_CNTR_SENSOR_LINE_ID = BIT(0),
+	CSI_CNTR_INT_LINE_PKT_ID = ~CSI_CNTR_SENSOR_LINE_ID,
+	CSI_CNTR_SENSOR_FRAME_ID = BIT(1),
+	CSI_CNTR_INT_FRAME_PKT_ID = ~CSI_CNTR_SENSOR_FRAME_ID,
+};
+
+/* CSI HUB General Purpose Registers */
+#define CSI_REG_HUB_GPREG_SRST			(CSI_REG_BASE + 0x18000)
+#define CSI_REG_HUB_GPREG_SLV_REG_SRST		(CSI_REG_BASE + 0x18004)
+
+#define CSI_REG_HUB_DRV_ACCESS_PORT(id)	(CSI_REG_BASE + 0x18018 + (id) * 4)
+#define CSI_REG_HUB_FW_ACCESS_PORT_OFS		0x17000
+#define CSI_REG_HUB_FW_ACCESS_PORT_V6OFS	0x16000
+#define CSI_REG_HUB_FW_ACCESS_PORT(ofs, id)	\
+					(CSI_REG_BASE + (ofs) + (id) * 4)
+
+enum CSI_PORT_CLK_GATING_SWITCH {
+	CSI_PORT_CLK_GATING_OFF = 0,
+	CSI_PORT_CLK_GATING_ON = 1,
+};
+
+#define CSI_REG_BASE_HUB_IRQ                        0x18200
+
+#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE			0x238200
+#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK			0x238204
+#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS			0x238208
+#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR			0x23820c
+#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE			0x238210
+#define IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE		0x238214
+
+#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_EDGE			0x238220
+#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_MASK			0x238224
+#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_STATUS			0x238228
+#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_CLEAR			0x23822c
+#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_ENABLE			0x238230
+#define IPU6_REG_ISYS_CSI_TOP_CTRL1_IRQ_LEVEL_NOT_PULSE		0x238234
+
+/* MTL IPU6V6 irq ctrl0 & ctrl1 */
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE			0x238700
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK			0x238704
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS		0x238708
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR			0x23870c
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE		0x238710
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE	0x238714
+
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_EDGE			0x238720
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_MASK			0x238724
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_STATUS		0x238728
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_CLEAR			0x23872c
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_ENABLE		0x238730
+#define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_LEVEL_NOT_PULSE	0x238734
+
+/*
+ * 3:0 CSI_PORT.irq_out[3:0] CSI_PORT_CTRL0 IRQ outputs (4bits)
+ * [0] CSI_PORT.IRQ_CTRL0_csi
+ * [1] CSI_PORT.IRQ_CTRL1_csi_sync
+ * [2] CSI_PORT.IRQ_CTRL2_s2m_sids0to7
+ * [3] CSI_PORT.IRQ_CTRL3_s2m_sids8to15
+ */
+#define IPU6_ISYS_UNISPART_IRQ_CSI2(port)		\
+				   (0x3 << ((port) * IPU6_CSI_IRQ_NUM_PER_PIPE))
+
+/*
+ * IPU6SE supports 2 front ends, 2 ports per front end, 4 ports 0..3
+ * sip0 - 0, 1
+ * sip1 - 2, 3
+ * 0 and 2 support 4 data lanes, 1 and 3 support 2 data lanes
+ * all offsets are relative to the isys base address
+ */
+
+#define CSI2_HUB_GPREG_SIP_SRST(sip)			(0x238038 + (sip) * 4)
+#define CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip)		(0x238050 + (sip) * 4)
+
+#define CSI2_HUB_GPREG_DPHY_TIMER_INCR			0x238040
+#define CSI2_HUB_GPREG_HPLL_FREQ			0x238044
+#define CSI2_HUB_GPREG_IS_CLK_RATIO			0x238048
+#define CSI2_HUB_GPREG_HPLL_FREQ_ISCLK_RATE_OVERRIDE	0x23804c
+#define CSI2_HUB_GPREG_PORT_CLKGATING_DISABLE		0x238058
+#define CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL		0x23805c
+#define CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL		0x238088
+#define CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL		0x2380a4
+#define CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL		0x2380d0
+
+#define CSI2_SIP_TOP_CSI_RX_BASE(sip)		(0x23805c + (sip) * 0x48)
+#define CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port)	(0x23805c + ((port) / 2) * 0x48)
+#define CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port)	(0x238088 + ((port) / 2) * 0x48)
+
+/* offset from port base */
+#define CSI2_SIP_TOP_CSI_RX_PORT_CONTROL		0x0
+#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE	0x4
+#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE	0x8
+#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(lane)	(0xc + (lane) * 8)
+#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(lane)	(0x10 + (lane) * 8)
+
+#endif /* IPU6_ISYS_CSI2_REG_H */
-- 
2.43.0

From ba67c52b68d458e2c8ae4af08bd01a6ebe60b475 Mon Sep 17 00:00:00 2001
From: Bingbu Cao <bingbu.cao@intel.com>
Date: Thu, 11 Jan 2024 14:55:23 +0800
Subject: [PATCH 09/31] media: intel/ipu6: add the CSI2 DPHY implementation

IPU6 CSI2 DPHY hardware varies across platforms. Current IPU6 has
three DPHY hardware instances, which may be used on Tiger Lake,
Alder Lake, Meteor Lake and Jasper Lake. The MCD DPHY is used on
Tiger Lake and Alder Lake, the DWC DPHY on Meteor Lake and the JSL
DPHY on Jasper Lake.

Each PHY has its own register space; the input system driver calls
the DPHY callbacks which were set at isys_probe().

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Link: https://lore.kernel.org/r/20240111065531.2418836-10-bingbu.cao@intel.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 .../media/pci/intel/ipu6/ipu6-isys-dwc-phy.c  | 536 +++++++++++++
 .../media/pci/intel/ipu6/ipu6-isys-jsl-phy.c  | 242 ++++++
 .../media/pci/intel/ipu6/ipu6-isys-mcd-phy.c  | 720 ++++++++++++++++++
 3 files changed, 1498 insertions(+)
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c

diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c b/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c
new file mode 100644
index 000000000000..a4bb5ff51d4e
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2023 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+
+#include "ipu6-bus.h"
+#include "ipu6-isys.h"
+#include "ipu6-platform-isys-csi2-reg.h"
+
+#define IPU6_DWC_DPHY_BASE(i)			(0x238038 + 0x34 * (i))
+#define IPU6_DWC_DPHY_RSTZ			0x00
+#define IPU6_DWC_DPHY_SHUTDOWNZ			0x04
+#define IPU6_DWC_DPHY_HSFREQRANGE		0x08
+#define IPU6_DWC_DPHY_CFGCLKFREQRANGE		0x0c
+#define IPU6_DWC_DPHY_TEST_IFC_ACCESS_MODE	0x10
+#define IPU6_DWC_DPHY_TEST_IFC_REQ		0x14
+#define IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION	0x18
+#define IPU6_DWC_DPHY_DFT_CTRL0			0x28
+#define IPU6_DWC_DPHY_DFT_CTRL1			0x2c
+#define IPU6_DWC_DPHY_DFT_CTRL2			0x30
+
+/*
+ * test IFC request definition:
+ * - req: 0 for read, 1 for write
+ * - 12-bit address
+ * - 8-bit data (ignored for reads)
+ * --24----16------4-----0
+ * --|-data-|-addr-|-req-|
+ */
+#define IFC_REQ(req, addr, data) (FIELD_PREP(GENMASK(23, 16), data) | \
+				  FIELD_PREP(GENMASK(15, 4), addr) | \
+				  FIELD_PREP(GENMASK(1, 0), req))
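+/* e.g. IFC_REQ(TEST_IFC_REQ_READ, 0x1e, 0) encodes to 0x1e0 (addr in bits 15:4) */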
+
+#define TEST_IFC_REQ_READ	0
+#define TEST_IFC_REQ_WRITE	1
+#define TEST_IFC_REQ_RESET	2
+
+#define TEST_IFC_ACCESS_MODE_FSM	0
+#define TEST_IFC_ACCESS_MODE_IFC_CTL	1
+
+enum phy_fsm_state {
+	PHY_FSM_STATE_POWERON = 0,
+	PHY_FSM_STATE_BGPON = 1,
+	PHY_FSM_STATE_CAL_TYPE = 2,
+	PHY_FSM_STATE_BURNIN_CAL = 3,
+	PHY_FSM_STATE_TERMCAL = 4,
+	PHY_FSM_STATE_OFFSETCAL = 5,
+	PHY_FSM_STATE_OFFSET_LANE = 6,
+	PHY_FSM_STATE_IDLE = 7,
+	PHY_FSM_STATE_ULP = 8,
+	PHY_FSM_STATE_DDLTUNNING = 9,
+	PHY_FSM_STATE_SKEW_BACKWARD = 10,
+	PHY_FSM_STATE_INVALID,
+};
+
+static void dwc_dphy_write(struct ipu6_isys *isys, u32 phy_id, u32 addr,
+			   u32 data)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	void __iomem *isys_base = isys->pdata->base;
+	void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id);
+
+	dev_dbg(dev, "write: reg 0x%lx = data 0x%x", base + addr - isys_base,
+		data);
+	writel(data, base + addr);
+}
+
+static u32 dwc_dphy_read(struct ipu6_isys *isys, u32 phy_id, u32 addr)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	void __iomem *isys_base = isys->pdata->base;
+	void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id);
+	u32 data;
+
+	data = readl(base + addr);
+	dev_dbg(dev, "read: reg 0x%lx = data 0x%x", base + addr - isys_base,
+		data);
+
+	return data;
+}
+
+static void dwc_dphy_write_mask(struct ipu6_isys *isys, u32 phy_id, u32 addr,
+				u32 data, u8 shift, u8 width)
+{
+	u32 temp;
+	u32 mask;
+
+	mask = (1 << width) - 1;
+	temp = dwc_dphy_read(isys, phy_id, addr);
+	temp &= ~(mask << shift);
+	temp |= (data & mask) << shift;
+	dwc_dphy_write(isys, phy_id, addr, temp);
+}
+
+static u32 __maybe_unused dwc_dphy_read_mask(struct ipu6_isys *isys, u32 phy_id,
+					     u32 addr, u8 shift,  u8 width)
+{
+	u32 val;
+
+	val = dwc_dphy_read(isys, phy_id, addr) >> shift;
+	return val & ((1 << width) - 1);
+}
+
+#define DWC_DPHY_TIMEOUT (5 * USEC_PER_SEC)
+static int dwc_dphy_ifc_read(struct ipu6_isys *isys, u32 phy_id, u32 addr,
+			     u32 *val)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	void __iomem *isys_base = isys->pdata->base;
+	void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id);
+	void __iomem *reg;
+	u32 completion;
+	int ret;
+
+	dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_REQ,
+		       IFC_REQ(TEST_IFC_REQ_READ, addr, 0));
+	reg = base + IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION;
+	ret = readl_poll_timeout(reg, completion, !(completion & BIT(0)),
+				 10, DWC_DPHY_TIMEOUT);
+	if (ret) {
+		dev_err(dev, "DWC ifc request read timeout\n");
+		return ret;
+	}
+
+	*val = FIELD_GET(GENMASK(15, 8), completion);
+	dev_dbg(dev, "DWC ifc read 0x%x = 0x%x", addr, *val);
+
+	return 0;
+}
+
+static int dwc_dphy_ifc_write(struct ipu6_isys *isys, u32 phy_id, u32 addr,
+			      u32 data)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	void __iomem *isys_base = isys->pdata->base;
+	void __iomem *base = isys_base + IPU6_DWC_DPHY_BASE(phy_id);
+	void __iomem *reg;
+	u32 completion;
+	int ret;
+
+	dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_REQ,
+		       IFC_REQ(TEST_IFC_REQ_WRITE, addr, data));
+	completion = readl(base + IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION);
+	reg = base + IPU6_DWC_DPHY_TEST_IFC_REQ_COMPLETION;
+	ret = readl_poll_timeout(reg, completion, !(completion & BIT(0)),
+				 10, DWC_DPHY_TIMEOUT);
+	if (ret)
+		dev_err(dev, "DWC ifc request write timeout\n");
+
+	return ret;
+}
+
+static void dwc_dphy_ifc_write_mask(struct ipu6_isys *isys, u32 phy_id,
+				    u32 addr, u32 data, u8 shift, u8 width)
+{
+	u32 temp, mask;
+	int ret;
+
+	ret = dwc_dphy_ifc_read(isys, phy_id, addr, &temp);
+	if (ret)
+		return;
+
+	mask = (1 << width) - 1;
+	temp &= ~(mask << shift);
+	temp |= (data & mask) << shift;
+	dwc_dphy_ifc_write(isys, phy_id, addr, temp);
+}
+
+static u32 dwc_dphy_ifc_read_mask(struct ipu6_isys *isys, u32 phy_id, u32 addr,
+				  u8 shift, u8 width)
+{
+	int ret;
+	u32 val;
+
+	ret = dwc_dphy_ifc_read(isys, phy_id, addr, &val);
+	if (ret)
+		return 0;
+
+	return ((val >> shift) & ((1 << width) - 1));
+}
+
+static int dwc_dphy_pwr_up(struct ipu6_isys *isys, u32 phy_id)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	u32 fsm_state;
+	int ret;
+
+	dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_RSTZ, 1);
+	usleep_range(10, 20);
+	dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_SHUTDOWNZ, 1);
+
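+	/* poll the PHY FSM state field (IFC reg 0x1e, bits 3:0) until IDLE or ULP */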
+	ret = read_poll_timeout(dwc_dphy_ifc_read_mask, fsm_state,
+				(fsm_state == PHY_FSM_STATE_IDLE ||
+				 fsm_state == PHY_FSM_STATE_ULP),
+				100, DWC_DPHY_TIMEOUT, false, isys,
+				phy_id, 0x1e, 0, 4);
+
+	if (ret)
+		dev_err(dev, "Dphy %d power up failed, state 0x%x", phy_id,
+			fsm_state);
+
+	return ret;
+}
+
+struct dwc_dphy_freq_range {
+	u8 hsfreq;
+	u16 min;
+	u16 max;
+	u16 default_mbps;
+	u16 osc_freq_target;
+};
+
+#define DPHY_FREQ_RANGE_NUM		(63)
+#define DPHY_FREQ_RANGE_INVALID_INDEX	(0xff)
+static const struct dwc_dphy_freq_range freqranges[DPHY_FREQ_RANGE_NUM] = {
+	{0x00,	80,	97,	80,	335},
+	{0x10,	80,	107,	90,	335},
+	{0x20,	84,	118,	100,	335},
+	{0x30,	93,	128,	110,	335},
+	{0x01,	103,	139,	120,	335},
+	{0x11,	112,	149,	130,	335},
+	{0x21,	122,	160,	140,	335},
+	{0x31,	131,	170,	150,	335},
+	{0x02,	141,	181,	160,	335},
+	{0x12,	150,	191,	170,	335},
+	{0x22,	160,	202,	180,	335},
+	{0x32,	169,	212,	190,	335},
+	{0x03,	183,	228,	205,	335},
+	{0x13,	198,	244,	220,	335},
+	{0x23,	212,	259,	235,	335},
+	{0x33,	226,	275,	250,	335},
+	{0x04,	250,	301,	275,	335},
+	{0x14,	274,	328,	300,	335},
+	{0x25,	297,	354,	325,	335},
+	{0x35,	321,	380,	350,	335},
+	{0x05,	369,	433,	400,	335},
+	{0x16,	416,	485,	450,	335},
+	{0x26,	464,	538,	500,	335},
+	{0x37,	511,	590,	550,	335},
+	{0x07,	559,	643,	600,	335},
+	{0x18,	606,	695,	650,	335},
+	{0x28,	654,	748,	700,	335},
+	{0x39,	701,	800,	750,	335},
+	{0x09,	749,	853,	800,	335},
+	{0x19,	796,	905,	850,	335},
+	{0x29,	844,	958,	900,	335},
+	{0x3a,	891,	1010,	950,	335},
+	{0x0a,	939,	1063,	1000,	335},
+	{0x1a,	986,	1115,	1050,	335},
+	{0x2a,	1034,	1168,	1100,	335},
+	{0x3b,	1081,	1220,	1150,	335},
+	{0x0b,	1129,	1273,	1200,	335},
+	{0x1b,	1176,	1325,	1250,	335},
+	{0x2b,	1224,	1378,	1300,	335},
+	{0x3c,	1271,	1430,	1350,	335},
+	{0x0c,	1319,	1483,	1400,	335},
+	{0x1c,	1366,	1535,	1450,	335},
+	{0x2c,	1414,	1588,	1500,	335},
+	{0x3d,	1461,	1640,	1550,	208},
+	{0x0d,	1509,	1693,	1600,	214},
+	{0x1d,	1556,	1745,	1650,	221},
+	{0x2e,	1604,	1798,	1700,	228},
+	{0x3e,	1651,	1850,	1750,	234},
+	{0x0e,	1699,	1903,	1800,	241},
+	{0x1e,	1746,	1955,	1850,	248},
+	{0x2f,	1794,	2008,	1900,	255},
+	{0x3f,	1841,	2060,	1950,	261},
+	{0x0f,	1889,	2113,	2000,	268},
+	{0x40,	1936,	2165,	2050,	275},
+	{0x41,	1984,	2218,	2100,	281},
+	{0x42,	2031,	2270,	2150,	288},
+	{0x43,	2079,	2323,	2200,	294},
+	{0x44,	2126,	2375,	2250,	302},
+	{0x45,	2174,	2428,	2300,	308},
+	{0x46,	2221,	2480,	2350,	315},
+	{0x47,	2269,	2500,	2400,	321},
+	{0x48,	2316,	2500,	2450,	328},
+	{0x49,	2364,	2500,	2500,	335}
+};
+
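+/* return an index into freqranges[], scanning from the highest range down */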
+static u16 get_hsfreq_by_mbps(u32 mbps)
+{
+	unsigned int i = DPHY_FREQ_RANGE_NUM;
+
+	while (i--) {
+		if (freqranges[i].default_mbps == mbps ||
+		    (mbps >= freqranges[i].min && mbps <= freqranges[i].max))
+			return i;
+	}
+
+	return DPHY_FREQ_RANGE_INVALID_INDEX;
+}
+
+static int ipu6_isys_dwc_phy_config(struct ipu6_isys *isys,
+				    u32 phy_id, u32 mbps)
+{
+	struct ipu6_bus_device *adev = isys->adev;
+	struct device *dev = &adev->auxdev.dev;
+	struct ipu6_device *isp = adev->isp;
+	u32 cfg_clk_freqrange;
+	u32 osc_freq_target;
+	u32 index;
+
+	dev_dbg(dev, "config Dphy %u with %u mbps", phy_id, mbps);
+
+	index = get_hsfreq_by_mbps(mbps);
+	if (index == DPHY_FREQ_RANGE_INVALID_INDEX) {
+		dev_err(dev, "link freq not found for mbps %u", mbps);
+		return -EINVAL;
+	}
+
+	dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_HSFREQRANGE,
+			    freqranges[index].hsfreq, 0, 7);
+
+	/* Force termination Calibration */
+	if (isys->phy_termcal_val) {
+		dwc_dphy_ifc_write_mask(isys, phy_id, 0x20a, 0x1, 0, 1);
+		dwc_dphy_ifc_write_mask(isys, phy_id, 0x209, 0x3, 0, 2);
+		dwc_dphy_ifc_write_mask(isys, phy_id, 0x209,
+					isys->phy_termcal_val, 4, 4);
+	}
+
+	/*
+	 * Enable override to configure the DDL target oscillation
+	 * frequency on bit 0 of register 0xe4
+	 */
+	dwc_dphy_ifc_write_mask(isys, phy_id, 0xe4, 0x1, 0, 1);
+	/*
+	 * configure registers 0xe2, 0xe3 with the
+	 * appropriate DDL target oscillation frequency
+	 * 0x1cc(460)
+	 */
+	osc_freq_target = freqranges[index].osc_freq_target;
+	dwc_dphy_ifc_write_mask(isys, phy_id, 0xe2,
+				osc_freq_target & 0xff, 0, 8);
+	dwc_dphy_ifc_write_mask(isys, phy_id, 0xe3,
+				(osc_freq_target >> 8) & 0xf, 0, 4);
+
+	if (mbps < 1500) {
+		/* deskew_polarity_rw, for < 1.5Gbps */
+		dwc_dphy_ifc_write_mask(isys, phy_id, 0x8, 0x1, 5, 1);
+	}
+
+	/*
+	 * Set cfgclkfreqrange[5:0] = round[(Fcfg_clk(MHz)-17)*4]
+	 * (38.4 - 17) * 4 = ~85 (0x55)
+	 */
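+	/*
+	 * ref_clk appears to be in 100 kHz units (e.g. 384 for 38.4 MHz),
+	 * hence the -170 and /10 scaling below.
+	 */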
+	cfg_clk_freqrange = (isp->buttress.ref_clk - 170) * 4 / 10;
+	dev_dbg(dev, "ref_clk = %u clk_freqrange = %u",
+		isp->buttress.ref_clk, cfg_clk_freqrange);
+	dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_CFGCLKFREQRANGE,
+			    cfg_clk_freqrange, 0, 8);
+
+	dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0x1, 4, 1);
+	dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0x1, 8, 1);
+
+	return 0;
+}
+
+static void ipu6_isys_dwc_phy_aggr_setup(struct ipu6_isys *isys, u32 master,
+					 u32 slave, u32 mbps)
+{
+	/* Config mastermacro */
+	dwc_dphy_ifc_write_mask(isys, master, 0x133, 0x1, 0, 1);
+	dwc_dphy_ifc_write_mask(isys, slave, 0x133, 0x0, 0, 1);
+
+	/* Config master PHY clk lane to drive long channel clk */
+	dwc_dphy_ifc_write_mask(isys, master, 0x307, 0x1, 2, 1);
+	dwc_dphy_ifc_write_mask(isys, slave, 0x307, 0x0, 2, 1);
+
+	/* Config both PHYs data lanes to get clk from long channel */
+	dwc_dphy_ifc_write_mask(isys, master, 0x508, 0x1, 5, 1);
+	dwc_dphy_ifc_write_mask(isys, slave, 0x508, 0x1, 5, 1);
+	dwc_dphy_ifc_write_mask(isys, master, 0x708, 0x1, 5, 1);
+	dwc_dphy_ifc_write_mask(isys, slave, 0x708, 0x1, 5, 1);
+
+	/* Config slave PHY clk lane to bypass long channel clk to DDR clk */
+	dwc_dphy_ifc_write_mask(isys, master, 0x308, 0x0, 3, 1);
+	dwc_dphy_ifc_write_mask(isys, slave, 0x308, 0x1, 3, 1);
+
+	/* Override slave PHY clk lane enable (DPHYRXCLK_CLL_demux module) */
+	dwc_dphy_ifc_write_mask(isys, slave, 0xe0, 0x3, 0, 2);
+
+	/* Override slave PHY DDR clk lane enable (DPHYHSRX_div124 module) */
+	dwc_dphy_ifc_write_mask(isys, slave, 0xe1, 0x1, 1, 1);
+	dwc_dphy_ifc_write_mask(isys, slave, 0x307, 0x1, 3, 1);
+
+	/* Turn off slave PHY LP-RX clk lane */
+	dwc_dphy_ifc_write_mask(isys, slave, 0x304, 0x1, 7, 1);
+	dwc_dphy_ifc_write_mask(isys, slave, 0x305, 0xa, 0, 5);
+}
+
+#define PHY_E	4
+static int ipu6_isys_dwc_phy_powerup_ack(struct ipu6_isys *isys, u32 phy_id)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	u32 rescal_done;
+	int ret;
+
+	ret = dwc_dphy_pwr_up(isys, phy_id);
+	if (ret != 0) {
+		dev_err(dev, "Dphy %u power up failed(%d)", phy_id, ret);
+		return ret;
+	}
+
+	/* reset forcerxmode */
+	dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0, 4, 1);
+	dwc_dphy_write_mask(isys, phy_id, IPU6_DWC_DPHY_DFT_CTRL2, 0, 8, 1);
+
+	dev_dbg(dev, "Dphy %u is ready!", phy_id);
+
+	if (phy_id != PHY_E || isys->phy_termcal_val)
+		return 0;
+
+	usleep_range(100, 200);
+	rescal_done = dwc_dphy_ifc_read_mask(isys, phy_id, 0x221, 7, 1);
+	if (rescal_done) {
+		isys->phy_termcal_val = dwc_dphy_ifc_read_mask(isys, phy_id,
+							       0x220, 2, 4);
+		dev_dbg(dev, "termcal done with value = %u",
+			isys->phy_termcal_val);
+	}
+
+	return 0;
+}
+
+static void ipu6_isys_dwc_phy_reset(struct ipu6_isys *isys, u32 phy_id)
+{
+	dev_dbg(&isys->adev->auxdev.dev, "Reset phy %u", phy_id);
+
+	dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_SHUTDOWNZ, 0);
+	dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_RSTZ, 0);
+	dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_ACCESS_MODE,
+		       TEST_IFC_ACCESS_MODE_FSM);
+	dwc_dphy_write(isys, phy_id, IPU6_DWC_DPHY_TEST_IFC_REQ,
+		       TEST_IFC_REQ_RESET);
+}
+
+int ipu6_isys_dwc_phy_set_power(struct ipu6_isys *isys,
+				struct ipu6_isys_csi2_config *cfg,
+				const struct ipu6_isys_csi2_timing *timing,
+				bool on)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	void __iomem *isys_base = isys->pdata->base;
+	u32 phy_id, primary, secondary;
+	u32 nlanes, port, mbps;
+	s64 link_freq;
+	int ret;
+
+	port = cfg->port;
+
+	if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) {
+		dev_warn(dev, "invalid port ID %d\n", port);
+		return -EINVAL;
+	}
+
+	nlanes = cfg->nlanes;
+	/* only port 0, 2 and 4 support 4 lanes */
+	if (nlanes == 4 && port % 2) {
+		dev_err(dev, "invalid csi-port %u with %u lanes\n", port,
+			nlanes);
+		return -EINVAL;
+	}
+
+	link_freq = ipu6_isys_csi2_get_link_freq(&isys->csi2[port]);
+	if (link_freq < 0) {
+		dev_err(dev, "get link freq failed(%lld).\n", link_freq);
+		return link_freq;
+	}
+
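+	/* link_freq is in Hz and the link is DDR: Mbps = link_freq * 2 / 10^6 */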
+	mbps = div_u64(link_freq, 500000);
+
+	phy_id = port;
+	primary = port & ~1;
+	secondary = primary + 1;
+	if (on) {
+		if (nlanes == 4) {
+			dev_dbg(dev, "config phy %u and %u in aggr mode\n",
+				primary, secondary);
+
+			ipu6_isys_dwc_phy_reset(isys, primary);
+			ipu6_isys_dwc_phy_reset(isys, secondary);
+			ipu6_isys_dwc_phy_aggr_setup(isys, primary,
+						     secondary, mbps);
+
+			ret = ipu6_isys_dwc_phy_config(isys, primary, mbps);
+			if (ret)
+				return ret;
+			ret = ipu6_isys_dwc_phy_config(isys, secondary, mbps);
+			if (ret)
+				return ret;
+
+			ret = ipu6_isys_dwc_phy_powerup_ack(isys, primary);
+			if (ret)
+				return ret;
+
+			ret = ipu6_isys_dwc_phy_powerup_ack(isys, secondary);
+			return ret;
+		}
+
+		dev_dbg(dev, "config phy %u with %u lanes in non-aggr mode\n",
+			phy_id, nlanes);
+
+		ipu6_isys_dwc_phy_reset(isys, phy_id);
+		ret = ipu6_isys_dwc_phy_config(isys, phy_id, mbps);
+		if (ret)
+			return ret;
+
+		ret = ipu6_isys_dwc_phy_powerup_ack(isys, phy_id);
+		return ret;
+	}
+
+	if (nlanes == 4) {
+		dev_dbg(dev, "Power down phy %u and phy %u for port %u\n",
+			primary, secondary, port);
+		ipu6_isys_dwc_phy_reset(isys, secondary);
+		ipu6_isys_dwc_phy_reset(isys, primary);
+
+		return 0;
+	}
+
+	dev_dbg(dev, "Powerdown phy %u with %u lanes\n", phy_id, nlanes);
+
+	ipu6_isys_dwc_phy_reset(isys, phy_id);
+
+	return 0;
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c b/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c
new file mode 100644
index 000000000000..dcc7743e0cee
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2023 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/io.h>
+
+#include "ipu6-bus.h"
+#include "ipu6-isys.h"
+#include "ipu6-isys-csi2.h"
+#include "ipu6-platform-isys-csi2-reg.h"
+
+/* only use BB0, BB2, BB4, and BB6 on PHY0 */
+#define IPU6SE_ISYS_PHY_BB_NUM		4
+#define IPU6SE_ISYS_PHY_0_BASE		0x10000
+
+#define PHY_CPHY_DLL_OVRD(x)		(0x100 + 0x100 * (x))
+#define PHY_CPHY_RX_CONTROL1(x)		(0x110 + 0x100 * (x))
+#define PHY_DPHY_CFG(x)			(0x148 + 0x100 * (x))
+#define PHY_BB_AFE_CONFIG(x)		(0x174 + 0x100 * (x))
+
+/*
+ * use port_cfg to configure which data lanes are used
+ * +---------+     +------+ +-----+
+ * | port0 x4<-----|      | |     |
+ * |         |     | port | |     |
+ * | port1 x2<-----|      | |     |
+ * |         |     |      <-| PHY |
+ * | port2 x4<-----|      | |     |
+ * |         |     |config| |     |
+ * | port3 x2<-----|      | |     |
+ * +---------+     +------+ +-----+
+ */
+static const unsigned int csi2_port_cfg[][3] = {
+	{0, 0, 0x1f}, /* no link */
+	{4, 0, 0x10}, /* x4 + x4 config */
+	{2, 0, 0x12}, /* x2 + x2 config */
+	{1, 0, 0x13}, /* x1 + x1 config */
+	{2, 1, 0x15}, /* x2x1 + x2x1 config */
+	{1, 1, 0x16}, /* x1x1 + x1x1 config */
+	{2, 2, 0x18}, /* x2x2 + x2x2 config */
+	{1, 2, 0x19} /* x1x2 + x1x2 config */
+};
+
+/* port, nlanes, bbindex, portcfg */
+static const unsigned int phy_port_cfg[][4] = {
+	/* sip0 */
+	{0, 1, 0, 0x15},
+	{0, 2, 0, 0x15},
+	{0, 4, 0, 0x15},
+	{0, 4, 2, 0x22},
+	/* sip1 */
+	{2, 1, 4, 0x15},
+	{2, 2, 4, 0x15},
+	{2, 4, 4, 0x15},
+	{2, 4, 6, 0x22}
+};
+
+static void ipu6_isys_csi2_phy_config_by_port(struct ipu6_isys *isys,
+					      unsigned int port,
+					      unsigned int nlanes)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	void __iomem *base = isys->adev->isp->base;
+	unsigned int bbnum;
+	u32 val, reg, i;
+
+	dev_dbg(dev, "port %u with %u lanes", port, nlanes);
+
+	/* only support <1.5Gbps */
+	for (i = 0; i < IPU6SE_ISYS_PHY_BB_NUM; i++) {
+		/* cphy_dll_ovrd.crcdc_fsm_dlane0 = 13 */
+		reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i);
+		val = readl(base + reg);
+		val |= FIELD_PREP(GENMASK(6, 1), 13);
+		writel(val, base + reg);
+
+		/* cphy_rx_control1.en_crc1 = 1 */
+		reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_RX_CONTROL1(i);
+		val = readl(base + reg);
+		val |= BIT(31);
+		writel(val, base + reg);
+
+		/* dphy_cfg.reserved = 1, .lden_from_dll_ovrd_0 = 1 */
+		reg = IPU6SE_ISYS_PHY_0_BASE + PHY_DPHY_CFG(i);
+		val = readl(base + reg);
+		val |= BIT(25) | BIT(26);
+		writel(val, base + reg);
+
+		/* cphy_dll_ovrd.lden_crcdc_fsm_dlane0 = 1 */
+		reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i);
+		val = readl(base + reg);
+		val |= BIT(0);
+		writel(val, base + reg);
+	}
+
+	/* Front end config, use minimal channel loss */
+	for (i = 0; i < ARRAY_SIZE(phy_port_cfg); i++) {
+		if (phy_port_cfg[i][0] == port &&
+		    phy_port_cfg[i][1] == nlanes) {
+			bbnum = phy_port_cfg[i][2] / 2;
+			reg = IPU6SE_ISYS_PHY_0_BASE + PHY_BB_AFE_CONFIG(bbnum);
+			val = readl(base + reg);
+			val |= phy_port_cfg[i][3];
+			writel(val, base + reg);
+		}
+	}
+}
+
+static void ipu6_isys_csi2_rx_control(struct ipu6_isys *isys)
+{
+	void __iomem *base = isys->adev->isp->base;
+	u32 val, reg;
+
+	reg = CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL;
+	val = readl(base + reg);
+	val |= BIT(0);
+	writel(val, base + CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL);
+
+	reg = CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL;
+	val = readl(base + reg);
+	val |= BIT(0);
+	writel(val, base + CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL);
+
+	reg = CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL;
+	val = readl(base + reg);
+	val |= BIT(0);
+	writel(val, base + CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL);
+
+	reg = CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL;
+	val = readl(base + reg);
+	val |= BIT(0);
+	writel(val, base + CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL);
+}
+
+static int ipu6_isys_csi2_set_port_cfg(struct ipu6_isys *isys,
+				       unsigned int port, unsigned int nlanes)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	unsigned int sip = port / 2;
+	unsigned int index;
+
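+	/* pick the csi2_port_cfg[] entry: 1 lane -> x1x1, 2 -> x2x2, 4 -> x4 + x4 */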
+	switch (nlanes) {
+	case 1:
+		index = 5;
+		break;
+	case 2:
+		index = 6;
+		break;
+	case 4:
+		index = 1;
+		break;
+	default:
+		dev_err(dev, "lanes nr %u is unsupported\n", nlanes);
+		return -EINVAL;
+	}
+
+	dev_dbg(dev, "port config for port %u with %u lanes\n",	port, nlanes);
+
+	writel(csi2_port_cfg[index][2],
+	       isys->pdata->base + CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip));
+
+	return 0;
+}
+
+static void
+ipu6_isys_csi2_set_timing(struct ipu6_isys *isys,
+			  const struct ipu6_isys_csi2_timing *timing,
+			  unsigned int port, unsigned int nlanes)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	void __iomem *reg;
+	u32 port_base;
+	u32 i;
+
+	port_base = (port % 2) ? CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port) :
+		CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port);
+
+	dev_dbg(dev, "set timing for port %u with %u lanes\n", port, nlanes);
+
+	reg = isys->pdata->base + port_base;
+	reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE;
+
+	writel(timing->ctermen, reg);
+
+	reg = isys->pdata->base + port_base;
+	reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE;
+	writel(timing->csettle, reg);
+
+	for (i = 0; i < nlanes; i++) {
+		reg = isys->pdata->base + port_base;
+		reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(i);
+		writel(timing->dtermen, reg);
+
+		reg = isys->pdata->base + port_base;
+		reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(i);
+		writel(timing->dsettle, reg);
+	}
+}
+
+#define DPHY_TIMER_INCR	0x28
+int ipu6_isys_jsl_phy_set_power(struct ipu6_isys *isys,
+				struct ipu6_isys_csi2_config *cfg,
+				const struct ipu6_isys_csi2_timing *timing,
+				bool on)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	void __iomem *isys_base = isys->pdata->base;
+	int ret = 0;
+	u32 nlanes;
+	u32 port;
+
+	if (!on)
+		return 0;
+
+	port = cfg->port;
+	nlanes = cfg->nlanes;
+
+	if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) {
+		dev_warn(dev, "invalid port ID %d\n", port);
+		return -EINVAL;
+	}
+
+	ipu6_isys_csi2_phy_config_by_port(isys, port, nlanes);
+
+	writel(DPHY_TIMER_INCR,
+	       isys->pdata->base + CSI2_HUB_GPREG_DPHY_TIMER_INCR);
+
+	/* set port cfg and rx timing */
+	ipu6_isys_csi2_set_timing(isys, timing, port, nlanes);
+
+	ret = ipu6_isys_csi2_set_port_cfg(isys, port, nlanes);
+	if (ret)
+		return ret;
+
+	ipu6_isys_csi2_rx_control(isys);
+
+	return 0;
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c b/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c
new file mode 100644
index 000000000000..9abf389a05f1
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c
@@ -0,0 +1,720 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2023 Intel Corporation
+ */
+
+#include <linux/bits.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/refcount.h>
+#include <linux/time64.h>
+
+#include <media/v4l2-async.h>
+
+#include "ipu6.h"
+#include "ipu6-bus.h"
+#include "ipu6-isys.h"
+#include "ipu6-isys-csi2.h"
+#include "ipu6-platform-isys-csi2-reg.h"
+
+#define CSI_REG_HUB_GPREG_PHY_CTL(id) (CSI_REG_BASE + 0x18008 + (id) * 0x8)
+#define CSI_REG_HUB_GPREG_PHY_CTL_RESET			BIT(4)
+#define CSI_REG_HUB_GPREG_PHY_CTL_PWR_EN		BIT(0)
+#define CSI_REG_HUB_GPREG_PHY_STATUS(id) (CSI_REG_BASE + 0x1800c + (id) * 0x8)
+#define CSI_REG_HUB_GPREG_PHY_POWER_ACK			BIT(0)
+#define CSI_REG_HUB_GPREG_PHY_READY			BIT(4)
+
+#define MCD_PHY_POWER_STATUS_TIMEOUT			(200 * USEC_PER_MSEC)
+
+/*
+ * bridge to phy in buttress reg map, each phy has 16 kbytes
+ * only 2 phys for TGL U and Y
+ */
+#define IPU6_ISYS_MCD_PHY_BASE(i)			(0x10000 + (i) * 0x4000)
+
+/*
+ *  There are 2 MCD DPHY instances on TGL and 1 MCD DPHY instance on ADL.
+ *  Each MCD PHY has 12 lanes: 8 data lanes and 4 clock lanes.
+ *  CSI port 1, 3 (5, 7) can support max 2 data lanes.
+ *  CSI port 0, 2 (4, 6) can support max 4 data lanes.
+ *  PHY configurations are PPI based instead of port based.
+ *  Left:
+ *  +---------+---------+---------+---------+--------+---------+----------+
+ *  |         |         |         |         |        |         |          |
+ *  | PPI     | PPI5    | PPI4    | PPI3    | PPI2   | PPI1    | PPI0     |
+ *  +---------+---------+---------+---------+--------+---------+----------+
+ *  |         |         |         |         |        |         |          |
+ *  | x4      | unused  | D3      | D2      | C0     | D0      | D1       |
+ *  |---------+---------+---------+---------+--------+---------+----------+
+ *  |         |         |         |         |        |         |          |
+ *  | x2x2    | C1      | D0      | D1      | C0     | D0      | D1       |
+ *  ----------+---------+---------+---------+--------+---------+----------+
+ *  |         |         |         |         |        |         |          |
+ *  | x2x1    | C1      | D0      | unused  | C0     | D0      | D1       |
+ *  +---------+---------+---------+---------+--------+---------+----------+
+ *  |         |         |         |         |        |         |          |
+ *  | x1x1    | C1      | D0      | unused  | C0     | D0      | unused   |
+ *  +---------+---------+---------+---------+--------+---------+----------+
+ *  |         |         |         |         |        |         |          |
+ *  | x1x2    | C1      | D0      | D1      | C0     | D0      | unused   |
+ *  +---------+---------+---------+---------+--------+---------+----------+
+ *
+ *  Right:
+ *  +---------+---------+---------+---------+--------+---------+----------+
+ *  |         |         |         |         |        |         |          |
+ *  | PPI     | PPI6    | PPI7    | PPI8    | PPI9   | PPI10   | PPI11    |
+ *  +---------+---------+---------+---------+--------+---------+----------+
+ *  |         |         |         |         |        |         |          |
+ *  | x4      | D1      | D0      | C2      | D2     | D3      | unused   |
+ *  |---------+---------+---------+---------+--------+---------+----------+
+ *  |         |         |         |         |        |         |          |
+ *  | x2x2    | D1      | D0      | C2      | D1     | D0      | C3       |
+ *  ----------+---------+---------+---------+--------+---------+----------+
+ *  |         |         |         |         |        |         |          |
+ *  | x2x1    | D1      | D0      | C2      | unused | D0      | C3       |
+ *  +---------+---------+---------+---------+--------+---------+----------+
+ *  |         |         |         |         |        |         |          |
+ *  | x1x1    | unused  | D0      | C2      | unused | D0      | C3       |
+ *  +---------+---------+---------+---------+--------+---------+----------+
+ *  |         |         |         |         |        |         |          |
+ *  | x1x2    | unused  | D0      | C2      | D1     | D0      | C3       |
+ *  +---------+---------+---------+---------+--------+---------+----------+
+ *
+ * ppi mapping per phy :
+ *
+ * x4 + x4:
+ * Left : port0 - PPI range {0, 1, 2, 3, 4}
+ * Right: port2 - PPI range {6, 7, 8, 9, 10}
+ *
+ * x4 + x2x2:
+ * Left: port0 - PPI range {0, 1, 2, 3, 4}
+ * Right: port2 - PPI range {6, 7, 8}, port3 - PPI range {9, 10, 11}
+ *
+ * x2x2 + x4:
+ * Left: port0 - PPI range {0, 1, 2}, port1 - PPI range {3, 4, 5}
+ * Right: port2 - PPI range {6, 7, 8, 9, 10}
+ *
+ * x2x2 + x2x2:
+ * Left : port0 - PPI range {0, 1, 2}, port1 - PPI range {3, 4, 5}
+ * Right: port2 - PPI range {6, 7, 8}, port3 - PPI range {9, 10, 11}
+ */
+
+struct phy_reg {
+	u32 reg;
+	u32 val;
+};
+
+static const struct phy_reg common_init_regs[] = {
+	/* for TGL-U, use 0x80000000 */
+	{0x00000040, 0x80000000},
+	{0x00000044, 0x00a80880},
+	{0x00000044, 0x00b80880},
+	{0x00000010, 0x0000078c},
+	{0x00000344, 0x2f4401e2},
+	{0x00000544, 0x924401e2},
+	{0x00000744, 0x594401e2},
+	{0x00000944, 0x624401e2},
+	{0x00000b44, 0xfc4401e2},
+	{0x00000d44, 0xc54401e2},
+	{0x00000f44, 0x034401e2},
+	{0x00001144, 0x8f4401e2},
+	{0x00001344, 0x754401e2},
+	{0x00001544, 0xe94401e2},
+	{0x00001744, 0xcb4401e2},
+	{0x00001944, 0xfa4401e2}
+};
+
+static const struct phy_reg x1_port0_config_regs[] = {
+	{0x00000694, 0xc80060fa},
+	{0x00000680, 0x3d4f78ea},
+	{0x00000690, 0x10a0140b},
+	{0x000006a8, 0xdf04010a},
+	{0x00000700, 0x57050060},
+	{0x00000710, 0x0030001c},
+	{0x00000738, 0x5f004444},
+	{0x0000073c, 0x78464204},
+	{0x00000748, 0x7821f940},
+	{0x0000074c, 0xb2000433},
+	{0x00000494, 0xfe6030fa},
+	{0x00000480, 0x29ef5ed0},
+	{0x00000490, 0x10a0540b},
+	{0x000004a8, 0x7a01010a},
+	{0x00000500, 0xef053460},
+	{0x00000510, 0xe030101c},
+	{0x00000538, 0xdf808444},
+	{0x0000053c, 0xc8422204},
+	{0x00000540, 0x0180088c},
+	{0x00000574, 0x00000000},
+	{0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x1_port1_config_regs[] = {
+	{0x00000c94, 0xc80060fa},
+	{0x00000c80, 0xcf47abea},
+	{0x00000c90, 0x10a0840b},
+	{0x00000ca8, 0xdf04010a},
+	{0x00000d00, 0x57050060},
+	{0x00000d10, 0x0030001c},
+	{0x00000d38, 0x5f004444},
+	{0x00000d3c, 0x78464204},
+	{0x00000d48, 0x7821f940},
+	{0x00000d4c, 0xb2000433},
+	{0x00000a94, 0xc91030fa},
+	{0x00000a80, 0x5a166ed0},
+	{0x00000a90, 0x10a0540b},
+	{0x00000aa8, 0x5d060100},
+	{0x00000b00, 0xef053460},
+	{0x00000b10, 0xa030101c},
+	{0x00000b38, 0xdf808444},
+	{0x00000b3c, 0xc8422204},
+	{0x00000b40, 0x0180088c},
+	{0x00000b74, 0x00000000},
+	{0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x1_port2_config_regs[] = {
+	{0x00001294, 0x28f000fa},
+	{0x00001280, 0x08130cea},
+	{0x00001290, 0x10a0140b},
+	{0x000012a8, 0xd704010a},
+	{0x00001300, 0x8d050060},
+	{0x00001310, 0x0030001c},
+	{0x00001338, 0xdf008444},
+	{0x0000133c, 0x78422204},
+	{0x00001348, 0x7821f940},
+	{0x0000134c, 0x5a000433},
+	{0x00001094, 0x2d20b0fa},
+	{0x00001080, 0xade75dd0},
+	{0x00001090, 0x10a0540b},
+	{0x000010a8, 0xb101010a},
+	{0x00001100, 0x33053460},
+	{0x00001110, 0x0030101c},
+	{0x00001138, 0xdf808444},
+	{0x0000113c, 0xc8422204},
+	{0x00001140, 0x8180088c},
+	{0x00001174, 0x00000000},
+	{0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x1_port3_config_regs[] = {
+	{0x00001894, 0xc80060fa},
+	{0x00001880, 0x0f90fd6a},
+	{0x00001890, 0x10a0840b},
+	{0x000018a8, 0xdf04010a},
+	{0x00001900, 0x57050060},
+	{0x00001910, 0x0030001c},
+	{0x00001938, 0x5f004444},
+	{0x0000193c, 0x78464204},
+	{0x00001948, 0x7821f940},
+	{0x0000194c, 0xb2000433},
+	{0x00001694, 0x3050d0fa},
+	{0x00001680, 0x0ef6d050},
+	{0x00001690, 0x10a0540b},
+	{0x000016a8, 0xe301010a},
+	{0x00001700, 0x69053460},
+	{0x00001710, 0xa030101c},
+	{0x00001738, 0xdf808444},
+	{0x0000173c, 0xc8422204},
+	{0x00001740, 0x0180088c},
+	{0x00001774, 0x00000000},
+	{0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x2_port0_config_regs[] = {
+	{0x00000694, 0xc80060fa},
+	{0x00000680, 0x3d4f78ea},
+	{0x00000690, 0x10a0140b},
+	{0x000006a8, 0xdf04010a},
+	{0x00000700, 0x57050060},
+	{0x00000710, 0x0030001c},
+	{0x00000738, 0x5f004444},
+	{0x0000073c, 0x78464204},
+	{0x00000748, 0x7821f940},
+	{0x0000074c, 0xb2000433},
+	{0x00000494, 0xc80060fa},
+	{0x00000480, 0x29ef5ed8},
+	{0x00000490, 0x10a0540b},
+	{0x000004a8, 0x7a01010a},
+	{0x00000500, 0xef053460},
+	{0x00000510, 0xe030101c},
+	{0x00000538, 0xdf808444},
+	{0x0000053c, 0xc8422204},
+	{0x00000540, 0x0180088c},
+	{0x00000574, 0x00000000},
+	{0x00000294, 0xc80060fa},
+	{0x00000280, 0xcb45b950},
+	{0x00000290, 0x10a0540b},
+	{0x000002a8, 0x8c01010a},
+	{0x00000300, 0xef053460},
+	{0x00000310, 0x8030101c},
+	{0x00000338, 0x41808444},
+	{0x0000033c, 0x32422204},
+	{0x00000340, 0x0180088c},
+	{0x00000374, 0x00000000},
+	{0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x2_port1_config_regs[] = {
+	{0x00000c94, 0xc80060fa},
+	{0x00000c80, 0xcf47abea},
+	{0x00000c90, 0x10a0840b},
+	{0x00000ca8, 0xdf04010a},
+	{0x00000d00, 0x57050060},
+	{0x00000d10, 0x0030001c},
+	{0x00000d38, 0x5f004444},
+	{0x00000d3c, 0x78464204},
+	{0x00000d48, 0x7821f940},
+	{0x00000d4c, 0xb2000433},
+	{0x00000a94, 0xc80060fa},
+	{0x00000a80, 0x5a166ed8},
+	{0x00000a90, 0x10a0540b},
+	{0x00000aa8, 0x7a01010a},
+	{0x00000b00, 0xef053460},
+	{0x00000b10, 0xa030101c},
+	{0x00000b38, 0xdf808444},
+	{0x00000b3c, 0xc8422204},
+	{0x00000b40, 0x0180088c},
+	{0x00000b74, 0x00000000},
+	{0x00000894, 0xc80060fa},
+	{0x00000880, 0x4d4f21d0},
+	{0x00000890, 0x10a0540b},
+	{0x000008a8, 0x5601010a},
+	{0x00000900, 0xef053460},
+	{0x00000910, 0x8030101c},
+	{0x00000938, 0xdf808444},
+	{0x0000093c, 0xc8422204},
+	{0x00000940, 0x0180088c},
+	{0x00000974, 0x00000000},
+	{0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x2_port2_config_regs[] = {
+	{0x00001294, 0xc80060fa},
+	{0x00001280, 0x08130cea},
+	{0x00001290, 0x10a0140b},
+	{0x000012a8, 0xd704010a},
+	{0x00001300, 0x8d050060},
+	{0x00001310, 0x0030001c},
+	{0x00001338, 0xdf008444},
+	{0x0000133c, 0x78422204},
+	{0x00001348, 0x7821f940},
+	{0x0000134c, 0x5a000433},
+	{0x00001094, 0xc80060fa},
+	{0x00001080, 0xade75dd8},
+	{0x00001090, 0x10a0540b},
+	{0x000010a8, 0xb101010a},
+	{0x00001100, 0x33053460},
+	{0x00001110, 0x0030101c},
+	{0x00001138, 0xdf808444},
+	{0x0000113c, 0xc8422204},
+	{0x00001140, 0x8180088c},
+	{0x00001174, 0x00000000},
+	{0x00000e94, 0xc80060fa},
+	{0x00000e80, 0x0fbf16d0},
+	{0x00000e90, 0x10a0540b},
+	{0x00000ea8, 0x7a01010a},
+	{0x00000f00, 0xf5053460},
+	{0x00000f10, 0xc030101c},
+	{0x00000f38, 0xdf808444},
+	{0x00000f3c, 0xc8422204},
+	{0x00000f40, 0x8180088c},
+	{0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x2_port3_config_regs[] = {
+	{0x00001894, 0xc80060fa},
+	{0x00001880, 0x0f90fd6a},
+	{0x00001890, 0x10a0840b},
+	{0x000018a8, 0xdf04010a},
+	{0x00001900, 0x57050060},
+	{0x00001910, 0x0030001c},
+	{0x00001938, 0x5f004444},
+	{0x0000193c, 0x78464204},
+	{0x00001948, 0x7821f940},
+	{0x0000194c, 0xb2000433},
+	{0x00001694, 0xc80060fa},
+	{0x00001680, 0x0ef6d058},
+	{0x00001690, 0x10a0540b},
+	{0x000016a8, 0x7a01010a},
+	{0x00001700, 0x69053460},
+	{0x00001710, 0xa030101c},
+	{0x00001738, 0xdf808444},
+	{0x0000173c, 0xc8422204},
+	{0x00001740, 0x0180088c},
+	{0x00001774, 0x00000000},
+	{0x00001494, 0xc80060fa},
+	{0x00001480, 0xf9d34bd0},
+	{0x00001490, 0x10a0540b},
+	{0x000014a8, 0x7a01010a},
+	{0x00001500, 0x1b053460},
+	{0x00001510, 0x0030101c},
+	{0x00001538, 0xdf808444},
+	{0x0000153c, 0xc8422204},
+	{0x00001540, 0x8180088c},
+	{0x00001574, 0x00000000},
+	{0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x4_port0_config_regs[] = {
+	{0x00000694, 0xc80060fa},
+	{0x00000680, 0x3d4f78fa},
+	{0x00000690, 0x10a0140b},
+	{0x000006a8, 0xdf04010a},
+	{0x00000700, 0x57050060},
+	{0x00000710, 0x0030001c},
+	{0x00000738, 0x5f004444},
+	{0x0000073c, 0x78464204},
+	{0x00000748, 0x7821f940},
+	{0x0000074c, 0xb2000433},
+	{0x00000494, 0xfe6030fa},
+	{0x00000480, 0x29ef5ed8},
+	{0x00000490, 0x10a0540b},
+	{0x000004a8, 0x7a01010a},
+	{0x00000500, 0xef053460},
+	{0x00000510, 0xe030101c},
+	{0x00000538, 0xdf808444},
+	{0x0000053c, 0xc8422204},
+	{0x00000540, 0x0180088c},
+	{0x00000574, 0x00000004},
+	{0x00000294, 0x23e030fa},
+	{0x00000280, 0xcb45b950},
+	{0x00000290, 0x10a0540b},
+	{0x000002a8, 0x8c01010a},
+	{0x00000300, 0xef053460},
+	{0x00000310, 0x8030101c},
+	{0x00000338, 0x41808444},
+	{0x0000033c, 0x32422204},
+	{0x00000340, 0x0180088c},
+	{0x00000374, 0x00000004},
+	{0x00000894, 0x5620b0fa},
+	{0x00000880, 0x4d4f21dc},
+	{0x00000890, 0x10a0540b},
+	{0x000008a8, 0x5601010a},
+	{0x00000900, 0xef053460},
+	{0x00000910, 0x8030101c},
+	{0x00000938, 0xdf808444},
+	{0x0000093c, 0xc8422204},
+	{0x00000940, 0x0180088c},
+	{0x00000974, 0x00000004},
+	{0x00000a94, 0xc91030fa},
+	{0x00000a80, 0x5a166ecc},
+	{0x00000a90, 0x10a0540b},
+	{0x00000aa8, 0x5d01010a},
+	{0x00000b00, 0xef053460},
+	{0x00000b10, 0xa030101c},
+	{0x00000b38, 0xdf808444},
+	{0x00000b3c, 0xc8422204},
+	{0x00000b40, 0x0180088c},
+	{0x00000b74, 0x00000004},
+	{0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x4_port1_config_regs[] = {
+	{0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x4_port2_config_regs[] = {
+	{0x00001294, 0x28f000fa},
+	{0x00001280, 0x08130cfa},
+	{0x00001290, 0x10c0140b},
+	{0x000012a8, 0xd704010a},
+	{0x00001300, 0x8d050060},
+	{0x00001310, 0x0030001c},
+	{0x00001338, 0xdf008444},
+	{0x0000133c, 0x78422204},
+	{0x00001348, 0x7821f940},
+	{0x0000134c, 0x5a000433},
+	{0x00001094, 0x2d20b0fa},
+	{0x00001080, 0xade75dd8},
+	{0x00001090, 0x10a0540b},
+	{0x000010a8, 0xb101010a},
+	{0x00001100, 0x33053460},
+	{0x00001110, 0x0030101c},
+	{0x00001138, 0xdf808444},
+	{0x0000113c, 0xc8422204},
+	{0x00001140, 0x8180088c},
+	{0x00001174, 0x00000004},
+	{0x00000e94, 0xd308d0fa},
+	{0x00000e80, 0x0fbf16d0},
+	{0x00000e90, 0x10a0540b},
+	{0x00000ea8, 0x2c01010a},
+	{0x00000f00, 0xf5053460},
+	{0x00000f10, 0xc030101c},
+	{0x00000f38, 0xdf808444},
+	{0x00000f3c, 0xc8422204},
+	{0x00000f40, 0x8180088c},
+	{0x00000f74, 0x00000004},
+	{0x00001494, 0x136850fa},
+	{0x00001480, 0xf9d34bdc},
+	{0x00001490, 0x10a0540b},
+	{0x000014a8, 0x5a01010a},
+	{0x00001500, 0x1b053460},
+	{0x00001510, 0x0030101c},
+	{0x00001538, 0xdf808444},
+	{0x0000153c, 0xc8422204},
+	{0x00001540, 0x8180088c},
+	{0x00001574, 0x00000004},
+	{0x00001694, 0x3050d0fa},
+	{0x00001680, 0x0ef6d04c},
+	{0x00001690, 0x10a0540b},
+	{0x000016a8, 0xe301010a},
+	{0x00001700, 0x69053460},
+	{0x00001710, 0xa030101c},
+	{0x00001738, 0xdf808444},
+	{0x0000173c, 0xc8422204},
+	{0x00001740, 0x0180088c},
+	{0x00001774, 0x00000004},
+	{0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x4_port3_config_regs[] = {
+	{0x00000000, 0x00000000}
+};
+
+static const struct phy_reg *x1_config_regs[4] = {
+	x1_port0_config_regs,
+	x1_port1_config_regs,
+	x1_port2_config_regs,
+	x1_port3_config_regs
+};
+
+static const struct phy_reg *x2_config_regs[4] = {
+	x2_port0_config_regs,
+	x2_port1_config_regs,
+	x2_port2_config_regs,
+	x2_port3_config_regs
+};
+
+static const struct phy_reg *x4_config_regs[4] = {
+	x4_port0_config_regs,
+	x4_port1_config_regs,
+	x4_port2_config_regs,
+	x4_port3_config_regs
+};
+
+static const struct phy_reg **config_regs[3] = {
+	x1_config_regs,
+	x2_config_regs,
+	x4_config_regs
+};
+
+static int ipu6_isys_mcd_phy_powerup_ack(struct ipu6_isys *isys, u8 id)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	void __iomem *isys_base = isys->pdata->base;
+	u32 val;
+	int ret;
+
+	val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id));
+	val |= CSI_REG_HUB_GPREG_PHY_CTL_PWR_EN;
+	writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id));
+
+	ret = readl_poll_timeout(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(id),
+				 val, val & CSI_REG_HUB_GPREG_PHY_POWER_ACK,
+				 200, MCD_PHY_POWER_STATUS_TIMEOUT);
+	if (ret)
+		dev_err(dev, "PHY%d powerup ack timeout", id);
+
+	return ret;
+}
+
+static int ipu6_isys_mcd_phy_powerdown_ack(struct ipu6_isys *isys, u8 id)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	void __iomem *isys_base = isys->pdata->base;
+	u32 val;
+	int ret;
+
+	writel(0, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id));
+	ret = readl_poll_timeout(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(id),
+				 val, !(val & CSI_REG_HUB_GPREG_PHY_POWER_ACK),
+				 200, MCD_PHY_POWER_STATUS_TIMEOUT);
+	if (ret)
+		dev_err(dev, "PHY%d powerdown ack timeout", id);
+
+	return ret;
+}
+
+static void ipu6_isys_mcd_phy_reset(struct ipu6_isys *isys, u8 id, bool assert)
+{
+	void __iomem *isys_base = isys->pdata->base;
+	u32 val;
+
+	val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id));
+	if (assert)
+		val |= CSI_REG_HUB_GPREG_PHY_CTL_RESET;
+	else
+		val &= ~(CSI_REG_HUB_GPREG_PHY_CTL_RESET);
+
+	writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CTL(id));
+}
+
+static int ipu6_isys_mcd_phy_ready(struct ipu6_isys *isys, u8 id)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	void __iomem *isys_base = isys->pdata->base;
+	u32 val;
+	int ret;
+
+	ret = readl_poll_timeout(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(id),
+				 val, val & CSI_REG_HUB_GPREG_PHY_READY,
+				 200, MCD_PHY_POWER_STATUS_TIMEOUT);
+	if (ret)
+		dev_err(dev, "PHY%d ready ack timeout", id);
+
+	return ret;
+}
+
+static void ipu6_isys_mcd_phy_common_init(struct ipu6_isys *isys)
+{
+	struct ipu6_bus_device *adev = isys->adev;
+	struct ipu6_device *isp = adev->isp;
+	void __iomem *isp_base = isp->base;
+	struct sensor_async_sd *s_asd;
+	struct v4l2_async_connection *asc;
+	void __iomem *phy_base;
+	unsigned int i;
+	u8 phy_id;
+
+	list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) {
+		s_asd = container_of(asc, struct sensor_async_sd, asc);
+		phy_id = s_asd->csi2.port / 4;
+		phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id);
+
+		for (i = 0; i < ARRAY_SIZE(common_init_regs); i++)
+			writel(common_init_regs[i].val,
+			       phy_base + common_init_regs[i].reg);
+	}
+}
+
+static int ipu6_isys_driver_port_to_phy_port(struct ipu6_isys_csi2_config *cfg)
+{
+	int phy_port;
+	int ret;
+
+	if (!(cfg->nlanes == 4 || cfg->nlanes == 2 || cfg->nlanes == 1))
+		return -EINVAL;
+
+	/* B,F -> C0 A,E -> C1 C,G -> C2 D,H -> C4 */
+	/* normalize driver port number */
+	phy_port = cfg->port % 4;
+
+	/* swap port number only for A and B */
+	if (phy_port == 0)
+		phy_port = 1;
+	else if (phy_port == 1)
+		phy_port = 0;
+
+	ret = phy_port;
+
+	/* check validity per lane configuration */
+	if (cfg->nlanes == 4 && !(phy_port == 0 || phy_port == 2))
+		ret = -EINVAL;
+	else if ((cfg->nlanes == 2 || cfg->nlanes == 1) &&
+		 !(phy_port >= 0 && phy_port <= 3))
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static int ipu6_isys_mcd_phy_config(struct ipu6_isys *isys)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	struct ipu6_bus_device *adev = isys->adev;
+	const struct phy_reg **phy_config_regs;
+	struct ipu6_device *isp = adev->isp;
+	void __iomem *isp_base = isp->base;
+	struct sensor_async_sd *s_asd;
+	struct ipu6_isys_csi2_config cfg;
+	struct v4l2_async_connection *asc;
+	u8 phy_port, phy_id;
+	unsigned int i;
+	void __iomem *phy_base;
+
+	list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) {
+		s_asd = container_of(asc, struct sensor_async_sd, asc);
+		cfg.port = s_asd->csi2.port;
+		cfg.nlanes = s_asd->csi2.nlanes;
+		phy_port = ipu6_isys_driver_port_to_phy_port(&cfg);
+		if (phy_port < 0) {
+			dev_err(dev, "invalid port %d for lane %d", cfg.port,
+				cfg.nlanes);
+			return -ENXIO;
+		}
+
+		phy_id = cfg.port / 4;
+		phy_base = isp_base + IPU6_ISYS_MCD_PHY_BASE(phy_id);
+		dev_dbg(dev, "port%d PHY%u lanes %u\n", cfg.port, phy_id,
+			cfg.nlanes);
+
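+		/* nlanes 1/2/4 select the x1/x2/x4 tables (index nlanes / 2) */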
+		phy_config_regs = config_regs[cfg.nlanes / 2];
+		cfg.port = phy_port;
+		for (i = 0; phy_config_regs[cfg.port][i].reg; i++)
+			writel(phy_config_regs[cfg.port][i].val,
+			       phy_base + phy_config_regs[cfg.port][i].reg);
+	}
+
+	return 0;
+}
+
+#define CSI_MCD_PHY_NUM		2
+static refcount_t phy_power_ref_count[CSI_MCD_PHY_NUM];
+
+int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys,
+				struct ipu6_isys_csi2_config *cfg,
+				const struct ipu6_isys_csi2_timing *timing,
+				bool on)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	void __iomem *isys_base = isys->pdata->base;
+	u8 port, phy_id;
+	refcount_t *ref;
+	int ret;
+
+	port = cfg->port;
+	phy_id = port / 4;
+
+	ref = &phy_power_ref_count[phy_id];
+
+	dev_dbg(dev, "for phy %d port %d, lanes: %d\n", phy_id, port,
+		cfg->nlanes);
+
+	if (!isys_base || port >= isys->pdata->ipdata->csi2.nports) {
+		dev_warn(dev, "invalid port ID %d\n", port);
+		return -EINVAL;
+	}
+
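+	/* PHYs are shared between ports: only the first user powers one up */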
+	if (on) {
+		if (refcount_read(ref)) {
+			dev_dbg(dev, "for phy %d is already UP", phy_id);
+			refcount_inc(ref);
+			return 0;
+		}
+
+		ret = ipu6_isys_mcd_phy_powerup_ack(isys, phy_id);
+		if (ret)
+			return ret;
+
+		ipu6_isys_mcd_phy_reset(isys, phy_id, 0);
+		ipu6_isys_mcd_phy_common_init(isys);
+
+		ret = ipu6_isys_mcd_phy_config(isys);
+		if (ret)
+			return ret;
+
+		ipu6_isys_mcd_phy_reset(isys, phy_id, 1);
+		ret = ipu6_isys_mcd_phy_ready(isys, phy_id);
+		if (ret)
+			return ret;
+
+		refcount_set(ref, 1);
+		return 0;
+	}
+
+	if (!refcount_dec_and_test(ref))
+		return 0;
+
+	return ipu6_isys_mcd_phy_powerdown_ack(isys, phy_id);
+}
-- 
2.43.0

From a3b3658e56d3fe9e0cb03166e38a023dba853594 Mon Sep 17 00:00:00 2001
From: Bingbu Cao <bingbu.cao@intel.com>
Date: Thu, 11 Jan 2024 14:55:24 +0800
Subject: [PATCH 10/31] media: intel/ipu6: add input system driver

The input system driver does basic isys hardware setup and IRQ handling,
and works with fwnode and v4l2 to register the ISYS v4l2 devices.

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Link: https://lore.kernel.org/r/20240111065531.2418836-11-bingbu.cao@intel.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 drivers/media/pci/intel/ipu6/ipu6-isys.c | 1353 ++++++++++++++++++++++
 drivers/media/pci/intel/ipu6/ipu6-isys.h |  207 ++++
 2 files changed, 1560 insertions(+)
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys.h

diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c
new file mode 100644
index 000000000000..e8983363a0da
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c
@@ -0,0 +1,1353 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2023 Intel Corporation
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#include <media/ipu-bridge.h>
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#include "ipu6-bus.h"
+#include "ipu6-cpd.h"
+#include "ipu6-isys.h"
+#include "ipu6-isys-csi2.h"
+#include "ipu6-mmu.h"
+#include "ipu6-platform-buttress-regs.h"
+#include "ipu6-platform-isys-csi2-reg.h"
+#include "ipu6-platform-regs.h"
+
+#define IPU6_BUTTRESS_FABIC_CONTROL		0x68
+#define GDA_ENABLE_IWAKE_INDEX			2
+#define GDA_IWAKE_THRESHOLD_INDEX		1
+#define GDA_IRQ_CRITICAL_THRESHOLD_INDEX	0
+#define GDA_MEMOPEN_THRESHOLD_INDEX		3
+#define DEFAULT_DID_RATIO			90
+#define DEFAULT_IWAKE_THRESHOLD			0x42
+#define DEFAULT_MEM_OPEN_TIME			10
+#define ONE_THOUSAND_MICROSECOND		1000
+/* One page is 2KB, 8 x 16 x 16 = 2048B = 2KB */
+#define ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE	0x800
+
+/* LTR & DID value are 10 bit at most */
+#define LTR_DID_VAL_MAX				1023
+#define LTR_DEFAULT_VALUE			0x70503c19
+#define FILL_TIME_DEFAULT_VALUE			0xfff0783c
+#define LTR_DID_PKGC_2R				20
+#define LTR_SCALE_DEFAULT			5
+#define LTR_SCALE_1024NS			2
+#define DID_SCALE_1US				2
+#define DID_SCALE_32US				3
+#define REG_PKGC_PMON_CFG			0xb00
+
+#define VAL_PKGC_PMON_CFG_RESET			0x38
+#define VAL_PKGC_PMON_CFG_START			0x7
+
+#define IS_PIXEL_BUFFER_PAGES			0x80
+/*
+ * when iwake mode is disabled, the critical threshold is statically set
+ * to 75% of the IS pixel buffer, criticalThreshold = (128 * 3) / 4
+ */
+#define CRITICAL_THRESHOLD_IWAKE_DISABLE	(IS_PIXEL_BUFFER_PAGES * 3 / 4)
+
+union fabric_ctrl {
+	struct {
+		u16 ltr_val   : 10;
+		u16 ltr_scale : 3;
+		u16 reserved  : 3;
+		u16 did_val   : 10;
+		u16 did_scale : 3;
+		u16 reserved2 : 1;
+		u16 keep_power_in_D0   : 1;
+		u16 keep_power_override : 1;
+	} bits;
+	u32 value;
+};
+
+enum ltr_did_type {
+	LTR_IWAKE_ON,
+	LTR_IWAKE_OFF,
+	LTR_ISYS_ON,
+	LTR_ISYS_OFF,
+	LTR_ENHANNCE_IWAKE,
+	LTR_TYPE_MAX
+};
+
+#define ISYS_PM_QOS_VALUE	300
+
+static int isys_isr_one(struct ipu6_bus_device *adev);
+
+static int
+isys_complete_ext_device_registration(struct ipu6_isys *isys,
+				      struct v4l2_subdev *sd,
+				      struct ipu6_isys_csi2_config *csi2)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < sd->entity.num_pads; i++) {
+		if (sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
+			break;
+	}
+
+	if (i == sd->entity.num_pads) {
+		dev_warn(dev, "no src pad in external entity\n");
+		ret = -ENOENT;
+		goto unregister_subdev;
+	}
+
+	ret = media_create_pad_link(&sd->entity, i,
+				    &isys->csi2[csi2->port].asd.sd.entity,
+				    0, 0);
+	if (ret) {
+		dev_warn(dev, "can't create link\n");
+		goto unregister_subdev;
+	}
+
+	isys->csi2[csi2->port].nlanes = csi2->nlanes;
+
+	return 0;
+
+unregister_subdev:
+	v4l2_device_unregister_subdev(sd);
+
+	return ret;
+}
+
+static void isys_stream_init(struct ipu6_isys *isys)
+{
+	u32 i;
+
+	for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) {
+		mutex_init(&isys->streams[i].mutex);
+		init_completion(&isys->streams[i].stream_open_completion);
+		init_completion(&isys->streams[i].stream_close_completion);
+		init_completion(&isys->streams[i].stream_start_completion);
+		init_completion(&isys->streams[i].stream_stop_completion);
+		INIT_LIST_HEAD(&isys->streams[i].queues);
+		isys->streams[i].isys = isys;
+		isys->streams[i].stream_handle = i;
+		isys->streams[i].vc = INVALID_VC_ID;
+	}
+}
+
+static void isys_csi2_unregister_subdevices(struct ipu6_isys *isys)
+{
+	const struct ipu6_isys_internal_csi2_pdata *csi2 =
+		&isys->pdata->ipdata->csi2;
+	unsigned int i;
+
+	for (i = 0; i < csi2->nports; i++)
+		ipu6_isys_csi2_cleanup(&isys->csi2[i]);
+}
+
+static int isys_csi2_register_subdevices(struct ipu6_isys *isys)
+{
+	const struct ipu6_isys_internal_csi2_pdata *csi2_pdata =
+		&isys->pdata->ipdata->csi2;
+	struct device *dev = &isys->adev->auxdev.dev;
+	unsigned int i;
+	int ret;
+
+	isys->csi2 = devm_kcalloc(dev, csi2_pdata->nports,
+				  sizeof(*isys->csi2), GFP_KERNEL);
+	if (!isys->csi2)
+		return -ENOMEM;
+
+	for (i = 0; i < csi2_pdata->nports; i++) {
+		ret = ipu6_isys_csi2_init(&isys->csi2[i], isys,
+					  isys->pdata->base +
+					  csi2_pdata->offsets[i], i);
+		if (ret)
+			goto fail;
+
+		isys->isr_csi2_bits |= IPU6_ISYS_UNISPART_IRQ_CSI2(i);
+	}
+
+	return 0;
+
+fail:
+	while (i--)
+		ipu6_isys_csi2_cleanup(&isys->csi2[i]);
+
+	return ret;
+}
+
+static int isys_csi2_create_media_links(struct ipu6_isys *isys)
+{
+	const struct ipu6_isys_internal_csi2_pdata *csi2_pdata =
+		&isys->pdata->ipdata->csi2;
+	struct device *dev = &isys->adev->auxdev.dev;
+	unsigned int i, j, k;
+	int ret;
+
+	for (i = 0; i < csi2_pdata->nports; i++) {
+		struct media_entity *sd = &isys->csi2[i].asd.sd.entity;
+
+		for (j = 0; j < NR_OF_VIDEO_DEVICE; j++) {
+			struct media_entity *v = &isys->av[j].vdev.entity;
+			u32 flag = MEDIA_LNK_FL_DYNAMIC;
+
+			for (k = CSI2_PAD_SRC; k < NR_OF_CSI2_PADS; k++) {
+				ret = media_create_pad_link(sd, k, v, 0, flag);
+				if (ret) {
+					dev_err(dev, "CSI2 can't create link\n");
+					return ret;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void isys_unregister_video_devices(struct ipu6_isys *isys)
+{
+	unsigned int i;
+
+	for (i = 0; i < NR_OF_VIDEO_DEVICE; i++)
+		ipu6_isys_video_cleanup(&isys->av[i]);
+}
+
+static int isys_register_video_devices(struct ipu6_isys *isys)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < NR_OF_VIDEO_DEVICE; i++) {
+		snprintf(isys->av[i].vdev.name, sizeof(isys->av[i].vdev.name),
+			 IPU6_ISYS_ENTITY_PREFIX " ISYS Capture %u", i);
+		isys->av[i].isys = isys;
+		isys->av[i].aq.vbq.buf_struct_size =
+			sizeof(struct ipu6_isys_video_buffer);
+		isys->av[i].pfmt = &ipu6_isys_pfmts[0];
+
+		ret = ipu6_isys_video_init(&isys->av[i]);
+		if (ret)
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	while (i--)
+		ipu6_isys_video_cleanup(&isys->av[i]);
+
+	return ret;
+}
+
+void isys_setup_hw(struct ipu6_isys *isys)
+{
+	void __iomem *base = isys->pdata->base;
+	const u8 *thd = isys->pdata->ipdata->hw_variant.cdc_fifo_threshold;
+	u32 irqs = 0;
+	unsigned int i, nports;
+
+	nports = isys->pdata->ipdata->csi2.nports;
+
+	/* Enable irqs for all MIPI ports */
+	for (i = 0; i < nports; i++)
+		irqs |= IPU6_ISYS_UNISPART_IRQ_CSI2(i);
+
+	writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_edge);
+	writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_lnp);
+	writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_mask);
+	writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_enable);
+	writel(GENMASK(19, 0),
+	       base + isys->pdata->ipdata->csi2.ctrl0_irq_clear);
+
+	irqs = ISYS_UNISPART_IRQS;
+	writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_EDGE);
+	writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE);
+	writel(GENMASK(28, 0), base + IPU6_REG_ISYS_UNISPART_IRQ_CLEAR);
+	writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_MASK);
+	writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_ENABLE);
+
+	writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_REG);
+	writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_MUX_REG);
+
+	/* Write CDC FIFO threshold values for isys */
+	for (i = 0; i < isys->pdata->ipdata->hw_variant.cdc_fifos; i++)
+		writel(thd[i], base + IPU6_REG_ISYS_CDC_THRESHOLD(i));
+}
+
+static void ipu6_isys_csi2_isr(struct ipu6_isys_csi2 *csi2)
+{
+	struct ipu6_isys_stream *stream;
+	unsigned int i;
+	u32 status;
+	int source;
+
+	ipu6_isys_register_errors(csi2);
+
+	status = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+		       CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);
+
+	writel(status, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+	       CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+
+	source = csi2->asd.source;
+	for (i = 0; i < NR_OF_CSI2_VC; i++) {
+		if (status & IPU_CSI_RX_IRQ_FS_VC(i)) {
+			stream = ipu6_isys_query_stream_by_source(csi2->isys,
+								  source, i);
+			if (stream) {
+				ipu6_isys_csi2_sof_event_by_stream(stream);
+				ipu6_isys_put_stream(stream);
+			}
+		}
+
+		if (status & IPU_CSI_RX_IRQ_FE_VC(i)) {
+			stream = ipu6_isys_query_stream_by_source(csi2->isys,
+								  source, i);
+			if (stream) {
+				ipu6_isys_csi2_eof_event_by_stream(stream);
+				ipu6_isys_put_stream(stream);
+			}
+		}
+	}
+}
+
+irqreturn_t isys_isr(struct ipu6_bus_device *adev)
+{
+	struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev);
+	void __iomem *base = isys->pdata->base;
+	u32 status_sw, status_csi;
+	u32 ctrl0_status, ctrl0_clear;
+
+	spin_lock(&isys->power_lock);
+	if (!isys->power) {
+		spin_unlock(&isys->power_lock);
+		return IRQ_NONE;
+	}
+
+	ctrl0_status = isys->pdata->ipdata->csi2.ctrl0_irq_status;
+	ctrl0_clear = isys->pdata->ipdata->csi2.ctrl0_irq_clear;
+
+	status_csi = readl(isys->pdata->base + ctrl0_status);
+	status_sw = readl(isys->pdata->base +
+			  IPU6_REG_ISYS_UNISPART_IRQ_STATUS);
+
+	writel(ISYS_UNISPART_IRQS & ~IPU6_ISYS_UNISPART_IRQ_SW,
+	       base + IPU6_REG_ISYS_UNISPART_IRQ_MASK);
+
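+	/* handle and clear IRQs until CSI and SW status are both clear */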
+	do {
+		writel(status_csi, isys->pdata->base + ctrl0_clear);
+
+		writel(status_sw, isys->pdata->base +
+		       IPU6_REG_ISYS_UNISPART_IRQ_CLEAR);
+
+		if (isys->isr_csi2_bits & status_csi) {
+			unsigned int i;
+
+			for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) {
+				/* IRQ from a port that is not enabled */
+				if (!isys->csi2[i].base)
+					continue;
+				if (status_csi & IPU6_ISYS_UNISPART_IRQ_CSI2(i))
+					ipu6_isys_csi2_isr(&isys->csi2[i]);
+			}
+		}
+
+		writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_REG);
+
+		if (!isys_isr_one(adev))
+			status_sw = IPU6_ISYS_UNISPART_IRQ_SW;
+		else
+			status_sw = 0;
+
+		status_csi = readl(isys->pdata->base + ctrl0_status);
+		status_sw |= readl(isys->pdata->base +
+				   IPU6_REG_ISYS_UNISPART_IRQ_STATUS);
+	} while ((status_csi & isys->isr_csi2_bits) ||
+		 (status_sw & IPU6_ISYS_UNISPART_IRQ_SW));
+
+	writel(ISYS_UNISPART_IRQS, base + IPU6_REG_ISYS_UNISPART_IRQ_MASK);
+
+	spin_unlock(&isys->power_lock);
+
+	return IRQ_HANDLED;
+}
+
+static void get_lut_ltrdid(struct ipu6_isys *isys, struct ltr_did *pltr_did)
+{
+	struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark;
+	struct ltr_did ltrdid_default;
+
+	ltrdid_default.lut_ltr.value = LTR_DEFAULT_VALUE;
+	ltrdid_default.lut_fill_time.value = FILL_TIME_DEFAULT_VALUE;
+
+	if (iwake_watermark->ltrdid.lut_ltr.value)
+		*pltr_did = iwake_watermark->ltrdid;
+	else
+		*pltr_did = ltrdid_default;
+}
+
+static int set_iwake_register(struct ipu6_isys *isys, u32 index, u32 value)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	u32 req_id = index;
+	u32 offset = 0;
+	int ret;
+
+	ret = ipu6_fw_isys_send_proxy_token(isys, req_id, index, offset, value);
+	if (ret)
+		dev_err(dev, "write %d failed %d", index, ret);
+
+	return ret;
+}
+
+/*
+ * When the input system is powered up, before enabling any new sensor capture
+ * or after disabling any sensor capture, the following values need to be set:
+ * LTR_value = LTR(usec) from calculation;
+ * LTR_scale = 2;
+ * DID_value = DID(usec) from calculation;
+ * DID_scale = 2;
+ *
+ * When the input system is powered down, the LTR and DID values
+ * must be returned to the default values:
+ * LTR_value = 1023;
+ * LTR_scale = 5;
+ * DID_value = 1023;
+ * DID_scale = 2;
+ */
+static void set_iwake_ltrdid(struct ipu6_isys *isys, u16 ltr, u16 did,
+			     enum ltr_did_type use)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	u16 ltr_val, ltr_scale = LTR_SCALE_1024NS;
+	u16 did_val, did_scale = DID_SCALE_1US;
+	struct ipu6_device *isp = isys->adev->isp;
+	union fabric_ctrl fc;
+
+	switch (use) {
+	case LTR_IWAKE_ON:
+		ltr_val = min_t(u16, ltr, (u16)LTR_DID_VAL_MAX);
+		did_val = min_t(u16, did, (u16)LTR_DID_VAL_MAX);
+		ltr_scale = (ltr == LTR_DID_VAL_MAX &&
+			     did == LTR_DID_VAL_MAX) ?
+			LTR_SCALE_DEFAULT : LTR_SCALE_1024NS;
+		break;
+	case LTR_ISYS_ON:
+	case LTR_IWAKE_OFF:
+		ltr_val = LTR_DID_PKGC_2R;
+		did_val = LTR_DID_PKGC_2R;
+		break;
+	case LTR_ISYS_OFF:
+		ltr_val   = LTR_DID_VAL_MAX;
+		did_val   = LTR_DID_VAL_MAX;
+		ltr_scale = LTR_SCALE_DEFAULT;
+		break;
+	case LTR_ENHANNCE_IWAKE:
+		if (ltr == LTR_DID_VAL_MAX && did == LTR_DID_VAL_MAX) {
+			ltr_val = LTR_DID_VAL_MAX;
+			did_val = LTR_DID_VAL_MAX;
+			ltr_scale = LTR_SCALE_DEFAULT;
+		} else if (did < ONE_THOUSAND_MICROSECOND) {
+			ltr_val = ltr;
+			did_val = did;
+		} else {
+			ltr_val = ltr;
+			/* div 90% value by 32 to account for scale change */
+			did_val = did / 32;
+			did_scale = DID_SCALE_32US;
+		}
+		break;
+	default:
+		ltr_val   = LTR_DID_VAL_MAX;
+		did_val   = LTR_DID_VAL_MAX;
+		ltr_scale = LTR_SCALE_DEFAULT;
+		break;
+	}
+
+	fc.value = readl(isp->base + IPU6_BUTTRESS_FABIC_CONTROL);
+	fc.bits.ltr_val = ltr_val;
+	fc.bits.ltr_scale = ltr_scale;
+	fc.bits.did_val = did_val;
+	fc.bits.did_scale = did_scale;
+
+	dev_dbg(dev, "ltr: value %u scale %u, did: value %u scale %u\n",
+		ltr_val, ltr_scale, did_val, did_scale);
+	writel(fc.value, isp->base + IPU6_BUTTRESS_FABIC_CONTROL);
+}
+
+/*
+ * The driver may clear the GDA_ENABLE_IWAKE register before the FW configures
+ * the stream, for debug purposes. Otherwise the driver should not access this
+ * register.
+ */
+static void enable_iwake(struct ipu6_isys *isys, bool enable)
+{
+	struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark;
+	int ret;
+
+	mutex_lock(&iwake_watermark->mutex);
+
+	if (iwake_watermark->iwake_enabled == enable) {
+		mutex_unlock(&iwake_watermark->mutex);
+		return;
+	}
+
+	ret = set_iwake_register(isys, GDA_ENABLE_IWAKE_INDEX, enable);
+	if (!ret)
+		iwake_watermark->iwake_enabled = enable;
+
+	mutex_unlock(&iwake_watermark->mutex);
+}
+
+void update_watermark_setting(struct ipu6_isys *isys)
+{
+	struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark;
+	u32 iwake_threshold, iwake_critical_threshold, page_num;
+	struct device *dev = &isys->adev->auxdev.dev;
+	u32 calc_fill_time_us = 0, ltr = 0, did = 0;
+	struct video_stream_watermark *p_watermark;
+	enum ltr_did_type ltr_did_type;
+	struct list_head *stream_node;
+	u64 isys_pb_datarate_mbs = 0;
+	u32 mem_open_threshold = 0;
+	struct ltr_did ltrdid;
+	u64 threshold_bytes;
+	u32 max_sram_size;
+	u32 shift;
+
+	shift = isys->pdata->ipdata->sram_gran_shift;
+	max_sram_size = isys->pdata->ipdata->max_sram_size;
+
+	mutex_lock(&iwake_watermark->mutex);
+	if (iwake_watermark->force_iwake_disable) {
+		set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF);
+		set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX,
+				   CRITICAL_THRESHOLD_IWAKE_DISABLE);
+		goto unlock_exit;
+	}
+
+	if (list_empty(&iwake_watermark->video_list)) {
+		isys_pb_datarate_mbs = 0;
+	} else {
+		list_for_each(stream_node, &iwake_watermark->video_list) {
+			p_watermark = list_entry(stream_node,
+						 struct video_stream_watermark,
+						 stream_node);
+			isys_pb_datarate_mbs += p_watermark->stream_data_rate;
+		}
+	}
+	mutex_unlock(&iwake_watermark->mutex);
+
+	if (!isys_pb_datarate_mbs) {
+		enable_iwake(isys, false);
+		set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF);
+		mutex_lock(&iwake_watermark->mutex);
+		set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX,
+				   CRITICAL_THRESHOLD_IWAKE_DISABLE);
+		goto unlock_exit;
+	}
+
+	enable_iwake(isys, true);
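+	/* SRAM fill time in us: the data rate in MB/s equals bytes per us */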
+	calc_fill_time_us = max_sram_size / isys_pb_datarate_mbs;
+
+	if (isys->pdata->ipdata->enhanced_iwake) {
+		ltr = isys->pdata->ipdata->ltr;
+		did = calc_fill_time_us * DEFAULT_DID_RATIO / 100;
+		ltr_did_type = LTR_ENHANNCE_IWAKE;
+	} else {
+		get_lut_ltrdid(isys, &ltrdid);
+
+		if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th0)
+			ltr = 0;
+		else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th1)
+			ltr = ltrdid.lut_ltr.bits.val0;
+		else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th2)
+			ltr = ltrdid.lut_ltr.bits.val1;
+		else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th3)
+			ltr = ltrdid.lut_ltr.bits.val2;
+		else
+			ltr = ltrdid.lut_ltr.bits.val3;
+
+		did = calc_fill_time_us - ltr;
+		ltr_did_type = LTR_IWAKE_ON;
+	}
+
+	set_iwake_ltrdid(isys, ltr, did, ltr_did_type);
+
+	/* calculate iwake threshold with 2KB granularity pages */
+	threshold_bytes = did * isys_pb_datarate_mbs;
+	iwake_threshold = max_t(u32, 1, threshold_bytes >> shift);
+	iwake_threshold = min_t(u32, iwake_threshold, max_sram_size);
+
+	mutex_lock(&iwake_watermark->mutex);
+	if (isys->pdata->ipdata->enhanced_iwake) {
+		set_iwake_register(isys, GDA_IWAKE_THRESHOLD_INDEX,
+				   DEFAULT_IWAKE_THRESHOLD);
+		/* calculate number of pages that will be filled in 10 usec */
+		page_num = (DEFAULT_MEM_OPEN_TIME * isys_pb_datarate_mbs) /
+			ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE;
+		page_num += ((DEFAULT_MEM_OPEN_TIME * isys_pb_datarate_mbs) %
+			     ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE) ? 1 : 0;
+		mem_open_threshold = isys->pdata->ipdata->memopen_threshold;
+		mem_open_threshold = max_t(u32, mem_open_threshold, page_num);
+		dev_dbg(dev, "mem_open_threshold: %u\n", mem_open_threshold);
+		set_iwake_register(isys, GDA_MEMOPEN_THRESHOLD_INDEX,
+				   mem_open_threshold);
+	} else {
+		set_iwake_register(isys, GDA_IWAKE_THRESHOLD_INDEX,
+				   iwake_threshold);
+	}
+
+	iwake_critical_threshold = iwake_threshold +
+		(IS_PIXEL_BUFFER_PAGES - iwake_threshold) / 2;
+
+	dev_dbg(dev, "threshold: %u critical: %u\n", iwake_threshold,
+		iwake_critical_threshold);
+
+	set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX,
+			   iwake_critical_threshold);
+
+	writel(VAL_PKGC_PMON_CFG_RESET,
+	       isys->adev->isp->base + REG_PKGC_PMON_CFG);
+	writel(VAL_PKGC_PMON_CFG_START,
+	       isys->adev->isp->base + REG_PKGC_PMON_CFG);
+unlock_exit:
+	mutex_unlock(&iwake_watermark->mutex);
+}
+
+static void isys_iwake_watermark_init(struct ipu6_isys *isys)
+{
+	struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark;
+
+	INIT_LIST_HEAD(&iwake_watermark->video_list);
+	mutex_init(&iwake_watermark->mutex);
+
+	iwake_watermark->ltrdid.lut_ltr.value = 0;
+	iwake_watermark->isys = isys;
+	iwake_watermark->iwake_enabled = false;
+	iwake_watermark->force_iwake_disable = false;
+}
+
+static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys)
+{
+	struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark;
+
+	mutex_lock(&iwake_watermark->mutex);
+	list_del(&iwake_watermark->video_list);
+	mutex_unlock(&iwake_watermark->mutex);
+
+	mutex_destroy(&iwake_watermark->mutex);
+}
+
+/* The .bound() notifier callback when a match is found */
+static int isys_notifier_bound(struct v4l2_async_notifier *notifier,
+			       struct v4l2_subdev *sd,
+			       struct v4l2_async_connection *asc)
+{
+	struct ipu6_isys *isys =
+		container_of(notifier, struct ipu6_isys, notifier);
+	struct sensor_async_sd *s_asd =
+		container_of(asc, struct sensor_async_sd, asc);
+	int ret;
+
+	ret = ipu_bridge_instantiate_vcm(sd->dev);
+	if (ret) {
+		dev_err(&isys->adev->auxdev.dev, "instantiate vcm failed\n");
+		return ret;
+	}
+
+	dev_dbg(&isys->adev->auxdev.dev, "bind %s nlanes is %d port is %d\n",
+		sd->name, s_asd->csi2.nlanes, s_asd->csi2.port);
+	ret = isys_complete_ext_device_registration(isys, sd, &s_asd->csi2);
+	if (ret)
+		return ret;
+
+	return v4l2_device_register_subdev_nodes(&isys->v4l2_dev);
+}
+
+static int isys_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+	struct ipu6_isys *isys =
+		container_of(notifier, struct ipu6_isys, notifier);
+
+	return v4l2_device_register_subdev_nodes(&isys->v4l2_dev);
+}
+
+static const struct v4l2_async_notifier_operations isys_async_ops = {
+	.bound = isys_notifier_bound,
+	.complete = isys_notifier_complete,
+};
+
+#define ISYS_MAX_PORTS 8
+static int isys_notifier_init(struct ipu6_isys *isys)
+{
+	struct ipu6_device *isp = isys->adev->isp;
+	struct device *dev = &isp->pdev->dev;
+	unsigned int i;
+	int ret;
+
+	v4l2_async_nf_init(&isys->notifier, &isys->v4l2_dev);
+
+	for (i = 0; i < ISYS_MAX_PORTS; i++) {
+		struct v4l2_fwnode_endpoint vep = {
+			.bus_type = V4L2_MBUS_CSI2_DPHY
+		};
+		struct sensor_async_sd *s_asd;
+		struct fwnode_handle *ep;
+
+		ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
+						FWNODE_GRAPH_ENDPOINT_NEXT);
+		if (!ep)
+			continue;
+
+		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+		if (ret) {
+			dev_err(dev, "fwnode endpoint parse failed: %d\n", ret);
+			goto err_parse;
+		}
+
+		s_asd = v4l2_async_nf_add_fwnode_remote(&isys->notifier, ep,
+							struct sensor_async_sd);
+		if (IS_ERR(s_asd)) {
+			ret = PTR_ERR(s_asd);
+			dev_err(dev, "add remote fwnode failed: %d\n", ret);
+			goto err_parse;
+		}
+
+		s_asd->csi2.port = vep.base.port;
+		s_asd->csi2.nlanes = vep.bus.mipi_csi2.num_data_lanes;
+
+		dev_dbg(dev, "remote endpoint port %d with %d lanes added\n",
+			s_asd->csi2.port, s_asd->csi2.nlanes);
+
+		fwnode_handle_put(ep);
+
+		continue;
+
+err_parse:
+		fwnode_handle_put(ep);
+		return ret;
+	}
+
+	isys->notifier.ops = &isys_async_ops;
+	ret = v4l2_async_nf_register(&isys->notifier);
+	if (ret) {
+		dev_err(dev, "failed to register async notifier : %d\n", ret);
+		v4l2_async_nf_cleanup(&isys->notifier);
+	}
+
+	return ret;
+}
+
+static void isys_notifier_cleanup(struct ipu6_isys *isys)
+{
+	v4l2_async_nf_unregister(&isys->notifier);
+	v4l2_async_nf_cleanup(&isys->notifier);
+}
+
+static int isys_register_devices(struct ipu6_isys *isys)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	struct pci_dev *pdev = isys->adev->isp->pdev;
+	int ret;
+
+	isys->media_dev.dev = dev;
+	media_device_pci_init(&isys->media_dev,
+			      pdev, IPU6_MEDIA_DEV_MODEL_NAME);
+
+	strscpy(isys->v4l2_dev.name, isys->media_dev.model,
+		sizeof(isys->v4l2_dev.name));
+
+	ret = media_device_register(&isys->media_dev);
+	if (ret < 0)
+		goto out_media_device_unregister;
+
+	isys->v4l2_dev.mdev = &isys->media_dev;
+	isys->v4l2_dev.ctrl_handler = NULL;
+
+	ret = v4l2_device_register(dev->parent, &isys->v4l2_dev);
+	if (ret < 0)
+		goto out_media_device_unregister;
+
+	ret = isys_register_video_devices(isys);
+	if (ret)
+		goto out_v4l2_device_unregister;
+
+	ret = isys_csi2_register_subdevices(isys);
+	if (ret)
+		goto out_isys_unregister_video_device;
+
+	ret = isys_csi2_create_media_links(isys);
+	if (ret)
+		goto out_isys_unregister_subdevices;
+
+	ret = isys_notifier_init(isys);
+	if (ret)
+		goto out_isys_unregister_subdevices;
+
+	return 0;
+
+out_isys_unregister_subdevices:
+	isys_csi2_unregister_subdevices(isys);
+
+out_isys_unregister_video_device:
+	isys_unregister_video_devices(isys);
+
+out_v4l2_device_unregister:
+	v4l2_device_unregister(&isys->v4l2_dev);
+
+out_media_device_unregister:
+	media_device_unregister(&isys->media_dev);
+	media_device_cleanup(&isys->media_dev);
+
+	dev_err(dev, "failed to register isys devices\n");
+
+	return ret;
+}
+
+static void isys_unregister_devices(struct ipu6_isys *isys)
+{
+	isys_unregister_video_devices(isys);
+	isys_csi2_unregister_subdevices(isys);
+	v4l2_device_unregister(&isys->v4l2_dev);
+	media_device_unregister(&isys->media_dev);
+	media_device_cleanup(&isys->media_dev);
+}
+
+static int isys_runtime_pm_resume(struct device *dev)
+{
+	struct ipu6_bus_device *adev = to_ipu6_bus_device(dev);
+	struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev);
+	struct ipu6_device *isp = adev->isp;
+	unsigned long flags;
+	int ret;
+
+	if (!isys)
+		return 0;
+
+	ret = ipu6_mmu_hw_init(adev->mmu);
+	if (ret)
+		return ret;
+
+	cpu_latency_qos_update_request(&isys->pm_qos, ISYS_PM_QOS_VALUE);
+
+	ret = ipu6_buttress_start_tsc_sync(isp);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&isys->power_lock, flags);
+	isys->power = 1;
+	spin_unlock_irqrestore(&isys->power_lock, flags);
+
+	isys_setup_hw(isys);
+
+	set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_ON);
+
+	return 0;
+}
+
+static int isys_runtime_pm_suspend(struct device *dev)
+{
+	struct ipu6_bus_device *adev = to_ipu6_bus_device(dev);
+	struct ipu6_isys *isys;
+	unsigned long flags;
+
+	isys = dev_get_drvdata(dev);
+	if (!isys)
+		return 0;
+
+	spin_lock_irqsave(&isys->power_lock, flags);
+	isys->power = 0;
+	spin_unlock_irqrestore(&isys->power_lock, flags);
+
+	mutex_lock(&isys->mutex);
+	isys->need_reset = false;
+	mutex_unlock(&isys->mutex);
+
+	isys->phy_termcal_val = 0;
+	cpu_latency_qos_update_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+	set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_OFF);
+
+	ipu6_mmu_hw_cleanup(adev->mmu);
+
+	return 0;
+}
+
+static int isys_suspend(struct device *dev)
+{
+	struct ipu6_isys *isys = dev_get_drvdata(dev);
+
+	/* If stream is open, refuse to suspend */
+	if (isys->stream_opened)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int isys_resume(struct device *dev)
+{
+	return 0;
+}
+
+static const struct dev_pm_ops isys_pm_ops = {
+	.runtime_suspend = isys_runtime_pm_suspend,
+	.runtime_resume = isys_runtime_pm_resume,
+	.suspend = isys_suspend,
+	.resume = isys_resume,
+};
+
+static void isys_remove(struct auxiliary_device *auxdev)
+{
+	struct ipu6_bus_device *adev = auxdev_to_adev(auxdev);
+	struct ipu6_isys *isys = dev_get_drvdata(&auxdev->dev);
+	struct ipu6_device *isp = adev->isp;
+	struct isys_fw_msgs *fwmsg, *safe;
+	unsigned int i;
+
+	list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head)
+		dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs),
+			       fwmsg, fwmsg->dma_addr, 0);
+
+	list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head)
+		dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs),
+			       fwmsg, fwmsg->dma_addr, 0);
+
+	isys_unregister_devices(isys);
+	isys_notifier_cleanup(isys);
+
+	cpu_latency_qos_remove_request(&isys->pm_qos);
+
+	if (!isp->secure_mode) {
+		ipu6_cpd_free_pkg_dir(adev);
+		ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt);
+		release_firmware(adev->fw);
+	}
+
+	for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++)
+		mutex_destroy(&isys->streams[i].mutex);
+
+	isys_iwake_watermark_cleanup(isys);
+	mutex_destroy(&isys->stream_mutex);
+	mutex_destroy(&isys->mutex);
+}
+
+static int alloc_fw_msg_bufs(struct ipu6_isys *isys, int amount)
+{
+	struct device *dev = &isys->adev->auxdev.dev;
+	struct isys_fw_msgs *addr;
+	dma_addr_t dma_addr;
+	unsigned long flags;
+	unsigned int i;
+
+	for (i = 0; i < amount; i++) {
+		addr = dma_alloc_attrs(dev, sizeof(struct isys_fw_msgs),
+				       &dma_addr, GFP_KERNEL, 0);
+		if (!addr)
+			break;
+		addr->dma_addr = dma_addr;
+
+		spin_lock_irqsave(&isys->listlock, flags);
+		list_add(&addr->head, &isys->framebuflist);
+		spin_unlock_irqrestore(&isys->listlock, flags);
+	}
+
+	if (i == amount)
+		return 0;
+
+	spin_lock_irqsave(&isys->listlock, flags);
+	while (!list_empty(&isys->framebuflist)) {
+		addr = list_first_entry(&isys->framebuflist,
+					struct isys_fw_msgs, head);
+		list_del(&addr->head);
+		spin_unlock_irqrestore(&isys->listlock, flags);
+		dma_free_attrs(dev, sizeof(struct isys_fw_msgs), addr,
+			       addr->dma_addr, 0);
+		spin_lock_irqsave(&isys->listlock, flags);
+	}
+	spin_unlock_irqrestore(&isys->listlock, flags);
+
+	return -ENOMEM;
+}
+
+struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream)
+{
+	struct ipu6_isys *isys = stream->isys;
+	struct device *dev = &isys->adev->auxdev.dev;
+	struct isys_fw_msgs *msg;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&isys->listlock, flags);
+	if (list_empty(&isys->framebuflist)) {
+		spin_unlock_irqrestore(&isys->listlock, flags);
+		dev_dbg(dev, "Frame list empty\n");
+
+		ret = alloc_fw_msg_bufs(isys, 5);
+		if (ret < 0)
+			return NULL;
+
+		spin_lock_irqsave(&isys->listlock, flags);
+		if (list_empty(&isys->framebuflist)) {
+			spin_unlock_irqrestore(&isys->listlock, flags);
+			dev_err(dev, "Frame list empty\n");
+			return NULL;
+		}
+	}
+	msg = list_last_entry(&isys->framebuflist, struct isys_fw_msgs, head);
+	list_move(&msg->head, &isys->framebuflist_fw);
+	spin_unlock_irqrestore(&isys->listlock, flags);
+	memset(&msg->fw_msg, 0, sizeof(msg->fw_msg));
+
+	return msg;
+}
+
+void ipu6_cleanup_fw_msg_bufs(struct ipu6_isys *isys)
+{
+	struct isys_fw_msgs *fwmsg, *fwmsg0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&isys->listlock, flags);
+	list_for_each_entry_safe(fwmsg, fwmsg0, &isys->framebuflist_fw, head)
+		list_move(&fwmsg->head, &isys->framebuflist);
+	spin_unlock_irqrestore(&isys->listlock, flags);
+}
+
+void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, u64 data)
+{
+	struct isys_fw_msgs *msg;
+	unsigned long flags;
+	u64 *ptr = (u64 *)data;
+
+	if (!ptr)
+		return;
+
+	spin_lock_irqsave(&isys->listlock, flags);
+	msg = container_of(ptr, struct isys_fw_msgs, fw_msg.dummy);
+	list_move(&msg->head, &isys->framebuflist);
+	spin_unlock_irqrestore(&isys->listlock, flags);
+}
+
+static int isys_probe(struct auxiliary_device *auxdev,
+		      const struct auxiliary_device_id *auxdev_id)
+{
+	struct ipu6_bus_device *adev = auxdev_to_adev(auxdev);
+	struct ipu6_device *isp = adev->isp;
+	const struct firmware *fw;
+	struct ipu6_isys *isys;
+	unsigned int i;
+	int ret;
+
+	if (!isp->bus_ready_to_probe)
+		return -EPROBE_DEFER;
+
+	isys = devm_kzalloc(&auxdev->dev, sizeof(*isys), GFP_KERNEL);
+	if (!isys)
+		return -ENOMEM;
+
+	ret = ipu6_mmu_hw_init(adev->mmu);
+	if (ret)
+		return ret;
+
+	adev->auxdrv_data =
+		(const struct ipu6_auxdrv_data *)auxdev_id->driver_data;
+	adev->auxdrv = to_auxiliary_drv(auxdev->dev.driver);
+	isys->adev = adev;
+	isys->pdata = adev->pdata;
+
+	/* initial sensor type */
+	isys->sensor_type = isys->pdata->ipdata->sensor_type_start;
+
+	spin_lock_init(&isys->streams_lock);
+	spin_lock_init(&isys->power_lock);
+	isys->power = 0;
+	isys->phy_termcal_val = 0;
+
+	mutex_init(&isys->mutex);
+	mutex_init(&isys->stream_mutex);
+
+	spin_lock_init(&isys->listlock);
+	INIT_LIST_HEAD(&isys->framebuflist);
+	INIT_LIST_HEAD(&isys->framebuflist_fw);
+
+	isys->line_align = IPU6_ISYS_2600_MEM_LINE_ALIGN;
+	isys->icache_prefetch = 0;
+
+	dev_set_drvdata(&auxdev->dev, isys);
+
+	isys_stream_init(isys);
+
+	if (!isp->secure_mode) {
+		fw = isp->cpd_fw;
+		ret = ipu6_buttress_map_fw_image(adev, fw, &adev->fw_sgt);
+		if (ret)
+			goto release_firmware;
+
+		ret = ipu6_cpd_create_pkg_dir(adev, isp->cpd_fw->data);
+		if (ret)
+			goto remove_shared_buffer;
+	}
+
+	cpu_latency_qos_add_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+	ret = alloc_fw_msg_bufs(isys, 20);
+	if (ret < 0)
+		goto out_remove_pkg_dir_shared_buffer;
+
+	isys_iwake_watermark_init(isys);
+
+	if (is_ipu6se(adev->isp->hw_ver))
+		isys->phy_set_power = ipu6_isys_jsl_phy_set_power;
+	else if (is_ipu6ep_mtl(adev->isp->hw_ver))
+		isys->phy_set_power = ipu6_isys_dwc_phy_set_power;
+	else
+		isys->phy_set_power = ipu6_isys_mcd_phy_set_power;
+
+	ret = isys_register_devices(isys);
+	if (ret)
+		goto out_remove_pkg_dir_shared_buffer;
+
+	ipu6_mmu_hw_cleanup(adev->mmu);
+
+	return 0;
+
+out_remove_pkg_dir_shared_buffer:
+	if (!isp->secure_mode)
+		ipu6_cpd_free_pkg_dir(adev);
+remove_shared_buffer:
+	if (!isp->secure_mode)
+		ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt);
+release_firmware:
+	if (!isp->secure_mode)
+		release_firmware(adev->fw);
+
+	for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++)
+		mutex_destroy(&isys->streams[i].mutex);
+
+	mutex_destroy(&isys->mutex);
+	mutex_destroy(&isys->stream_mutex);
+
+	ipu6_mmu_hw_cleanup(adev->mmu);
+
+	return ret;
+}
+
+struct fwmsg {
+	int type;
+	char *msg;
+	bool valid_ts;
+};
+
+static const struct fwmsg fw_msg[] = {
+	{IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE, "STREAM_OPEN_DONE", 0},
+	{IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, "STREAM_CLOSE_ACK", 0},
+	{IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK, "STREAM_START_ACK", 0},
+	{IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK,
+	 "STREAM_START_AND_CAPTURE_ACK", 0},
+	{IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, "STREAM_STOP_ACK", 0},
+	{IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, "STREAM_FLUSH_ACK", 0},
+	{IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY, "PIN_DATA_READY", 1},
+	{IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, "STREAM_CAPTURE_ACK", 0},
+	{IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE,
+	 "STREAM_START_AND_CAPTURE_DONE", 1},
+	{IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, "STREAM_CAPTURE_DONE", 1},
+	{IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF, "FRAME_SOF", 1},
+	{IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF, "FRAME_EOF", 1},
+	{IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY, "STATS_READY", 1},
+	{-1, "UNKNOWN MESSAGE", 0}
+};
+
+static u32 resp_type_to_index(int type)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(fw_msg); i++)
+		if (fw_msg[i].type == type)
+			return i;
+
+	return  ARRAY_SIZE(fw_msg) - 1;
+}
+
+static int isys_isr_one(struct ipu6_bus_device *adev)
+{
+	struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev);
+	struct ipu6_fw_isys_resp_info_abi *resp;
+	struct ipu6_isys_stream *stream;
+	struct ipu6_isys_csi2 *csi2 = NULL;
+	u32 index;
+	u64 ts;
+
+	if (!isys->fwcom)
+		return 1;
+
+	resp = ipu6_fw_isys_get_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES);
+	if (!resp)
+		return 1;
+
+	ts = (u64)resp->timestamp[1] << 32 | resp->timestamp[0];
+
+	index = resp_type_to_index(resp->type);
+	dev_dbg(&adev->auxdev.dev,
+		"FW resp %02d %s, stream %u, ts 0x%16.16llx, pin %d\n",
+		resp->type, fw_msg[index].msg, resp->stream_handle,
+		fw_msg[index].valid_ts ? ts : 0, resp->pin_id);
+
+	if (resp->error_info.error == IPU6_FW_ISYS_ERROR_STREAM_IN_SUSPENSION)
+		/* Suspension is a special case: not enough buffers */
+		dev_dbg(&adev->auxdev.dev,
+			"FW error resp SUSPENSION, details %d\n",
+			resp->error_info.error_details);
+	else if (resp->error_info.error)
+		dev_dbg(&adev->auxdev.dev,
+			"FW error resp error %d, details %d\n",
+			resp->error_info.error, resp->error_info.error_details);
+
+	if (resp->stream_handle >= IPU6_ISYS_MAX_STREAMS) {
+		dev_err(&adev->auxdev.dev, "bad stream handle %u\n",
+			resp->stream_handle);
+		goto leave;
+	}
+
+	stream = ipu6_isys_query_stream_by_handle(isys, resp->stream_handle);
+	if (!stream) {
+		dev_err(&adev->auxdev.dev, "stream of stream_handle %u is unused\n",
+			resp->stream_handle);
+		goto leave;
+	}
+	stream->error = resp->error_info.error;
+
+	csi2 = ipu6_isys_subdev_to_csi2(stream->asd);
+
+	switch (resp->type) {
+	case IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE:
+		complete(&stream->stream_open_completion);
+		break;
+	case IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK:
+		complete(&stream->stream_close_completion);
+		break;
+	case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK:
+		complete(&stream->stream_start_completion);
+		break;
+	case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK:
+		complete(&stream->stream_start_completion);
+		break;
+	case IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK:
+		complete(&stream->stream_stop_completion);
+		break;
+	case IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK:
+		complete(&stream->stream_stop_completion);
+		break;
+	case IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY:
+		/*
+		 * the firmware only releases the capture message once software
+		 * has received the pin_data_ready event
+		 */
+		ipu6_put_fw_msg_buf(ipu6_bus_get_drvdata(adev), resp->buf_id);
+		if (resp->pin_id < IPU6_ISYS_OUTPUT_PINS &&
+		    stream->output_pins[resp->pin_id].pin_ready)
+			stream->output_pins[resp->pin_id].pin_ready(stream,
+								    resp);
+		else
+			dev_warn(&adev->auxdev.dev,
+				 "%d:No data pin ready handler for pin id %d\n",
+				 resp->stream_handle, resp->pin_id);
+		if (csi2)
+			ipu6_isys_csi2_error(csi2);
+
+		break;
+	case IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK:
+		break;
+	case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE:
+	case IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE:
+		break;
+	case IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF:
+
+		ipu6_isys_csi2_sof_event_by_stream(stream);
+		stream->seq[stream->seq_index].sequence =
+			atomic_read(&stream->sequence) - 1;
+		stream->seq[stream->seq_index].timestamp = ts;
+		dev_dbg(&adev->auxdev.dev,
+			"sof: handle %d: (index %u), timestamp 0x%16.16llx\n",
+			resp->stream_handle,
+			stream->seq[stream->seq_index].sequence, ts);
+		stream->seq_index = (stream->seq_index + 1)
+			% IPU6_ISYS_MAX_PARALLEL_SOF;
+		break;
+	case IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF:
+		ipu6_isys_csi2_eof_event_by_stream(stream);
+		dev_dbg(&adev->auxdev.dev,
+			"eof: handle %d: (index %u), timestamp 0x%16.16llx\n",
+			resp->stream_handle,
+			stream->seq[stream->seq_index].sequence, ts);
+		break;
+	case IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY:
+		break;
+	default:
+		dev_err(&adev->auxdev.dev, "%d:unknown response type %u\n",
+			resp->stream_handle, resp->type);
+		break;
+	}
+
+	ipu6_isys_put_stream(stream);
+leave:
+	ipu6_fw_isys_put_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES);
+	return 0;
+}
+
+static const struct ipu6_auxdrv_data ipu6_isys_auxdrv_data = {
+	.isr = isys_isr,
+	.isr_threaded = NULL,
+	.wake_isr_thread = false,
+};
+
+static const struct auxiliary_device_id ipu6_isys_id_table[] = {
+	{
+		.name = "intel_ipu6.isys",
+		.driver_data = (kernel_ulong_t)&ipu6_isys_auxdrv_data,
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(auxiliary, ipu6_isys_id_table);
+
+static struct auxiliary_driver isys_driver = {
+	.name = IPU6_ISYS_NAME,
+	.probe = isys_probe,
+	.remove = isys_remove,
+	.id_table = ipu6_isys_id_table,
+	.driver = {
+		.pm = &isys_pm_ops,
+	},
+};
+
+module_auxiliary_driver(isys_driver);
+
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
+MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
+MODULE_AUTHOR("Hongju Wang <hongju.wang@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel IPU6 input system driver");
+MODULE_IMPORT_NS(INTEL_IPU6);
+MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.h b/drivers/media/pci/intel/ipu6/ipu6-isys.h
new file mode 100644
index 000000000000..cf7a90bfedc9
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013 - 2023 Intel Corporation */
+
+#ifndef IPU6_ISYS_H
+#define IPU6_ISYS_H
+
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pm_qos.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+
+#include "ipu6.h"
+#include "ipu6-fw-isys.h"
+#include "ipu6-isys-csi2.h"
+#include "ipu6-isys-video.h"
+
+struct ipu6_bus_device;
+
+#define IPU6_ISYS_ENTITY_PREFIX		"Intel IPU6"
+/* FW support max 16 streams */
+#define IPU6_ISYS_MAX_STREAMS		16
+#define ISYS_UNISPART_IRQS	(IPU6_ISYS_UNISPART_IRQ_SW |	\
+				 IPU6_ISYS_UNISPART_IRQ_CSI0 |	\
+				 IPU6_ISYS_UNISPART_IRQ_CSI1)
+
+#define IPU6_ISYS_2600_MEM_LINE_ALIGN	64
+
+/*
+ * Current message queue configuration. These must be big enough
+ * so that they never get full. Queues are located in system memory.
+ */
+#define IPU6_ISYS_SIZE_RECV_QUEUE 40
+#define IPU6_ISYS_SIZE_SEND_QUEUE 40
+#define IPU6_ISYS_SIZE_PROXY_RECV_QUEUE 5
+#define IPU6_ISYS_SIZE_PROXY_SEND_QUEUE 5
+#define IPU6_ISYS_NUM_RECV_QUEUE 1
+
+#define IPU6_ISYS_MIN_WIDTH		1U
+#define IPU6_ISYS_MIN_HEIGHT		1U
+#define IPU6_ISYS_MAX_WIDTH		4672U
+#define IPU6_ISYS_MAX_HEIGHT		3416U
+
+/* the threshold granularity is 2KB on IPU6 */
+#define IPU6_SRAM_GRANULARITY_SHIFT	11
+#define IPU6_SRAM_GRANULARITY_SIZE	2048
+/* the threshold granularity is 1KB on IPU6SE */
+#define IPU6SE_SRAM_GRANULARITY_SHIFT	10
+#define IPU6SE_SRAM_GRANULARITY_SIZE	1024
+/* IS pixel buffer is 256KB, MaxSRAMSize is 200KB on IPU6 */
+#define IPU6_MAX_SRAM_SIZE			(200 << 10)
+/* IS pixel buffer is 128KB, MaxSRAMSize is 96KB on IPU6SE */
+#define IPU6SE_MAX_SRAM_SIZE			(96 << 10)
+
+#define IPU6EP_LTR_VALUE			200
+#define IPU6EP_MIN_MEMOPEN_TH			0x4
+#define IPU6EP_MTL_LTR_VALUE			1023
+#define IPU6EP_MTL_MIN_MEMOPEN_TH		0xc
+
+struct ltr_did {
+	union {
+		u32 value;
+		struct {
+			u8 val0;
+			u8 val1;
+			u8 val2;
+			u8 val3;
+		} bits;
+	} lut_ltr;
+	union {
+		u32 value;
+		struct {
+			u8 th0;
+			u8 th1;
+			u8 th2;
+			u8 th3;
+		} bits;
+	} lut_fill_time;
+};
+
+struct isys_iwake_watermark {
+	bool iwake_enabled;
+	bool force_iwake_disable;
+	u32 iwake_threshold;
+	u64 isys_pixelbuffer_datarate;
+	struct ltr_did ltrdid;
+	struct mutex mutex; /* protect whole struct */
+	struct ipu6_isys *isys;
+	struct list_head video_list;
+};
+
+struct ipu6_isys_csi2_config {
+	u32 nlanes;
+	u32 port;
+};
+
+struct sensor_async_sd {
+	struct v4l2_async_connection asc;
+	struct ipu6_isys_csi2_config csi2;
+};
+
+/*
+ * struct ipu6_isys
+ *
+ * @media_dev: Media device
+ * @v4l2_dev: V4L2 device
+ * @adev: ISYS bus device
+ * @power: Is ISYS powered on or not?
+ * @isr_bits: Which bits does the ISR handle?
+ * @power_lock: Serialise access to power (power state in general)
+ * @csi2_rx_ctrl_cached: cached shared value between all CSI2 receivers
+ * @streams_lock: serialise access to streams
+ * @streams: streams per firmware stream ID
+ * @fwcom: fw communication layer private pointer
+ *         or optional external library private pointer
+ * @line_align: line alignment in memory
+ * @phy_termcal_val: the termination calibration value, only used for DWC PHY
+ * @need_reset: Isys requires d0i0->i3 transition
+ * @ref_count: total number of callers fw open
+ * @mutex: serialise access to isys video open/release related operations
+ * @stream_mutex: serialise stream start and stop, queueing requests
+ * @pdata: platform data pointer
+ * @csi2: CSI-2 receivers
+ */
+struct ipu6_isys {
+	struct media_device media_dev;
+	struct v4l2_device v4l2_dev;
+	struct ipu6_bus_device *adev;
+
+	int power;
+	spinlock_t power_lock;
+	u32 isr_csi2_bits;
+	u32 csi2_rx_ctrl_cached;
+	spinlock_t streams_lock;
+	struct ipu6_isys_stream streams[IPU6_ISYS_MAX_STREAMS];
+	int streams_ref_count[IPU6_ISYS_MAX_STREAMS];
+	void *fwcom;
+	unsigned int line_align;
+	u32 phy_termcal_val;
+	bool need_reset;
+	bool icache_prefetch;
+	bool csi2_cse_ipc_not_supported;
+	unsigned int ref_count;
+	unsigned int stream_opened;
+	unsigned int sensor_type;
+
+	struct mutex mutex;
+	struct mutex stream_mutex;
+
+	struct ipu6_isys_pdata *pdata;
+
+	int (*phy_set_power)(struct ipu6_isys *isys,
+			     struct ipu6_isys_csi2_config *cfg,
+			     const struct ipu6_isys_csi2_timing *timing,
+			     bool on);
+
+	struct ipu6_isys_csi2 *csi2;
+	struct ipu6_isys_video av[NR_OF_VIDEO_DEVICE];
+
+	struct pm_qos_request pm_qos;
+	spinlock_t listlock;	/* Protect framebuflist */
+	struct list_head framebuflist;
+	struct list_head framebuflist_fw;
+	struct v4l2_async_notifier notifier;
+	struct isys_iwake_watermark iwake_watermark;
+};
+
+struct isys_fw_msgs {
+	union {
+		u64 dummy;
+		struct ipu6_fw_isys_frame_buff_set_abi frame;
+		struct ipu6_fw_isys_stream_cfg_data_abi stream;
+	} fw_msg;
+	struct list_head head;
+	dma_addr_t dma_addr;
+};
+
+struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream);
+void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, u64 data);
+void ipu6_cleanup_fw_msg_bufs(struct ipu6_isys *isys);
+
+extern const struct v4l2_ioctl_ops ipu6_isys_ioctl_ops;
+
+void isys_setup_hw(struct ipu6_isys *isys);
+irqreturn_t isys_isr(struct ipu6_bus_device *adev);
+void update_watermark_setting(struct ipu6_isys *isys);
+
+int ipu6_isys_mcd_phy_set_power(struct ipu6_isys *isys,
+				struct ipu6_isys_csi2_config *cfg,
+				const struct ipu6_isys_csi2_timing *timing,
+				bool on);
+
+int ipu6_isys_dwc_phy_set_power(struct ipu6_isys *isys,
+				struct ipu6_isys_csi2_config *cfg,
+				const struct ipu6_isys_csi2_timing *timing,
+				bool on);
+
+int ipu6_isys_jsl_phy_set_power(struct ipu6_isys *isys,
+				struct ipu6_isys_csi2_config *cfg,
+				const struct ipu6_isys_csi2_timing *timing,
+				bool on);
+#endif /* IPU6_ISYS_H */
-- 
2.43.0

From 32d07a2e879187ea87b90256ac32a41080e3b8bc Mon Sep 17 00:00:00 2001
From: Bingbu Cao <bingbu.cao@intel.com>
Date: Thu, 11 Jan 2024 14:55:25 +0800
Subject: [PATCH 11/31] media: intel/ipu6: input system video capture nodes

Register the v4l2 video device and set up the vb2 queue to
support basic video capture. The video streaming callback
triggers the input system driver to construct an input
system stream configuration for the firmware, based on the
data type and stream ID, and then queue buffers to the
firmware to capture frames.

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Link: https://lore.kernel.org/r/20240111065531.2418836-12-bingbu.cao@intel.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 .../media/pci/intel/ipu6/ipu6-isys-queue.c    |  825 +++++++++++
 .../media/pci/intel/ipu6/ipu6-isys-queue.h    |   76 +
 .../media/pci/intel/ipu6/ipu6-isys-video.c    | 1253 +++++++++++++++++
 .../media/pci/intel/ipu6/ipu6-isys-video.h    |  136 ++
 4 files changed, 2290 insertions(+)
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-video.c
 create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-video.h

diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
new file mode 100644
index 000000000000..735d2d642d87
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
@@ -0,0 +1,825 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2023 Intel Corporation
+ */
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "ipu6-bus.h"
+#include "ipu6-fw-isys.h"
+#include "ipu6-isys.h"
+#include "ipu6-isys-video.h"
+
+static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+		       unsigned int *num_planes, unsigned int sizes[],
+		       struct device *alloc_devs[])
+{
+	struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q);
+	struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+	struct device *dev = &av->isys->adev->auxdev.dev;
+	bool use_fmt = false;
+	unsigned int i;
+	u32 size;
+
+	/* num_planes == 0: we're being called through VIDIOC_REQBUFS */
+	if (!*num_planes) {
+		use_fmt = true;
+		*num_planes = av->mpix.num_planes;
+	}
+
+	for (i = 0; i < *num_planes; i++) {
+		size = av->mpix.plane_fmt[i].sizeimage;
+		if (use_fmt) {
+			sizes[i] = size;
+		} else if (sizes[i] < size) {
+			dev_err(dev, "%s: queue setup: plane %d size %u < %u\n",
+				av->vdev.name, i, sizes[i], size);
+			return -EINVAL;
+		}
+
+		alloc_devs[i] = aq->dev;
+	}
+
+	return 0;
+}
+
+static int ipu6_isys_buf_prepare(struct vb2_buffer *vb)
+{
+	struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue);
+	struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+	struct device *dev = &av->isys->adev->auxdev.dev;
+
+	dev_dbg(dev, "buffer: %s: configured size %u, buffer size %lu\n",
+		av->vdev.name, av->mpix.plane_fmt[0].sizeimage,
+		vb2_plane_size(vb, 0));
+
+	if (av->mpix.plane_fmt[0].sizeimage > vb2_plane_size(vb, 0))
+		return -EINVAL;
+
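+	/* the payload covers the full padded frame: bytesperline * height */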
+	vb2_set_plane_payload(vb, 0, av->mpix.plane_fmt[0].bytesperline *
+			      av->mpix.height);
+	vb->planes[0].data_offset = 0;
+
+	return 0;
+}
+
+/*
+ * Queue a buffer list back to incoming or active queues. The buffers
+ * are removed from the buffer list.
+ */
+void ipu6_isys_buffer_list_queue(struct ipu6_isys_buffer_list *bl,
+				 unsigned long op_flags,
+				 enum vb2_buffer_state state)
+{
+	struct ipu6_isys_buffer *ib, *ib_safe;
+	unsigned long flags;
+	bool first = true;
+
+	if (!bl)
+		return;
+
+	WARN_ON_ONCE(!bl->nbufs);
+	WARN_ON_ONCE(op_flags & IPU6_ISYS_BUFFER_LIST_FL_ACTIVE &&
+		     op_flags & IPU6_ISYS_BUFFER_LIST_FL_INCOMING);
+
+	list_for_each_entry_safe(ib, ib_safe, &bl->head, head) {
+		struct ipu6_isys_video *av;
+		struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib);
+		struct ipu6_isys_queue *aq =
+			vb2_queue_to_isys_queue(vb->vb2_queue);
+		struct device *dev;
+
+		av = ipu6_isys_queue_to_video(aq);
+		dev = &av->isys->adev->auxdev.dev;
+		spin_lock_irqsave(&aq->lock, flags);
+		list_del(&ib->head);
+		if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_ACTIVE)
+			list_add(&ib->head, &aq->active);
+		else if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_INCOMING)
+			list_add_tail(&ib->head, &aq->incoming);
+		spin_unlock_irqrestore(&aq->lock, flags);
+
+		if (op_flags & IPU6_ISYS_BUFFER_LIST_FL_SET_STATE)
+			vb2_buffer_done(vb, state);
+
+		if (first) {
+			dev_dbg(dev,
+				"queue buf list %p flags %lx, s %d, %d bufs\n",
+				bl, op_flags, state, bl->nbufs);
+			first = false;
+		}
+
+		bl->nbufs--;
+	}
+
+	WARN_ON(bl->nbufs);
+}
+
+/*
+ * flush_firmware_streamon_fail() - Flush in cases where requests may
+ * have been queued to firmware and the *firmware streamon fails for a
+ * reason or another.
+ */
+static void flush_firmware_streamon_fail(struct ipu6_isys_stream *stream)
+{
+	struct device *dev = &stream->isys->adev->auxdev.dev;
+	struct ipu6_isys_queue *aq;
+	unsigned long flags;
+
+	lockdep_assert_held(&stream->mutex);
+
+	list_for_each_entry(aq, &stream->queues, node) {
+		struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+		struct ipu6_isys_buffer *ib, *ib_safe;
+
+		spin_lock_irqsave(&aq->lock, flags);
+		list_for_each_entry_safe(ib, ib_safe, &aq->active, head) {
+			struct vb2_buffer *vb =
+				ipu6_isys_buffer_to_vb2_buffer(ib);
+
+			list_del(&ib->head);
+			if (av->streaming) {
+				dev_dbg(dev,
+					"%s: queue buffer %u back to incoming\n",
+					av->vdev.name, vb->index);
+				/* Queue already streaming, return to driver. */
+				list_add(&ib->head, &aq->incoming);
+				continue;
+			}
+			/* Queue not yet streaming, return to user. */
+			dev_dbg(dev, "%s: return %u back to videobuf2\n",
+				av->vdev.name, vb->index);
+			vb2_buffer_done(ipu6_isys_buffer_to_vb2_buffer(ib),
+					VB2_BUF_STATE_QUEUED);
+		}
+		spin_unlock_irqrestore(&aq->lock, flags);
+	}
+}
+
+/*
+ * Attempt obtaining a buffer list from the incoming queues, a list of buffers
+ * that contains one entry from each video buffer queue. If a buffer can't be
+ * obtained from every queue, the buffers are returned to their queues.
+ */
+static int buffer_list_get(struct ipu6_isys_stream *stream,
+			   struct ipu6_isys_buffer_list *bl)
+{
+	struct device *dev = &stream->isys->adev->auxdev.dev;
+	struct ipu6_isys_queue *aq;
+	unsigned long flags;
+	unsigned long buf_flag = IPU6_ISYS_BUFFER_LIST_FL_INCOMING;
+
+	bl->nbufs = 0;
+	INIT_LIST_HEAD(&bl->head);
+
+	list_for_each_entry(aq, &stream->queues, node) {
+		struct ipu6_isys_buffer *ib;
+
+		spin_lock_irqsave(&aq->lock, flags);
+		if (list_empty(&aq->incoming)) {
+			spin_unlock_irqrestore(&aq->lock, flags);
+			if (!list_empty(&bl->head))
+				ipu6_isys_buffer_list_queue(bl, buf_flag, 0);
+			return -ENODATA;
+		}
+
+		ib = list_last_entry(&aq->incoming,
+				     struct ipu6_isys_buffer, head);
+
+		dev_dbg(dev, "buffer: %s: buffer %u\n",
+			ipu6_isys_queue_to_video(aq)->vdev.name,
+			ipu6_isys_buffer_to_vb2_buffer(ib)->index);
+		list_del(&ib->head);
+		list_add(&ib->head, &bl->head);
+		spin_unlock_irqrestore(&aq->lock, flags);
+
+		bl->nbufs++;
+	}
+
+	dev_dbg(dev, "get buffer list %p, %u buffers\n", bl, bl->nbufs);
+
+	return 0;
+}
+
+static void
+ipu6_isys_buf_to_fw_frame_buf_pin(struct vb2_buffer *vb,
+				  struct ipu6_fw_isys_frame_buff_set_abi *set)
+{
+	struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue);
+
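+	/*
+	 * Fill the firmware output pin of this queue with the buffer's DMA
+	 * address and buffer id (vb2 buffer index offset by one).
+	 */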
+	set->output_pins[aq->fw_output].addr =
+		vb2_dma_contig_plane_dma_addr(vb, 0);
+	set->output_pins[aq->fw_output].out_buf_id = vb->index + 1;
+}
+
+/*
+ * Convert a buffer list to an isys firmware ABI frame buffer set. The
+ * buffer list is not modified.
+ */
+#define IPU6_ISYS_FRAME_NUM_THRESHOLD  (30)
+void
+ipu6_isys_buf_to_fw_frame_buf(struct ipu6_fw_isys_frame_buff_set_abi *set,
+			      struct ipu6_isys_stream *stream,
+			      struct ipu6_isys_buffer_list *bl)
+{
+	struct ipu6_isys_buffer *ib;
+
+	WARN_ON(!bl->nbufs);
+
+	set->send_irq_sof = 1;
+	set->send_resp_sof = 1;
+	set->send_irq_eof = 0;
+	set->send_resp_eof = 0;
+
+	if (stream->streaming)
+		set->send_irq_capture_ack = 0;
+	else
+		set->send_irq_capture_ack = 1;
+	set->send_irq_capture_done = 0;
+
+	set->send_resp_capture_ack = 1;
+	set->send_resp_capture_done = 1;
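+	/*
+	 * After the first IPU6_ISYS_FRAME_NUM_THRESHOLD frames, stop
+	 * requesting capture ack/done responses from the firmware.
+	 */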
+	if (atomic_read(&stream->sequence) >= IPU6_ISYS_FRAME_NUM_THRESHOLD) {
+		set->send_resp_capture_ack = 0;
+		set->send_resp_capture_done = 0;
+	}
+
+	list_for_each_entry(ib, &bl->head, head) {
+		struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib);
+
+		ipu6_isys_buf_to_fw_frame_buf_pin(vb, set);
+	}
+}
+
+/* Start streaming for real. The buffer list must be available. */
+static int ipu6_isys_stream_start(struct ipu6_isys_video *av,
+				  struct ipu6_isys_buffer_list *bl, bool error)
+{
+	struct ipu6_isys_stream *stream = av->stream;
+	struct device *dev = &stream->isys->adev->auxdev.dev;
+	struct ipu6_isys_buffer_list __bl;
+	int ret;
+
+	mutex_lock(&stream->isys->stream_mutex);
+	ret = ipu6_isys_video_set_streaming(av, 1, bl);
+	mutex_unlock(&stream->isys->stream_mutex);
+	if (ret)
+		goto out_requeue;
+
+	stream->streaming = 1;
+
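+	/* Send any further complete buffer sets using a local buffer list. */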
+	bl = &__bl;
+
+	do {
+		struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL;
+		struct isys_fw_msgs *msg;
+		u16 send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE;
+
+		ret = buffer_list_get(stream, bl);
+		if (ret < 0)
+			break;
+
+		msg = ipu6_get_fw_msg_buf(stream);
+		if (!msg)
+			return -ENOMEM;
+
+		buf = &msg->fw_msg.frame;
+		ipu6_isys_buf_to_fw_frame_buf(buf, stream, bl);
+		ipu6_fw_isys_dump_frame_buff_set(dev, buf,
+						 stream->nr_output_pins);
+		ipu6_isys_buffer_list_queue(bl, IPU6_ISYS_BUFFER_LIST_FL_ACTIVE,
+					    0);
+		ret = ipu6_fw_isys_complex_cmd(stream->isys,
+					       stream->stream_handle, buf,
+					       msg->dma_addr, sizeof(*buf),
+					       send_type);
+	} while (!WARN_ON(ret));
+
+	return 0;
+
+out_requeue:
+	if (bl && bl->nbufs)
+		ipu6_isys_buffer_list_queue(bl,
+					    IPU6_ISYS_BUFFER_LIST_FL_INCOMING |
+					    (error ?
+					     IPU6_ISYS_BUFFER_LIST_FL_SET_STATE :
+					     0),
+					    error ? VB2_BUF_STATE_ERROR :
+					    VB2_BUF_STATE_QUEUED);
+	flush_firmware_streamon_fail(stream);
+
+	return ret;
+}
+
+static void buf_queue(struct vb2_buffer *vb)
+{
+	struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue);
+	struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+	struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb);
+	struct ipu6_isys_video_buffer *ivb =
+		vb2_buffer_to_ipu6_isys_video_buffer(vvb);
+	struct ipu6_isys_buffer *ib = &ivb->ib;
+	struct device *dev = &av->isys->adev->auxdev.dev;
+	struct media_pipeline *media_pipe =
+		media_entity_pipeline(&av->vdev.entity);
+	struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL;
+	struct ipu6_isys_stream *stream = av->stream;
+	struct ipu6_isys_buffer_list bl;
+	struct isys_fw_msgs *msg;
+	unsigned long flags;
+	dma_addr_t dma;
+	unsigned int i;
+	int ret;
+
+	dev_dbg(dev, "queue buffer %u for %s\n", vb->index, av->vdev.name);
+
+	for (i = 0; i < vb->num_planes; i++) {
+		dma = vb2_dma_contig_plane_dma_addr(vb, i);
+		dev_dbg(dev, "iova: plane %u iova %pad\n", i, &dma);
+	}
+
+	spin_lock_irqsave(&aq->lock, flags);
+	list_add(&ib->head, &aq->incoming);
+	spin_unlock_irqrestore(&aq->lock, flags);
+
+	if (!media_pipe || !vb->vb2_queue->start_streaming_called) {
+		dev_dbg(dev, "media pipeline is not ready for %s\n",
+			av->vdev.name);
+		return;
+	}
+
+	mutex_lock(&stream->mutex);
+
+	if (stream->nr_streaming != stream->nr_queues) {
+		dev_dbg(dev, "not streaming yet, adding to incoming\n");
+		goto out;
+	}
+
+	/*
+	 * We just added one buffer to the incoming list of this queue
+	 * (above). Check whether all queues in the pipeline now have a
+	 * buffer available.
+	 */
+	ret = buffer_list_get(stream, &bl);
+	if (ret < 0) {
+		dev_warn(dev, "No buffers available\n");
+		goto out;
+	}
+
+	msg = ipu6_get_fw_msg_buf(stream);
+	if (!msg) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	buf = &msg->fw_msg.frame;
+	ipu6_isys_buf_to_fw_frame_buf(buf, stream, &bl);
+	ipu6_fw_isys_dump_frame_buff_set(dev, buf, stream->nr_output_pins);
+
+	if (!stream->streaming) {
+		ret = ipu6_isys_stream_start(av, &bl, true);
+		if (ret)
+			dev_err(dev, "stream start failed.\n");
+		goto out;
+	}
+
+	/*
+	 * We must queue the buffers in the buffer list to the
+	 * appropriate video buffer queues BEFORE passing them to the
+	 * firmware since we could get a buffer event back before we
+	 * have queued them ourselves to the active queue.
+	 */
+	ipu6_isys_buffer_list_queue(&bl, IPU6_ISYS_BUFFER_LIST_FL_ACTIVE, 0);
+
+	ret = ipu6_fw_isys_complex_cmd(stream->isys, stream->stream_handle,
+				       buf, msg->dma_addr, sizeof(*buf),
+				       IPU6_FW_ISYS_SEND_TYPE_STREAM_CAPTURE);
+	if (ret < 0)
+		dev_err(dev, "send stream capture failed\n");
+
+out:
+	mutex_unlock(&stream->mutex);
+}
+
+static int ipu6_isys_link_fmt_validate(struct ipu6_isys_queue *aq)
+{
+	struct v4l2_mbus_framefmt format;
+	struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+	struct device *dev = &av->isys->adev->auxdev.dev;
+	struct media_pad *remote_pad =
+		media_pad_remote_pad_first(av->vdev.entity.pads);
+	struct v4l2_subdev *sd;
+	u32 r_stream;
+	int ret;
+
+	if (!remote_pad)
+		return -ENOTCONN;
+
+	sd = media_entity_to_v4l2_subdev(remote_pad->entity);
+	r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, remote_pad->index);
+
+	ret = ipu6_isys_get_stream_pad_fmt(sd, remote_pad->index, r_stream,
+					   &format);
+
+	if (ret) {
+		dev_dbg(dev, "failed to get %s: pad %d, stream:%d format\n",
+			sd->entity.name, remote_pad->index, r_stream);
+		return ret;
+	}
+
+	if (format.width != av->mpix.width ||
+	    format.height != av->mpix.height) {
+		dev_dbg(dev, "wrong width or height %ux%u (%ux%u expected)\n",
+			av->mpix.width, av->mpix.height,
+			format.width, format.height);
+		return -EINVAL;
+	}
+
+	if (format.field != av->mpix.field) {
+		dev_dbg(dev, "wrong field value 0x%8.8x (0x%8.8x expected)\n",
+			av->mpix.field, format.field);
+		return -EINVAL;
+	}
+
+	if (format.code != av->pfmt->code) {
+		dev_dbg(dev, "wrong mbus code 0x%8.8x (0x%8.8x expected)\n",
+			av->pfmt->code, format.code);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void return_buffers(struct ipu6_isys_queue *aq,
+			   enum vb2_buffer_state state)
+{
+	struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+	struct ipu6_isys_buffer *ib;
+	bool need_reset = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&aq->lock, flags);
+	while (!list_empty(&aq->incoming)) {
+		struct vb2_buffer *vb;
+
+		ib = list_first_entry(&aq->incoming, struct ipu6_isys_buffer,
+				      head);
+		vb = ipu6_isys_buffer_to_vb2_buffer(ib);
+		list_del(&ib->head);
+		spin_unlock_irqrestore(&aq->lock, flags);
+
+		vb2_buffer_done(vb, state);
+
+		spin_lock_irqsave(&aq->lock, flags);
+	}
+
+	/*
+	 * If there are still buffers queued on the active queue, something
+	 * went wrong (FW crash / HW hang / not all buffers returned from
+	 * the isys). Clean up and flag the isys for a reset.
+	 */
+	while (!list_empty(&aq->active)) {
+		struct vb2_buffer *vb;
+
+		ib = list_first_entry(&aq->active, struct ipu6_isys_buffer,
+				      head);
+		vb = ipu6_isys_buffer_to_vb2_buffer(ib);
+
+		list_del(&ib->head);
+		spin_unlock_irqrestore(&aq->lock, flags);
+
+		vb2_buffer_done(vb, state);
+
+		spin_lock_irqsave(&aq->lock, flags);
+		need_reset = true;
+	}
+
+	spin_unlock_irqrestore(&aq->lock, flags);
+
+	if (need_reset) {
+		mutex_lock(&av->isys->mutex);
+		av->isys->need_reset = true;
+		mutex_unlock(&av->isys->mutex);
+	}
+}
+
+static void ipu6_isys_stream_cleanup(struct ipu6_isys_video *av)
+{
+	video_device_pipeline_stop(&av->vdev);
+	ipu6_isys_put_stream(av->stream);
+	av->stream = NULL;
+}
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q);
+	struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+	struct device *dev = &av->isys->adev->auxdev.dev;
+	struct ipu6_isys_buffer_list __bl, *bl = NULL;
+	struct ipu6_isys_stream *stream;
+	struct media_entity *source_entity = NULL;
+	int nr_queues, ret;
+
+	dev_dbg(dev, "stream: %s: width %u, height %u, css pixelformat %u\n",
+		av->vdev.name, av->mpix.width, av->mpix.height,
+		av->pfmt->css_pixelformat);
+
+	ret = ipu6_isys_setup_video(av, &source_entity, &nr_queues);
+	if (ret < 0) {
+		dev_err(dev, "failed to setup video\n");
+		goto out_return_buffers;
+	}
+
+	ret = ipu6_isys_link_fmt_validate(aq);
+	if (ret) {
+		dev_err(dev,
+			"%s: link format validation failed (%d)\n",
+			av->vdev.name, ret);
+		goto out_pipeline_stop;
+	}
+
+	ret = ipu6_isys_fw_open(av->isys);
+	if (ret)
+		goto out_pipeline_stop;
+
+	stream = av->stream;
+	mutex_lock(&stream->mutex);
+	if (!stream->nr_streaming) {
+		ret = ipu6_isys_video_prepare_stream(av, source_entity,
+						     nr_queues);
+		if (ret)
+			goto out_fw_close;
+	}
+
+	stream->nr_streaming++;
+	dev_dbg(dev, "queue %u of %u\n", stream->nr_streaming,
+		stream->nr_queues);
+
+	list_add(&aq->node, &stream->queues);
+	ipu6_isys_set_csi2_streams_status(av, true);
+	ipu6_isys_configure_stream_watermark(av, true);
+	ipu6_isys_update_stream_watermark(av, true);
+
+	if (stream->nr_streaming != stream->nr_queues)
+		goto out;
+
+	bl = &__bl;
+	ret = buffer_list_get(stream, bl);
+	if (ret < 0) {
+		dev_dbg(dev,
+			"no buffer available, postponing streamon\n");
+		goto out;
+	}
+
+	ret = ipu6_isys_stream_start(av, bl, false);
+	if (ret)
+		goto out_stream_start;
+
+out:
+	mutex_unlock(&stream->mutex);
+
+	return 0;
+
+out_stream_start:
+	list_del(&aq->node);
+	stream->nr_streaming--;
+
+out_fw_close:
+	mutex_unlock(&stream->mutex);
+	ipu6_isys_fw_close(av->isys);
+
+out_pipeline_stop:
+	ipu6_isys_stream_cleanup(av);
+
+out_return_buffers:
+	return_buffers(aq, VB2_BUF_STATE_QUEUED);
+
+	return ret;
+}
+
+static void stop_streaming(struct vb2_queue *q)
+{
+	struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q);
+	struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+	struct ipu6_isys_stream *stream = av->stream;
+
+	ipu6_isys_set_csi2_streams_status(av, false);
+
+	mutex_lock(&stream->mutex);
+
+	ipu6_isys_update_stream_watermark(av, false);
+
+	mutex_lock(&av->isys->stream_mutex);
+	if (stream->nr_streaming == stream->nr_queues && stream->streaming)
+		ipu6_isys_video_set_streaming(av, 0, NULL);
+	mutex_unlock(&av->isys->stream_mutex);
+
+	stream->nr_streaming--;
+	list_del(&aq->node);
+	stream->streaming = 0;
+	mutex_unlock(&stream->mutex);
+
+	ipu6_isys_stream_cleanup(av);
+
+	return_buffers(aq, VB2_BUF_STATE_ERROR);
+
+	ipu6_isys_fw_close(av->isys);
+}
+
+static unsigned int
+get_sof_sequence_by_timestamp(struct ipu6_isys_stream *stream,
+			      struct ipu6_fw_isys_resp_info_abi *info)
+{
+	u64 time = (u64)info->timestamp[1] << 32 | info->timestamp[0];
+	struct ipu6_isys *isys = stream->isys;
+	struct device *dev = &isys->adev->auxdev.dev;
+	unsigned int i;
+
+	/*
+	 * Some FPGA platforms have no TSC, which makes the timestamp
+	 * invalid. In that case derive the sequence from the stream's
+	 * sequence counter instead.
+	 */
+	if (time == 0)
+		return atomic_read(&stream->sequence) - 1;
+
+	for (i = 0; i < IPU6_ISYS_MAX_PARALLEL_SOF; i++)
+		if (time == stream->seq[i].timestamp) {
+			dev_dbg(dev, "sof: using seq nr %u for ts %llu\n",
+				stream->seq[i].sequence, time);
+			return stream->seq[i].sequence;
+		}
+
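+	/*
+	 * No stored SOF entry matched; dump the known sequence/timestamp
+	 * pairs for debugging and fall back to sequence 0.
+	 */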
+	for (i = 0; i < IPU6_ISYS_MAX_PARALLEL_SOF; i++)
+		dev_dbg(dev, "sof: sequence %u, timestamp value %llu\n",
+			stream->seq[i].sequence, stream->seq[i].timestamp);
+
+	return 0;
+}
+
+static u64 get_sof_ns_delta(struct ipu6_isys_video *av,
+			    struct ipu6_fw_isys_resp_info_abi *info)
+{
+	struct ipu6_bus_device *adev = av->isys->adev;
+	struct ipu6_device *isp = adev->isp;
+	u64 delta, tsc_now;
+
+	ipu6_buttress_tsc_read(isp, &tsc_now);
+	if (!tsc_now)
+		return 0;
+
+	delta = tsc_now - ((u64)info->timestamp[1] << 32 | info->timestamp[0]);
+
+	return ipu6_buttress_tsc_ticks_to_ns(delta, isp);
+}
+
+void ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib,
+				      struct ipu6_fw_isys_resp_info_abi *info)
+{
+	struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib);
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue);
+	struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+	struct device *dev = &av->isys->adev->auxdev.dev;
+	struct ipu6_isys_stream *stream = av->stream;
+	u64 ns;
+	u32 sequence;
+
+	ns = ktime_get_ns() - get_sof_ns_delta(av, info);
+	sequence = get_sof_sequence_by_timestamp(stream, info);
+
+	vbuf->vb2_buf.timestamp = ns;
+	vbuf->sequence = sequence;
+
+	dev_dbg(dev, "buf: %s: buffer done, CPU-timestamp:%lld, sequence:%d\n",
+		av->vdev.name, ktime_get_ns(), sequence);
+	dev_dbg(dev, "index:%d, vbuf timestamp:%lld\n", vb->index,
+		vbuf->vb2_buf.timestamp);
+}
+
+void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib)
+{
+	struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib);
+
+	if (atomic_read(&ib->str2mmio_flag)) {
+		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+		/*
+		 * The operation on the buffer ended with an error, which will
+		 * be reported to userspace when the buffer is dequeued.
+		 */
+		atomic_set(&ib->str2mmio_flag, 0);
+	} else {
+		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+	}
+}
+
+void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
+			       struct ipu6_fw_isys_resp_info_abi *info)
+{
+	struct ipu6_isys_queue *aq = stream->output_pins[info->pin_id].aq;
+	struct ipu6_isys *isys = stream->isys;
+	struct device *dev = &isys->adev->auxdev.dev;
+	struct ipu6_isys_buffer *ib;
+	struct vb2_buffer *vb;
+	unsigned long flags;
+	bool first = true;
+	struct vb2_v4l2_buffer *buf;
+
+	spin_lock_irqsave(&aq->lock, flags);
+	if (list_empty(&aq->active)) {
+		spin_unlock_irqrestore(&aq->lock, flags);
+		dev_err(dev, "active queue empty\n");
+		return;
+	}
+
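+	/*
+	 * Walk the active list from its tail (the oldest buffer) and complete
+	 * the entry whose DMA address matches the firmware response.
+	 */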
+	list_for_each_entry_reverse(ib, &aq->active, head) {
+		dma_addr_t addr;
+
+		vb = ipu6_isys_buffer_to_vb2_buffer(ib);
+		addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+		if (info->pin.addr != addr) {
+			if (first)
+				dev_err(dev, "Unexpected buffer address %pad\n",
+					&addr);
+			first = false;
+			continue;
+		}
+
+		if (info->error_info.error ==
+		    IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO) {
+			/*
+			 * The hardware reported a STR2MMIO error for this
+			 * buffer; flag it so the buffer is completed with an
+			 * error state.
+			 */
+			atomic_set(&ib->str2mmio_flag, 1);
+		}
+		dev_dbg(dev, "buffer: found buffer %pad\n", &addr);
+
+		buf = to_vb2_v4l2_buffer(vb);
+		buf->field = V4L2_FIELD_NONE;
+
+		list_del(&ib->head);
+		spin_unlock_irqrestore(&aq->lock, flags);
+
+		ipu6_isys_buf_calc_sequence_time(ib, info);
+
+		ipu6_isys_queue_buf_done(ib);
+
+		return;
+	}
+
+	dev_err(dev, "Failed to find a matching video buffer\n");
+
+	spin_unlock_irqrestore(&aq->lock, flags);
+}
+
+static const struct vb2_ops ipu6_isys_queue_ops = {
+	.queue_setup = queue_setup,
+	.wait_prepare = vb2_ops_wait_prepare,
+	.wait_finish = vb2_ops_wait_finish,
+	.buf_prepare = ipu6_isys_buf_prepare,
+	.start_streaming = start_streaming,
+	.stop_streaming = stop_streaming,
+	.buf_queue = buf_queue,
+};
+
+int ipu6_isys_queue_init(struct ipu6_isys_queue *aq)
+{
+	struct ipu6_isys *isys = ipu6_isys_queue_to_video(aq)->isys;
+	struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+	int ret;
+
+	/* no support for userptr */
+	if (!aq->vbq.io_modes)
+		aq->vbq.io_modes = VB2_MMAP | VB2_DMABUF;
+
+	aq->vbq.drv_priv = aq;
+	aq->vbq.ops = &ipu6_isys_queue_ops;
+	aq->vbq.lock = &av->mutex;
+	aq->vbq.mem_ops = &vb2_dma_contig_memops;
+	aq->vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+	aq->vbq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
+	ret = vb2_queue_init(&aq->vbq);
+	if (ret)
+		return ret;
+
+	aq->dev = &isys->adev->auxdev.dev;
+	aq->vbq.dev = &isys->adev->auxdev.dev;
+	spin_lock_init(&aq->lock);
+	INIT_LIST_HEAD(&aq->active);
+	INIT_LIST_HEAD(&aq->incoming);
+
+	return 0;
+}
+
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
new file mode 100644
index 000000000000..9fb454577bb5
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013 - 2023 Intel Corporation */
+
+#ifndef IPU6_ISYS_QUEUE_H
+#define IPU6_ISYS_QUEUE_H
+
+#include <linux/container_of.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/spinlock_types.h>
+
+#include <media/videobuf2-v4l2.h>
+
+#include "ipu6-fw-isys.h"
+#include "ipu6-isys-video.h"
+
+struct ipu6_isys_queue {
+	struct vb2_queue vbq;
+	struct list_head node;
+	struct device *dev;
+	/*
+	 * @lock: serialise access to the active and incoming lists
+	 */
+	spinlock_t lock;
+	struct list_head active;
+	struct list_head incoming;
+	unsigned int fw_output;
+};
+
+struct ipu6_isys_buffer {
+	struct list_head head;
+	atomic_t str2mmio_flag;
+};
+
+struct ipu6_isys_video_buffer {
+	struct vb2_v4l2_buffer vb_v4l2;
+	struct ipu6_isys_buffer ib;
+};
+
+#define IPU6_ISYS_BUFFER_LIST_FL_INCOMING	BIT(0)
+#define IPU6_ISYS_BUFFER_LIST_FL_ACTIVE	BIT(1)
+#define IPU6_ISYS_BUFFER_LIST_FL_SET_STATE	BIT(2)
+
+struct ipu6_isys_buffer_list {
+	struct list_head head;
+	unsigned int nbufs;
+};
+
+#define vb2_queue_to_isys_queue(__vb2) \
+	container_of(__vb2, struct ipu6_isys_queue, vbq)
+
+#define ipu6_isys_to_isys_video_buffer(__ib) \
+	container_of(__ib, struct ipu6_isys_video_buffer, ib)
+
+#define vb2_buffer_to_ipu6_isys_video_buffer(__vvb) \
+	container_of(__vvb, struct ipu6_isys_video_buffer, vb_v4l2)
+
+#define ipu6_isys_buffer_to_vb2_buffer(__ib) \
+	(&ipu6_isys_to_isys_video_buffer(__ib)->vb_v4l2.vb2_buf)
+
+void ipu6_isys_buffer_list_queue(struct ipu6_isys_buffer_list *bl,
+				 unsigned long op_flags,
+				 enum vb2_buffer_state state);
+void
+ipu6_isys_buf_to_fw_frame_buf(struct ipu6_fw_isys_frame_buff_set_abi *set,
+			      struct ipu6_isys_stream *stream,
+			      struct ipu6_isys_buffer_list *bl);
+void
+ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib,
+				 struct ipu6_fw_isys_resp_info_abi *info);
+void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib);
+void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
+			       struct ipu6_fw_isys_resp_info_abi *info);
+int ipu6_isys_queue_init(struct ipu6_isys_queue *aq);
+#endif /* IPU6_ISYS_QUEUE_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
new file mode 100644
index 000000000000..847eac26bcd6
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
@@ -0,0 +1,1253 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2023 Intel Corporation
+ */
+
+#include <linux/align.h>
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/math64.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "ipu6.h"
+#include "ipu6-bus.h"
+#include "ipu6-cpd.h"
+#include "ipu6-fw-isys.h"
+#include "ipu6-isys.h"
+#include "ipu6-isys-csi2.h"
+#include "ipu6-isys-queue.h"
+#include "ipu6-isys-video.h"
+#include "ipu6-platform-regs.h"
+
+const struct ipu6_isys_pixelformat ipu6_isys_pfmts[] = {
+	{V4L2_PIX_FMT_SBGGR12, 16, 12, MEDIA_BUS_FMT_SBGGR12_1X12,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SGBRG12, 16, 12, MEDIA_BUS_FMT_SGBRG12_1X12,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SGRBG12, 16, 12, MEDIA_BUS_FMT_SGRBG12_1X12,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SRGGB12, 16, 12, MEDIA_BUS_FMT_SRGGB12_1X12,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SBGGR10, 16, 10, MEDIA_BUS_FMT_SBGGR10_1X10,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SGBRG10, 16, 10, MEDIA_BUS_FMT_SGBRG10_1X10,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SGRBG10, 16, 10, MEDIA_BUS_FMT_SGRBG10_1X10,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SRGGB10, 16, 10, MEDIA_BUS_FMT_SRGGB10_1X10,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW16},
+	{V4L2_PIX_FMT_SBGGR8, 8, 8, MEDIA_BUS_FMT_SBGGR8_1X8,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW8},
+	{V4L2_PIX_FMT_SGBRG8, 8, 8, MEDIA_BUS_FMT_SGBRG8_1X8,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW8},
+	{V4L2_PIX_FMT_SGRBG8, 8, 8, MEDIA_BUS_FMT_SGRBG8_1X8,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW8},
+	{V4L2_PIX_FMT_SRGGB8, 8, 8, MEDIA_BUS_FMT_SRGGB8_1X8,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW8},
+	{V4L2_PIX_FMT_SBGGR12P, 12, 12, MEDIA_BUS_FMT_SBGGR12_1X12,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW12},
+	{V4L2_PIX_FMT_SGBRG12P, 12, 12, MEDIA_BUS_FMT_SGBRG12_1X12,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW12},
+	{V4L2_PIX_FMT_SGRBG12P, 12, 12, MEDIA_BUS_FMT_SGRBG12_1X12,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW12},
+	{V4L2_PIX_FMT_SRGGB12P, 12, 12, MEDIA_BUS_FMT_SRGGB12_1X12,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW12},
+	{V4L2_PIX_FMT_SBGGR10P, 10, 10, MEDIA_BUS_FMT_SBGGR10_1X10,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW10},
+	{V4L2_PIX_FMT_SGBRG10P, 10, 10, MEDIA_BUS_FMT_SGBRG10_1X10,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW10},
+	{V4L2_PIX_FMT_SGRBG10P, 10, 10, MEDIA_BUS_FMT_SGRBG10_1X10,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW10},
+	{V4L2_PIX_FMT_SRGGB10P, 10, 10, MEDIA_BUS_FMT_SRGGB10_1X10,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RAW10},
+	{V4L2_PIX_FMT_UYVY, 16, 16, MEDIA_BUS_FMT_UYVY8_1X16,
+	 IPU6_FW_ISYS_FRAME_FORMAT_UYVY},
+	{V4L2_PIX_FMT_YUYV, 16, 16, MEDIA_BUS_FMT_YUYV8_1X16,
+	 IPU6_FW_ISYS_FRAME_FORMAT_YUYV},
+	{V4L2_PIX_FMT_RGB565, 16, 16, MEDIA_BUS_FMT_RGB565_1X16,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RGB565},
+	{V4L2_PIX_FMT_BGR24, 24, 24, MEDIA_BUS_FMT_RGB888_1X24,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RGBA888},
+};
+
+static int video_open(struct file *file)
+{
+	struct ipu6_isys_video *av = video_drvdata(file);
+	struct ipu6_isys *isys = av->isys;
+	struct ipu6_bus_device *adev = isys->adev;
+
+	mutex_lock(&isys->mutex);
+	if (isys->need_reset) {
+		mutex_unlock(&isys->mutex);
+		dev_warn(&adev->auxdev.dev, "isys power cycle required\n");
+		return -EIO;
+	}
+	mutex_unlock(&isys->mutex);
+
+	return v4l2_fh_open(file);
+}
+
+static int video_release(struct file *file)
+{
+	return vb2_fop_release(file);
+}
+
+static const struct ipu6_isys_pixelformat *
+ipu6_isys_get_pixelformat(u32 pixelformat)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) {
+		const struct ipu6_isys_pixelformat *pfmt = &ipu6_isys_pfmts[i];
+
+		if (pfmt->pixelformat == pixelformat)
+			return pfmt;
+	}
+
+	return &ipu6_isys_pfmts[0];
+}
+
+int ipu6_isys_vidioc_querycap(struct file *file, void *fh,
+			      struct v4l2_capability *cap)
+{
+	struct ipu6_isys_video *av = video_drvdata(file);
+
+	strscpy(cap->driver, IPU6_ISYS_NAME, sizeof(cap->driver));
+	strscpy(cap->card, av->isys->media_dev.model, sizeof(cap->card));
+
+	return 0;
+}
+
+int ipu6_isys_vidioc_enum_fmt(struct file *file, void *fh,
+			      struct v4l2_fmtdesc *f)
+{
+	unsigned int i, found = 0;
+
+	if (f->index >= ARRAY_SIZE(ipu6_isys_pfmts))
+		return -EINVAL;
+
+	if (!f->mbus_code) {
+		f->flags = 0;
+		f->pixelformat = ipu6_isys_pfmts[f->index].pixelformat;
+		return 0;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) {
+		if (f->mbus_code != ipu6_isys_pfmts[i].code)
+			continue;
+
+		if (f->index == found) {
+			f->flags = 0;
+			f->pixelformat = ipu6_isys_pfmts[i].pixelformat;
+			return 0;
+		}
+		found++;
+	}
+
+	return -EINVAL;
+}
+
+static int ipu6_isys_vidioc_enum_framesizes(struct file *file, void *fh,
+					    struct v4l2_frmsizeenum *fsize)
+{
+	if (fsize->index > 0)
+		return -EINVAL;
+
+	fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+	fsize->stepwise.min_width = IPU6_ISYS_MIN_WIDTH;
+	fsize->stepwise.max_width = IPU6_ISYS_MAX_WIDTH;
+	fsize->stepwise.min_height = IPU6_ISYS_MIN_HEIGHT;
+	fsize->stepwise.max_height = IPU6_ISYS_MAX_HEIGHT;
+	fsize->stepwise.step_width = 2;
+	fsize->stepwise.step_height = 2;
+
+	return 0;
+}
+
+static int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *fh,
+				       struct v4l2_format *fmt)
+{
+	struct ipu6_isys_video *av = video_drvdata(file);
+
+	fmt->fmt.pix_mp = av->mpix;
+
+	return 0;
+}
+
+static const struct ipu6_isys_pixelformat *
+ipu6_isys_video_try_fmt_vid_mplane(struct ipu6_isys_video *av,
+				   struct v4l2_pix_format_mplane *mpix)
+{
+	const struct ipu6_isys_pixelformat *pfmt =
+		ipu6_isys_get_pixelformat(mpix->pixelformat);
+
+	mpix->pixelformat = pfmt->pixelformat;
+	mpix->num_planes = 1;
+
+	mpix->width = clamp(mpix->width, IPU6_ISYS_MIN_WIDTH,
+			    IPU6_ISYS_MAX_WIDTH);
+	mpix->height = clamp(mpix->height, IPU6_ISYS_MIN_HEIGHT,
+			     IPU6_ISYS_MAX_HEIGHT);
+
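+	/*
+	 * Unpacked formats store each pixel in a whole number of bytes;
+	 * packed formats store bpp bits per pixel back to back.
+	 */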
+	if (pfmt->bpp != pfmt->bpp_packed)
+		mpix->plane_fmt[0].bytesperline =
+			mpix->width * DIV_ROUND_UP(pfmt->bpp, BITS_PER_BYTE);
+	else
+		mpix->plane_fmt[0].bytesperline =
+			DIV_ROUND_UP((unsigned int)mpix->width * pfmt->bpp,
+				     BITS_PER_BYTE);
+
+	mpix->plane_fmt[0].bytesperline = ALIGN(mpix->plane_fmt[0].bytesperline,
+						av->isys->line_align);
+
+	/*
+	 * Reserve (height + 1) * bytesperline due to a hardware issue: the
+	 * size of the DMA transfer unit is a power of two and a line is
+	 * transferred in as few units as possible, so up to one line length
+	 * of extra data may be written to memory after the image. Another
+	 * lower bound is the GDA allocation unit size, which dominates at
+	 * low resolutions. Use the larger of the two to avoid memory
+	 * corruption.
+	 */
+	mpix->plane_fmt[0].sizeimage =
+		max(mpix->plane_fmt[0].sizeimage,
+		    mpix->plane_fmt[0].bytesperline * mpix->height +
+		    max(mpix->plane_fmt[0].bytesperline,
+			av->isys->pdata->ipdata->isys_dma_overshoot));
+
+	memset(mpix->plane_fmt[0].reserved, 0,
+	       sizeof(mpix->plane_fmt[0].reserved));
+
+	mpix->field = V4L2_FIELD_NONE;
+
+	mpix->colorspace = V4L2_COLORSPACE_RAW;
+	mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+	mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
+	mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+	return pfmt;
+}
+
+static int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *fh,
+				       struct v4l2_format *f)
+{
+	struct ipu6_isys_video *av = video_drvdata(file);
+
+	if (av->aq.vbq.streaming)
+		return -EBUSY;
+
+	av->pfmt = ipu6_isys_video_try_fmt_vid_mplane(av, &f->fmt.pix_mp);
+	av->mpix = f->fmt.pix_mp;
+
+	return 0;
+}
+
+static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *fh,
+					 struct v4l2_format *f)
+{
+	struct ipu6_isys_video *av = video_drvdata(file);
+
+	ipu6_isys_video_try_fmt_vid_mplane(av, &f->fmt.pix_mp);
+
+	return 0;
+}
+
+static int link_validate(struct media_link *link)
+{
+	struct ipu6_isys_video *av =
+		container_of(link->sink, struct ipu6_isys_video, pad);
+	struct device *dev = &av->isys->adev->auxdev.dev;
+	struct v4l2_subdev_state *s_state;
+	struct v4l2_subdev *s_sd;
+	struct v4l2_mbus_framefmt *s_fmt;
+	struct media_pad *s_pad;
+	u32 s_stream;
+	int ret = -EPIPE;
+
+	if (!link->source->entity)
+		return ret;
+
+	s_sd = media_entity_to_v4l2_subdev(link->source->entity);
+	s_state = v4l2_subdev_get_unlocked_active_state(s_sd);
+	if (!s_state)
+		return ret;
+
+	dev_dbg(dev, "validating link \"%s\":%u -> \"%s\"\n",
+		link->source->entity->name, link->source->index,
+		link->sink->entity->name);
+
+	s_pad = media_pad_remote_pad_first(&av->pad);
+	s_stream = ipu6_isys_get_src_stream_by_src_pad(s_sd, s_pad->index);
+
+	v4l2_subdev_lock_state(s_state);
+
+	s_fmt = v4l2_subdev_state_get_stream_format(s_state, s_pad->index,
+						    s_stream);
+	if (!s_fmt) {
+		dev_err(dev, "failed to get source pad format\n");
+		goto unlock;
+	}
+
+	if (s_fmt->width != av->mpix.width ||
+	    s_fmt->height != av->mpix.height || s_fmt->code != av->pfmt->code) {
+		dev_err(dev, "format mismatch %dx%d,%x != %dx%d,%x\n",
+			s_fmt->width, s_fmt->height, s_fmt->code,
+			av->mpix.width, av->mpix.height, av->pfmt->code);
+		goto unlock;
+	}
+
+	v4l2_subdev_unlock_state(s_state);
+
+	return 0;
+unlock:
+	v4l2_subdev_unlock_state(s_state);
+
+	return ret;
+}
+
+static void get_stream_opened(struct ipu6_isys_video *av)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&av->isys->streams_lock, flags);
+	av->isys->stream_opened++;
+	spin_unlock_irqrestore(&av->isys->streams_lock, flags);
+}
+
+static void put_stream_opened(struct ipu6_isys_video *av)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&av->isys->streams_lock, flags);
+	av->isys->stream_opened--;
+	spin_unlock_irqrestore(&av->isys->streams_lock, flags);
+}
+
+static int ipu6_isys_fw_pin_cfg(struct ipu6_isys_video *av,
+				struct ipu6_fw_isys_stream_cfg_data_abi *cfg)
+{
+	struct media_pad *src_pad = media_pad_remote_pad_first(&av->pad);
+	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(src_pad->entity);
+	struct ipu6_fw_isys_input_pin_info_abi *input_pin;
+	struct ipu6_fw_isys_output_pin_info_abi *output_pin;
+	struct ipu6_isys_stream *stream = av->stream;
+	struct ipu6_isys_queue *aq = &av->aq;
+	struct v4l2_mbus_framefmt fmt;
+	struct v4l2_rect v4l2_crop;
+	struct ipu6_isys *isys = av->isys;
+	struct device *dev = &isys->adev->auxdev.dev;
+	int input_pins = cfg->nof_input_pins++;
+	int output_pins;
+	u32 src_stream;
+	int ret;
+
+	src_stream = ipu6_isys_get_src_stream_by_src_pad(sd, src_pad->index);
+	ret = ipu6_isys_get_stream_pad_fmt(sd, src_pad->index, src_stream,
+					   &fmt);
+	if (ret < 0) {
+		dev_err(dev, "can't get stream format (%d)\n", ret);
+		return ret;
+	}
+
+	ret = ipu6_isys_get_stream_pad_crop(sd, src_pad->index, src_stream,
+					    &v4l2_crop);
+	if (ret < 0) {
+		dev_err(dev, "can't get stream crop (%d)\n", ret);
+		return ret;
+	}
+
+	input_pin = &cfg->input_pins[input_pins];
+	input_pin->input_res.width = fmt.width;
+	input_pin->input_res.height = fmt.height;
+	input_pin->dt = av->dt;
+	input_pin->bits_per_pix = av->pfmt->bpp_packed;
+	input_pin->mapped_dt = 0x40; /* invalid mipi data type */
+	input_pin->mipi_decompression = 0;
+	input_pin->capture_mode = IPU6_FW_ISYS_CAPTURE_MODE_REGULAR;
+	input_pin->mipi_store_mode = av->pfmt->bpp == av->pfmt->bpp_packed ?
+		IPU6_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER :
+		IPU6_FW_ISYS_MIPI_STORE_MODE_NORMAL;
+	input_pin->crop_first_and_last_lines = v4l2_crop.top & 1;
+
+	output_pins = cfg->nof_output_pins++;
+	aq->fw_output = output_pins;
+	stream->output_pins[output_pins].pin_ready = ipu6_isys_queue_buf_ready;
+	stream->output_pins[output_pins].aq = aq;
+
+	output_pin = &cfg->output_pins[output_pins];
+	output_pin->input_pin_id = input_pins;
+	output_pin->output_res.width = av->mpix.width;
+	output_pin->output_res.height = av->mpix.height;
+
+	output_pin->stride = av->mpix.plane_fmt[0].bytesperline;
+	if (av->pfmt->bpp != av->pfmt->bpp_packed)
+		output_pin->pt = IPU6_FW_ISYS_PIN_TYPE_RAW_SOC;
+	else
+		output_pin->pt = IPU6_FW_ISYS_PIN_TYPE_MIPI;
+	output_pin->ft = av->pfmt->css_pixelformat;
+	output_pin->send_irq = 1;
+	memset(output_pin->ts_offsets, 0, sizeof(output_pin->ts_offsets));
+	output_pin->s2m_pixel_soc_pixel_remapping =
+		S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING;
+	output_pin->csi_be_soc_pixel_remapping =
+		CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING;
+
+	output_pin->snoopable = true;
+	output_pin->error_handling_enable = false;
+	output_pin->sensor_type = isys->sensor_type++;
+	if (isys->sensor_type > isys->pdata->ipdata->sensor_type_end)
+		isys->sensor_type = isys->pdata->ipdata->sensor_type_start;
+
+	return 0;
+}
+
+static int start_stream_firmware(struct ipu6_isys_video *av,
+				 struct ipu6_isys_buffer_list *bl)
+{
+	struct ipu6_fw_isys_stream_cfg_data_abi *stream_cfg;
+	struct ipu6_fw_isys_frame_buff_set_abi *buf = NULL;
+	struct ipu6_isys_stream *stream = av->stream;
+	struct device *dev = &av->isys->adev->auxdev.dev;
+	struct isys_fw_msgs *msg = NULL;
+	struct ipu6_isys_queue *aq;
+	int ret, retout, tout;
+	u16 send_type;
+
+	msg = ipu6_get_fw_msg_buf(stream);
+	if (!msg)
+		return -ENOMEM;
+
+	stream_cfg = &msg->fw_msg.stream;
+	stream_cfg->src = stream->stream_source;
+	stream_cfg->vc = stream->vc;
+	stream_cfg->isl_use = 0;
+	stream_cfg->sensor_type = IPU6_FW_ISYS_SENSOR_MODE_NORMAL;
+
+	list_for_each_entry(aq, &stream->queues, node) {
+		struct ipu6_isys_video *__av = ipu6_isys_queue_to_video(aq);
+
+		ret = ipu6_isys_fw_pin_cfg(__av, stream_cfg);
+		if (ret < 0) {
+			ipu6_put_fw_msg_buf(av->isys, (u64)stream_cfg);
+			return ret;
+		}
+	}
+
+	ipu6_fw_isys_dump_stream_cfg(dev, stream_cfg);
+
+	stream->nr_output_pins = stream_cfg->nof_output_pins;
+
+	reinit_completion(&stream->stream_open_completion);
+
+	ret = ipu6_fw_isys_complex_cmd(av->isys, stream->stream_handle,
+				       stream_cfg, msg->dma_addr,
+				       sizeof(*stream_cfg),
+				       IPU6_FW_ISYS_SEND_TYPE_STREAM_OPEN);
+	if (ret < 0) {
+		dev_err(dev, "can't open stream (%d)\n", ret);
+		ipu6_put_fw_msg_buf(av->isys, (u64)stream_cfg);
+		return ret;
+	}
+
+	get_stream_opened(av);
+
+	tout = wait_for_completion_timeout(&stream->stream_open_completion,
+					   IPU6_FW_CALL_TIMEOUT_JIFFIES);
+
+	ipu6_put_fw_msg_buf(av->isys, (u64)stream_cfg);
+
+	if (!tout) {
+		dev_err(dev, "stream open time out\n");
+		ret = -ETIMEDOUT;
+		goto out_put_stream_opened;
+	}
+	if (stream->error) {
+		dev_err(dev, "stream open error: %d\n", stream->error);
+		ret = -EIO;
+		goto out_put_stream_opened;
+	}
+	dev_dbg(dev, "start stream: open complete\n");
+
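+	/*
+	 * If a buffer list was provided, attach it to the stream start so
+	 * capture begins with these buffers already queued to the firmware.
+	 */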
+	if (bl) {
+		msg = ipu6_get_fw_msg_buf(stream);
+		if (!msg) {
+			ret = -ENOMEM;
+			goto out_put_stream_opened;
+		}
+		buf = &msg->fw_msg.frame;
+		ipu6_isys_buf_to_fw_frame_buf(buf, stream, bl);
+		ipu6_isys_buffer_list_queue(bl,
+					    IPU6_ISYS_BUFFER_LIST_FL_ACTIVE, 0);
+	}
+
+	reinit_completion(&stream->stream_start_completion);
+
+	if (bl) {
+		send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE;
+		ipu6_fw_isys_dump_frame_buff_set(dev, buf,
+						 stream_cfg->nof_output_pins);
+		ret = ipu6_fw_isys_complex_cmd(av->isys, stream->stream_handle,
+					       buf, msg->dma_addr,
+					       sizeof(*buf), send_type);
+	} else {
+		send_type = IPU6_FW_ISYS_SEND_TYPE_STREAM_START;
+		ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle,
+					      send_type);
+	}
+
+	if (ret < 0) {
+		dev_err(dev, "can't start streaming (%d)\n", ret);
+		goto out_stream_close;
+	}
+
+	tout = wait_for_completion_timeout(&stream->stream_start_completion,
+					   IPU6_FW_CALL_TIMEOUT_JIFFIES);
+	if (!tout) {
+		dev_err(dev, "stream start time out\n");
+		ret = -ETIMEDOUT;
+		goto out_stream_close;
+	}
+	if (stream->error) {
+		dev_err(dev, "stream start error: %d\n", stream->error);
+		ret = -EIO;
+		goto out_stream_close;
+	}
+	dev_dbg(dev, "start stream: complete\n");
+
+	return 0;
+
+out_stream_close:
+	reinit_completion(&stream->stream_close_completion);
+
+	retout = ipu6_fw_isys_simple_cmd(av->isys,
+					 stream->stream_handle,
+					 IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE);
+	if (retout < 0) {
+		dev_dbg(dev, "can't close stream (%d)\n", retout);
+		goto out_put_stream_opened;
+	}
+
+	tout = wait_for_completion_timeout(&stream->stream_close_completion,
+					   IPU6_FW_CALL_TIMEOUT_JIFFIES);
+	if (!tout)
+		dev_err(dev, "stream close time out\n");
+	else if (stream->error)
+		dev_err(dev, "stream close error: %d\n", stream->error);
+	else
+		dev_dbg(dev, "stream close complete\n");
+
+out_put_stream_opened:
+	put_stream_opened(av);
+
+	return ret;
+}
+
+static void stop_streaming_firmware(struct ipu6_isys_video *av)
+{
+	struct device *dev = &av->isys->adev->auxdev.dev;
+	struct ipu6_isys_stream *stream = av->stream;
+	int ret, tout;
+
+	reinit_completion(&stream->stream_stop_completion);
+
+	ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle,
+				      IPU6_FW_ISYS_SEND_TYPE_STREAM_FLUSH);
+
+	if (ret < 0) {
+		dev_err(dev, "can't stop stream (%d)\n", ret);
+		return;
+	}
+
+	tout = wait_for_completion_timeout(&stream->stream_stop_completion,
+					   IPU6_FW_CALL_TIMEOUT_JIFFIES);
+	if (!tout)
+		dev_warn(dev, "stream stop time out\n");
+	else if (stream->error)
+		dev_warn(dev, "stream stop error: %d\n", stream->error);
+	else
+		dev_dbg(dev, "stop stream: complete\n");
+}
+
+static void close_streaming_firmware(struct ipu6_isys_video *av)
+{
+	struct ipu6_isys_stream *stream = av->stream;
+	struct device *dev = &av->isys->adev->auxdev.dev;
+	int ret, tout;
+
+	reinit_completion(&stream->stream_close_completion);
+
+	ret = ipu6_fw_isys_simple_cmd(av->isys, stream->stream_handle,
+				      IPU6_FW_ISYS_SEND_TYPE_STREAM_CLOSE);
+	if (ret < 0) {
+		dev_err(dev, "can't close stream (%d)\n", ret);
+		return;
+	}
+
+	tout = wait_for_completion_timeout(&stream->stream_close_completion,
+					   IPU6_FW_CALL_TIMEOUT_JIFFIES);
+	if (!tout)
+		dev_warn(dev, "stream close time out\n");
+	else if (stream->error)
+		dev_warn(dev, "stream close error: %d\n", stream->error);
+	else
+		dev_dbg(dev, "close stream: complete\n");
+
+	put_stream_opened(av);
+}
+
+int ipu6_isys_video_prepare_stream(struct ipu6_isys_video *av,
+				   struct media_entity *source_entity,
+				   int nr_queues)
+{
+	struct ipu6_isys_stream *stream = av->stream;
+	struct ipu6_isys_csi2 *csi2;
+
+	if (WARN_ON(stream->nr_streaming))
+		return -EINVAL;
+
+	stream->nr_queues = nr_queues;
+	atomic_set(&stream->sequence, 0);
+
+	stream->seq_index = 0;
+	memset(stream->seq, 0, sizeof(stream->seq));
+
+	if (WARN_ON(!list_empty(&stream->queues)))
+		return -EINVAL;
+
+	stream->stream_source = stream->asd->source;
+	csi2 = ipu6_isys_subdev_to_csi2(stream->asd);
+	csi2->receiver_errors = 0;
+	stream->source_entity = source_entity;
+
+	dev_dbg(&av->isys->adev->auxdev.dev,
+		"prepare stream: external entity %s\n",
+		stream->source_entity->name);
+
+	return 0;
+}
+
+void ipu6_isys_configure_stream_watermark(struct ipu6_isys_video *av,
+					  bool state)
+{
+	struct ipu6_isys *isys = av->isys;
+	struct ipu6_isys_csi2 *csi2 = NULL;
+	struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark;
+	struct device *dev = &isys->adev->auxdev.dev;
+	struct v4l2_mbus_framefmt format;
+	struct v4l2_subdev *esd;
+	struct v4l2_control hb = { .id = V4L2_CID_HBLANK, .value = 0 };
+	unsigned int bpp, lanes;
+	s64 link_freq = 0;
+	u64 pixel_rate = 0;
+	int ret;
+
+	if (!state)
+		return;
+
+	esd = media_entity_to_v4l2_subdev(av->stream->source_entity);
+
+	av->watermark.width = av->mpix.width;
+	av->watermark.height = av->mpix.height;
+	av->watermark.sram_gran_shift = isys->pdata->ipdata->sram_gran_shift;
+	av->watermark.sram_gran_size = isys->pdata->ipdata->sram_gran_size;
+
+	ret = v4l2_g_ctrl(esd->ctrl_handler, &hb);
+	if (!ret && hb.value >= 0)
+		av->watermark.hblank = hb.value;
+	else
+		av->watermark.hblank = 0;
+
+	csi2 = ipu6_isys_subdev_to_csi2(av->stream->asd);
+	link_freq = ipu6_isys_csi2_get_link_freq(csi2);
+	if (link_freq > 0) {
+		lanes = csi2->nlanes;
+		ret = ipu6_isys_get_stream_pad_fmt(&csi2->asd.sd, 0,
+						   av->source_stream, &format);
+		if (!ret) {
+			bpp = ipu6_isys_mbus_code_to_bpp(format.code);
+			pixel_rate = mul_u64_u32_div(link_freq, lanes * 2, bpp);
+		}
+	}
+
+	av->watermark.pixel_rate = pixel_rate;
+
+	if (!pixel_rate) {
+		mutex_lock(&iwake_watermark->mutex);
+		iwake_watermark->force_iwake_disable = true;
+		mutex_unlock(&iwake_watermark->mutex);
+		dev_warn(dev, "unexpected pixel_rate from %s, disable iwake.\n",
+			 av->stream->source_entity->name);
+	}
+}
+
+static void calculate_stream_datarate(struct ipu6_isys_video *av)
+{
+	struct video_stream_watermark *watermark = &av->watermark;
+	u32 bpp = av->pfmt->bpp;
+	u32 pages_per_line, pb_bytes_per_line, pixels_per_line, bytes_per_line;
+	u64 line_time_ns, stream_data_rate;
+	u16 shift, size;
+
+	shift = watermark->sram_gran_shift;
+	size = watermark->sram_gran_size;
+
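+	/*
+	 * Round the line length up to the SRAM allocation granule and divide
+	 * by the line time; the resulting rate is in bytes per microsecond.
+	 */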
+	pixels_per_line = watermark->width + watermark->hblank;
+	line_time_ns =  div_u64(pixels_per_line * NSEC_PER_SEC,
+				watermark->pixel_rate);
+	bytes_per_line = watermark->width * bpp / 8;
+	pages_per_line = DIV_ROUND_UP(bytes_per_line, size);
+	pb_bytes_per_line = pages_per_line << shift;
+	stream_data_rate = div64_u64(pb_bytes_per_line * 1000, line_time_ns);
+
+	watermark->stream_data_rate = stream_data_rate;
+}
+
+void ipu6_isys_update_stream_watermark(struct ipu6_isys_video *av, bool state)
+{
+	struct isys_iwake_watermark *iwake_watermark =
+		&av->isys->iwake_watermark;
+
+	if (!av->watermark.pixel_rate)
+		return;
+
+	if (state) {
+		calculate_stream_datarate(av);
+		mutex_lock(&iwake_watermark->mutex);
+		list_add(&av->watermark.stream_node,
+			 &iwake_watermark->video_list);
+		mutex_unlock(&iwake_watermark->mutex);
+	} else {
+		av->watermark.stream_data_rate = 0;
+		mutex_lock(&iwake_watermark->mutex);
+		list_del(&av->watermark.stream_node);
+		mutex_unlock(&iwake_watermark->mutex);
+	}
+
+	update_watermark_setting(av->isys);
+}
+
+void ipu6_isys_put_stream(struct ipu6_isys_stream *stream)
+{
+	struct device *dev;
+	unsigned int i;
+	unsigned long flags;
+
+	if (!stream) {
+		pr_err("ipu6-isys: no available stream\n");
+		return;
+	}
+
+	dev = &stream->isys->adev->auxdev.dev;
+
+	spin_lock_irqsave(&stream->isys->streams_lock, flags);
+	for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) {
+		if (&stream->isys->streams[i] == stream) {
+			if (stream->isys->streams_ref_count[i] > 0)
+				stream->isys->streams_ref_count[i]--;
+			else
+				dev_warn(dev, "invalid stream %d\n", i);
+
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&stream->isys->streams_lock, flags);
+}
+
+static struct ipu6_isys_stream *
+ipu6_isys_get_stream(struct ipu6_isys_video *av, struct ipu6_isys_subdev *asd)
+{
+	struct ipu6_isys_stream *stream = NULL;
+	struct ipu6_isys *isys = av->isys;
+	unsigned long flags;
+	unsigned int i;
+	u8 vc = av->vc;
+
+	if (!isys)
+		return NULL;
+
+	spin_lock_irqsave(&isys->streams_lock, flags);
+	for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) {
+		if (isys->streams_ref_count[i] && isys->streams[i].vc == vc &&
+		    isys->streams[i].asd == asd) {
+			isys->streams_ref_count[i]++;
+			stream = &isys->streams[i];
+			break;
+		}
+	}
+
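+	/* No existing stream for this VC and subdev; claim a free slot. */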
+	if (!stream) {
+		for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) {
+			if (!isys->streams_ref_count[i]) {
+				isys->streams_ref_count[i]++;
+				stream = &isys->streams[i];
+				stream->vc = vc;
+				stream->asd = asd;
+				break;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&isys->streams_lock, flags);
+
+	return stream;
+}
+
+struct ipu6_isys_stream *
+ipu6_isys_query_stream_by_handle(struct ipu6_isys *isys, u8 stream_handle)
+{
+	unsigned long flags;
+	struct ipu6_isys_stream *stream = NULL;
+
+	if (!isys)
+		return NULL;
+
+	if (stream_handle >= IPU6_ISYS_MAX_STREAMS) {
+		dev_err(&isys->adev->auxdev.dev,
+			"stream_handle %d is invalid\n", stream_handle);
+		return NULL;
+	}
+
+	spin_lock_irqsave(&isys->streams_lock, flags);
+	if (isys->streams_ref_count[stream_handle] > 0) {
+		isys->streams_ref_count[stream_handle]++;
+		stream = &isys->streams[stream_handle];
+	}
+	spin_unlock_irqrestore(&isys->streams_lock, flags);
+
+	return stream;
+}
+
+struct ipu6_isys_stream *
+ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc)
+{
+	struct ipu6_isys_stream *stream = NULL;
+	unsigned long flags;
+	unsigned int i;
+
+	if (!isys)
+		return NULL;
+
+	if (source < 0) {
+		dev_err(&isys->adev->auxdev.dev,
+			"query stream with invalid port number\n");
+		return NULL;
+	}
+
+	spin_lock_irqsave(&isys->streams_lock, flags);
+	for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) {
+		if (!isys->streams_ref_count[i])
+			continue;
+
+		if (isys->streams[i].stream_source == source &&
+		    isys->streams[i].vc == vc) {
+			stream = &isys->streams[i];
+			isys->streams_ref_count[i]++;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&isys->streams_lock, flags);
+
+	return stream;
+}
+
+static u64 get_stream_mask_by_pipeline(struct ipu6_isys_video *av)
+{
+	struct media_pipeline *pipeline =
+		media_entity_pipeline(&av->vdev.entity);
+	struct media_entity *entity;
+	unsigned int i;
+	u64 stream_mask = 0;
+
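+	/* Collect the source streams of all video devices sharing this pipeline. */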
+	for (i = 0; i < NR_OF_VIDEO_DEVICE; i++) {
+		entity = &av->isys->av[i].vdev.entity;
+		if (pipeline == media_entity_pipeline(entity))
+			stream_mask |= BIT_ULL(av->isys->av[i].source_stream);
+	}
+
+	return stream_mask;
+}
+
+int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state,
+				  struct ipu6_isys_buffer_list *bl)
+{
+	struct v4l2_subdev_krouting *routing;
+	struct ipu6_isys_stream *stream = av->stream;
+	struct v4l2_subdev_state *subdev_state;
+	struct device *dev = &av->isys->adev->auxdev.dev;
+	struct v4l2_subdev *sd = NULL;
+	struct v4l2_subdev *ssd = NULL;
+	struct media_pad *r_pad;
+	struct media_pad *s_pad = NULL;
+	u32 sink_pad, sink_stream;
+	u64 r_stream;
+	u64 stream_mask = 0;
+	int ret = 0;
+
+	dev_dbg(dev, "set stream: %d\n", state);
+
+	if (WARN(!stream->source_entity, "No source entity for stream\n"))
+		return -ENODEV;
+
+	ssd = media_entity_to_v4l2_subdev(stream->source_entity);
+	sd = &stream->asd->sd;
+	r_pad = media_pad_remote_pad_first(&av->pad);
+	r_stream = ipu6_isys_get_src_stream_by_src_pad(sd, r_pad->index);
+
+	subdev_state = v4l2_subdev_lock_and_get_active_state(sd);
+	routing = &subdev_state->routing;
+	ret = v4l2_subdev_routing_find_opposite_end(routing, r_pad->index,
+						    r_stream, &sink_pad,
+						    &sink_stream);
+	v4l2_subdev_unlock_state(subdev_state);
+	if (ret)
+		return ret;
+
+	s_pad = media_pad_remote_pad_first(&stream->asd->pad[sink_pad]);
+
+	stream_mask = get_stream_mask_by_pipeline(av);
+	if (!state) {
+		stop_streaming_firmware(av);
+
+		/* stop external sub-device now. */
+		dev_dbg(dev, "disable streams 0x%llx of %s\n", stream_mask,
+			ssd->name);
+		ret = v4l2_subdev_disable_streams(ssd, s_pad->index,
+						  stream_mask);
+		if (ret) {
+			dev_err(dev, "disable streams of %s failed with %d\n",
+				ssd->name, ret);
+			return ret;
+		}
+
+		/* stop sub-device which connects with video */
+		dev_dbg(dev, "stream off entity %s pad:%d\n", sd->name,
+			r_pad->index);
+		ret = v4l2_subdev_call(sd, video, s_stream, state);
+		if (ret) {
+			dev_err(dev, "stream off %s failed with %d\n", sd->name,
+				ret);
+			return ret;
+		}
+		close_streaming_firmware(av);
+	} else {
+		ret = start_stream_firmware(av, bl);
+		if (ret) {
+			dev_err(dev, "start stream of firmware failed\n");
+			goto out_clear_stream_watermark;
+		}
+
+		/* start sub-device which connects with video */
+		dev_dbg(dev, "stream on %s pad %d\n", sd->name, r_pad->index);
+		ret = v4l2_subdev_call(sd, video, s_stream, state);
+		if (ret) {
+			dev_err(dev, "stream on %s failed with %d\n", sd->name,
+				ret);
+			goto out_media_entity_stop_streaming_firmware;
+		}
+
+		/* start external sub-device now. */
+		dev_dbg(dev, "enable streams 0x%llx of %s\n", stream_mask,
+			ssd->name);
+		ret = v4l2_subdev_enable_streams(ssd, s_pad->index,
+						 stream_mask);
+		if (ret) {
+			dev_err(dev,
+				"enable streams 0x%llx of %s failed with %d\n",
+				stream_mask, stream->source_entity->name, ret);
+			goto out_media_entity_stop_streaming;
+		}
+	}
+
+	av->streaming = state;
+
+	return 0;
+
+out_media_entity_stop_streaming:
+	v4l2_subdev_disable_streams(sd, r_pad->index, BIT(r_stream));
+
+out_media_entity_stop_streaming_firmware:
+	stop_streaming_firmware(av);
+
+out_clear_stream_watermark:
+	ipu6_isys_update_stream_watermark(av, 0);
+
+	return ret;
+}
+
+static const struct v4l2_ioctl_ops ioctl_ops_mplane = {
+	.vidioc_querycap = ipu6_isys_vidioc_querycap,
+	.vidioc_enum_fmt_vid_cap = ipu6_isys_vidioc_enum_fmt,
+	.vidioc_enum_framesizes = ipu6_isys_vidioc_enum_framesizes,
+	.vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_vid_cap_mplane,
+	.vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_vid_cap_mplane,
+	.vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane,
+	.vidioc_reqbufs = vb2_ioctl_reqbufs,
+	.vidioc_create_bufs = vb2_ioctl_create_bufs,
+	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+	.vidioc_querybuf = vb2_ioctl_querybuf,
+	.vidioc_qbuf = vb2_ioctl_qbuf,
+	.vidioc_dqbuf = vb2_ioctl_dqbuf,
+	.vidioc_streamon = vb2_ioctl_streamon,
+	.vidioc_streamoff = vb2_ioctl_streamoff,
+	.vidioc_expbuf = vb2_ioctl_expbuf,
+};
+
+static const struct media_entity_operations entity_ops = {
+	.link_validate = link_validate,
+};
+
+static const struct v4l2_file_operations isys_fops = {
+	.owner = THIS_MODULE,
+	.poll = vb2_fop_poll,
+	.unlocked_ioctl = video_ioctl2,
+	.mmap = vb2_fop_mmap,
+	.open = video_open,
+	.release = video_release,
+};
+
+int ipu6_isys_fw_open(struct ipu6_isys *isys)
+{
+	struct ipu6_bus_device *adev = isys->adev;
+	const struct ipu6_isys_internal_pdata *ipdata = isys->pdata->ipdata;
+	int ret;
+
+	ret = pm_runtime_resume_and_get(&adev->auxdev.dev);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&isys->mutex);
+
+	if (isys->ref_count++)
+		goto unlock;
+
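+	/* First user: configure the SPC and initialise isys firmware communication. */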
+	ipu6_configure_spc(adev->isp, &ipdata->hw_variant,
+			   IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX, isys->pdata->base,
+			   adev->pkg_dir, adev->pkg_dir_dma_addr);
+
+	/*
+	 * Buffers may have been left on the wrong queue at the last closure.
+	 * Move them back to the empty buffer queue now.
+	 */
+	ipu6_cleanup_fw_msg_bufs(isys);
+
+	if (isys->fwcom) {
+		/*
+		 * Something went wrong during the previous shutdown. As the
+		 * isys is now being restarted, the old context can safely be
+		 * deleted.
+		 */
+		dev_warn(&adev->auxdev.dev, "clearing old context\n");
+		ipu6_fw_isys_cleanup(isys);
+	}
+
+	ret = ipu6_fw_isys_init(isys, ipdata->num_parallel_streams);
+	if (ret < 0)
+		goto out;
+
+unlock:
+	mutex_unlock(&isys->mutex);
+
+	return 0;
+
+out:
+	isys->ref_count--;
+	mutex_unlock(&isys->mutex);
+	pm_runtime_put(&adev->auxdev.dev);
+
+	return ret;
+}
+
+void ipu6_isys_fw_close(struct ipu6_isys *isys)
+{
+	mutex_lock(&isys->mutex);
+
+	isys->ref_count--;
+	if (!isys->ref_count) {
+		ipu6_fw_isys_close(isys);
+		if (isys->fwcom) {
+			isys->need_reset = true;
+			dev_warn(&isys->adev->auxdev.dev,
+				 "failed to close fw isys\n");
+		}
+	}
+
+	mutex_unlock(&isys->mutex);
+
+	if (isys->need_reset)
+		pm_runtime_put_sync(&isys->adev->auxdev.dev);
+	else
+		pm_runtime_put(&isys->adev->auxdev.dev);
+}
+
+int ipu6_isys_setup_video(struct ipu6_isys_video *av,
+			  struct media_entity **source_entity, int *nr_queues)
+{
+	struct device *dev = &av->isys->adev->auxdev.dev;
+	struct v4l2_mbus_frame_desc_entry entry;
+	struct v4l2_subdev_route *route = NULL;
+	struct v4l2_subdev_route *r;
+	struct v4l2_subdev_state *state;
+	struct ipu6_isys_subdev *asd;
+	struct v4l2_subdev *remote_sd;
+	struct media_pipeline *pipeline;
+	struct media_pad *source_pad, *remote_pad;
+	int ret = -EINVAL;
+
+	remote_pad = media_pad_remote_pad_first(&av->pad);
+	if (!remote_pad) {
+		dev_dbg(dev, "failed to get remote pad\n");
+		return -ENODEV;
+	}
+
+	remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity);
+	asd = to_ipu6_isys_subdev(remote_sd);
+	source_pad = media_pad_remote_pad_first(&remote_pad->entity->pads[0]);
+	if (!source_pad) {
+		dev_dbg(dev, "No external source entity\n");
+		return -ENODEV;
+	}
+
+	*source_entity = source_pad->entity;
+
+	/* Find the root */
+	state = v4l2_subdev_lock_and_get_active_state(remote_sd);
+	for_each_active_route(&state->routing, r) {
+		if (r->source_pad != remote_pad->index)
+			continue;
+
+		route = r;
+		break;
+	}
+
+	if (!route) {
+		v4l2_subdev_unlock_state(state);
+		dev_dbg(dev, "Failed to find route\n");
+		return -ENODEV;
+	}
+	v4l2_subdev_unlock_state(state);
+	av->source_stream = route->sink_stream;
+
+	ret = ipu6_isys_csi2_get_remote_desc(av->source_stream,
+					     to_ipu6_isys_csi2(asd),
+					     *source_entity, &entry,
+					     nr_queues);
+	if (ret == -ENOIOCTLCMD) {
+		av->vc = 0;
+		av->dt = ipu6_isys_mbus_code_to_mipi(av->pfmt->code);
+		*nr_queues = 1;
+	} else if (!ret) {
+		dev_dbg(dev, "Framedesc: stream %u, len %u, vc %u, dt %#x\n",
+			entry.stream, entry.length, entry.bus.csi2.vc,
+			entry.bus.csi2.dt);
+
+		av->vc = entry.bus.csi2.vc;
+		av->dt = entry.bus.csi2.dt;
+	} else {
+		dev_err(dev, "failed to get remote frame desc\n");
+		return ret;
+	}
+
+	pipeline = media_entity_pipeline(&av->vdev.entity);
+	if (!pipeline)
+		ret = video_device_pipeline_alloc_start(&av->vdev);
+	else
+		ret = video_device_pipeline_start(&av->vdev, pipeline);
+	if (ret < 0) {
+		dev_dbg(dev, "media pipeline start failed\n");
+		return ret;
+	}
+
+	av->stream = ipu6_isys_get_stream(av, asd);
+	if (!av->stream) {
+		video_device_pipeline_stop(&av->vdev);
+		dev_err(dev, "no available stream for firmware\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Initialise everything related to the video buffer queue, the video
+ * node and the related media entity. The caller is expected to assign
+ * the isys field and to set the name of the video device.
+ */
+int ipu6_isys_video_init(struct ipu6_isys_video *av)
+{
+	struct v4l2_format format = {
+		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+		.fmt.pix_mp = {
+			.width = 1920,
+			.height = 1080,
+		},
+	};
+	int ret;
+
+	mutex_init(&av->mutex);
+	av->vdev.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_IO_MC |
+			       V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+	av->vdev.vfl_dir = VFL_DIR_RX;
+
+	ret = ipu6_isys_queue_init(&av->aq);
+	if (ret)
+		goto out_free_watermark;
+
+	av->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
+	ret = media_entity_pads_init(&av->vdev.entity, 1, &av->pad);
+	if (ret)
+		goto out_vb2_queue_release;
+
+	av->vdev.entity.ops = &entity_ops;
+	av->vdev.release = video_device_release_empty;
+	av->vdev.fops = &isys_fops;
+	av->vdev.v4l2_dev = &av->isys->v4l2_dev;
+	if (!av->vdev.ioctl_ops)
+		av->vdev.ioctl_ops = &ioctl_ops_mplane;
+	av->vdev.queue = &av->aq.vbq;
+	av->vdev.lock = &av->mutex;
+
+	ipu6_isys_video_try_fmt_vid_mplane(av, &format.fmt.pix_mp);
+	av->mpix = format.fmt.pix_mp;
+
+	set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags);
+	video_set_drvdata(&av->vdev, av);
+
+	ret = video_register_device(&av->vdev, VFL_TYPE_VIDEO, -1);
+	if (ret)
+		goto out_media_entity_cleanup;
+
+	return ret;
+
+out_media_entity_cleanup:
+	vb2_video_unregister_device(&av->vdev);
+	media_entity_cleanup(&av->vdev.entity);
+
+out_vb2_queue_release:
+	vb2_queue_release(&av->aq.vbq);
+
+out_free_watermark:
+	mutex_destroy(&av->mutex);
+
+	return ret;
+}
+
+void ipu6_isys_video_cleanup(struct ipu6_isys_video *av)
+{
+	vb2_video_unregister_device(&av->vdev);
+	media_entity_cleanup(&av->vdev.entity);
+	mutex_destroy(&av->mutex);
+}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.h b/drivers/media/pci/intel/ipu6/ipu6-isys-video.h
new file mode 100644
index 000000000000..21cd33c7e277
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013 - 2023 Intel Corporation */
+
+#ifndef IPU6_ISYS_VIDEO_H
+#define IPU6_ISYS_VIDEO_H
+
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/container_of.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+
+#include "ipu6-isys-queue.h"
+
+#define IPU6_ISYS_OUTPUT_PINS 11
+#define IPU6_ISYS_MAX_PARALLEL_SOF 2
+#define NR_OF_VIDEO_DEVICE 31
+
+struct file;
+struct ipu6_isys;
+struct ipu6_isys_subdev;
+
+struct ipu6_isys_pixelformat {
+	u32 pixelformat;
+	u32 bpp;
+	u32 bpp_packed;
+	u32 code;
+	u32 css_pixelformat;
+};
+
+struct sequence_info {
+	unsigned int sequence;
+	u64 timestamp;
+};
+
+struct output_pin_data {
+	void (*pin_ready)(struct ipu6_isys_stream *stream,
+			  struct ipu6_fw_isys_resp_info_abi *info);
+	struct ipu6_isys_queue *aq;
+};
+
+/*
+ * Aligned with a firmware stream. Each stream represents a CSI-2 virtual
+ * channel and may map to multiple video devices.
+ */
+struct ipu6_isys_stream {
+	struct mutex mutex;
+	struct media_entity *source_entity;
+	atomic_t sequence;
+	unsigned int seq_index;
+	struct sequence_info seq[IPU6_ISYS_MAX_PARALLEL_SOF];
+	int stream_source;
+	int stream_handle;
+	unsigned int nr_output_pins;
+	struct ipu6_isys_subdev *asd;
+
+	int nr_queues;	/* Number of capture queues */
+	int nr_streaming;
+	int streaming;	/* Has streaming been really started? */
+	struct list_head queues;
+	struct completion stream_open_completion;
+	struct completion stream_close_completion;
+	struct completion stream_start_completion;
+	struct completion stream_stop_completion;
+	struct ipu6_isys *isys;
+
+	struct output_pin_data output_pins[IPU6_ISYS_OUTPUT_PINS];
+	int error;
+	u8 vc;
+};
+
+struct video_stream_watermark {
+	u32 width;
+	u32 height;
+	u32 hblank;
+	u32 frame_rate;
+	u64 pixel_rate;
+	u64 stream_data_rate;
+	u16 sram_gran_shift;
+	u16 sram_gran_size;
+	struct list_head stream_node;
+};
+
+struct ipu6_isys_video {
+	struct ipu6_isys_queue aq;
+	/* Serialise access to other fields in the struct. */
+	struct mutex mutex;
+	struct media_pad pad;
+	struct video_device vdev;
+	struct v4l2_pix_format_mplane mpix;
+	const struct ipu6_isys_pixelformat *pfmt;
+	struct ipu6_isys *isys;
+	struct ipu6_isys_stream *stream;
+	unsigned int streaming;
+	struct video_stream_watermark watermark;
+	u32 source_stream;
+	u8 vc;
+	u8 dt;
+};
+
+#define ipu6_isys_queue_to_video(__aq) \
+	container_of(__aq, struct ipu6_isys_video, aq)
+
+extern const struct ipu6_isys_pixelformat ipu6_isys_pfmts[];
+extern const struct ipu6_isys_pixelformat ipu6_isys_pfmts_packed[];
+
+int ipu6_isys_vidioc_querycap(struct file *file, void *fh,
+			      struct v4l2_capability *cap);
+
+int ipu6_isys_vidioc_enum_fmt(struct file *file, void *fh,
+			      struct v4l2_fmtdesc *f);
+int ipu6_isys_video_prepare_stream(struct ipu6_isys_video *av,
+				   struct media_entity *source_entity,
+				   int nr_queues);
+int ipu6_isys_video_set_streaming(struct ipu6_isys_video *av, int state,
+				  struct ipu6_isys_buffer_list *bl);
+int ipu6_isys_fw_open(struct ipu6_isys *isys);
+void ipu6_isys_fw_close(struct ipu6_isys *isys);
+int ipu6_isys_setup_video(struct ipu6_isys_video *av,
+			  struct media_entity **source_entity, int *nr_queues);
+int ipu6_isys_video_init(struct ipu6_isys_video *av);
+void ipu6_isys_video_cleanup(struct ipu6_isys_video *av);
+void ipu6_isys_put_stream(struct ipu6_isys_stream *stream);
+struct ipu6_isys_stream *
+ipu6_isys_query_stream_by_handle(struct ipu6_isys *isys, u8 stream_handle);
+struct ipu6_isys_stream *
+ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc);
+
+void ipu6_isys_configure_stream_watermark(struct ipu6_isys_video *av,
+					  bool state);
+void ipu6_isys_update_stream_watermark(struct ipu6_isys_video *av, bool state);
+
+#endif /* IPU6_ISYS_VIDEO_H */
-- 
2.43.0

From abf929b4fbd35689080a8629c6bfebe0de699fe5 Mon Sep 17 00:00:00 2001
From: Bingbu Cao <bingbu.cao@intel.com>
Date: Thu, 11 Jan 2024 14:55:26 +0800
Subject: [PATCH 12/31] media: add Kconfig and Makefile for IPU6

Add IPU6 support in Kconfig and Makefile. With this patch you can
build the Intel IPU6 and input system modules by selecting
CONFIG_VIDEO_INTEL_IPU6 in the kernel config.

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Signed-off-by: Andreas Helbech Kleist <andreaskleist@gmail.com>
Link: https://lore.kernel.org/r/20240111065531.2418836-13-bingbu.cao@intel.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 drivers/media/pci/intel/Kconfig       |  1 +
 drivers/media/pci/intel/Makefile      |  1 +
 drivers/media/pci/intel/ipu6/Kconfig  | 17 +++++++++++++++++
 drivers/media/pci/intel/ipu6/Makefile | 23 +++++++++++++++++++++++
 4 files changed, 42 insertions(+)
 create mode 100644 drivers/media/pci/intel/ipu6/Kconfig
 create mode 100644 drivers/media/pci/intel/ipu6/Makefile

diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig
index ee4684159d3d..04cb3d253486 100644
--- a/drivers/media/pci/intel/Kconfig
+++ b/drivers/media/pci/intel/Kconfig
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
 source "drivers/media/pci/intel/ipu3/Kconfig"
+source "drivers/media/pci/intel/ipu6/Kconfig"
 source "drivers/media/pci/intel/ivsc/Kconfig"
 
 config IPU_BRIDGE
diff --git a/drivers/media/pci/intel/Makefile b/drivers/media/pci/intel/Makefile
index f199a97e1d78..3a2cc6567159 100644
--- a/drivers/media/pci/intel/Makefile
+++ b/drivers/media/pci/intel/Makefile
@@ -5,3 +5,4 @@
 obj-$(CONFIG_IPU_BRIDGE) += ipu-bridge.o
 obj-y	+= ipu3/
 obj-y	+= ivsc/
+obj-$(CONFIG_VIDEO_INTEL_IPU6)	+= ipu6/
diff --git a/drivers/media/pci/intel/ipu6/Kconfig b/drivers/media/pci/intel/ipu6/Kconfig
new file mode 100644
index 000000000000..5cb4f3c2d59f
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/Kconfig
@@ -0,0 +1,17 @@
+config VIDEO_INTEL_IPU6
+	tristate "Intel IPU6 driver"
+	depends on ACPI || COMPILE_TEST
+	depends on MEDIA_SUPPORT
+	depends on MEDIA_PCI_SUPPORT
+	depends on X86 && X86_64
+	select IOMMU_IOVA
+	select VIDEO_V4L2_SUBDEV_API
+	select VIDEOBUF2_DMA_CONTIG
+	select V4L2_FWNODE
+	select IPU_BRIDGE
+	help
+	  This is the 6th Gen Intel Image Processing Unit, found in Intel SoCs
+	  and used for capturing images and video from camera sensors.
+
+	  To compile this driver, say Y here! It contains 2 modules -
+	  intel_ipu6 and intel_ipu6_isys.
diff --git a/drivers/media/pci/intel/ipu6/Makefile b/drivers/media/pci/intel/ipu6/Makefile
new file mode 100644
index 000000000000..a821b0a1567f
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+intel-ipu6-y			:= ipu6.o \
+				ipu6-bus.o \
+				ipu6-dma.o \
+				ipu6-mmu.o \
+				ipu6-buttress.o \
+				ipu6-cpd.o \
+				ipu6-fw-com.o
+
+obj-$(CONFIG_VIDEO_INTEL_IPU6)	+= intel-ipu6.o
+
+intel-ipu6-isys-y		:= ipu6-isys.o \
+				ipu6-isys-csi2.o \
+				ipu6-fw-isys.o \
+				ipu6-isys-video.o \
+				ipu6-isys-queue.o \
+				ipu6-isys-subdev.o \
+				ipu6-isys-mcd-phy.o \
+				ipu6-isys-jsl-phy.o \
+				ipu6-isys-dwc-phy.o
+
+obj-$(CONFIG_VIDEO_INTEL_IPU6)	+= intel-ipu6-isys.o
-- 
2.43.0

From 59872e5aa96479948da4b1d04a10101d8cb86da2 Mon Sep 17 00:00:00 2001
From: Bingbu Cao <bingbu.cao@intel.com>
Date: Thu, 11 Jan 2024 14:55:27 +0800
Subject: [PATCH 13/31] MAINTAINERS: add maintainers for Intel IPU6 input
 system driver

Update MAINTAINERS file for Intel IPU6 input system driver.

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Link: https://lore.kernel.org/r/20240111065531.2418836-14-bingbu.cao@intel.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 MAINTAINERS | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index a7c4cf8201e0..bedffa6941ab 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10733,6 +10733,16 @@ F:	Documentation/admin-guide/media/ipu3_rcb.svg
 F:	Documentation/userspace-api/media/v4l/metafmt-intel-ipu3.rst
 F:	drivers/staging/media/ipu3/
 
+INTEL IPU6 INPUT SYSTEM DRIVER
+M:	Sakari Ailus <sakari.ailus@linux.intel.com>
+M:	Bingbu Cao <bingbu.cao@intel.com>
+R:	Tianshu Qiu <tian.shu.qiu@intel.com>
+L:	linux-media@vger.kernel.org
+S:	Maintained
+T:	git git://linuxtv.org/media_tree.git
+F:	Documentation/admin-guide/media/ipu6-isys.rst
+F:	drivers/media/pci/intel/ipu6/
+
 INTEL ISHTP ECLITE DRIVER
 M:	Sumesh K Naduvalath <sumesh.k.naduvalath@intel.com>
 L:	platform-driver-x86@vger.kernel.org
-- 
2.43.0

From 7acc1618d2f9cbb0d6b07ce9a1eace25663b9fb2 Mon Sep 17 00:00:00 2001
From: Bingbu Cao <bingbu.cao@intel.com>
Date: Thu, 11 Jan 2024 14:55:28 +0800
Subject: [PATCH 14/31] Documentation: add Intel IPU6 ISYS driver admin-guide
 doc

This document mainly describes the functionality of the IPU6 and the
IPU6 isys driver, and gives an example of how users can capture
images with common tools.

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Link: https://lore.kernel.org/r/20240111065531.2418836-15-bingbu.cao@intel.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 Documentation/admin-guide/media/ipu6-isys.rst | 158 ++++++++++++++++
 .../admin-guide/media/ipu6_isys_graph.svg     | 174 ++++++++++++++++++
 .../admin-guide/media/v4l-drivers.rst         |   1 +
 3 files changed, 333 insertions(+)
 create mode 100644 Documentation/admin-guide/media/ipu6-isys.rst
 create mode 100644 Documentation/admin-guide/media/ipu6_isys_graph.svg

diff --git a/Documentation/admin-guide/media/ipu6-isys.rst b/Documentation/admin-guide/media/ipu6-isys.rst
new file mode 100644
index 000000000000..5e78ab88c649
--- /dev/null
+++ b/Documentation/admin-guide/media/ipu6-isys.rst
@@ -0,0 +1,158 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: <isonum.txt>
+
+========================================================
+Intel Image Processing Unit 6 (IPU6) Input System driver
+========================================================
+
+Copyright |copy| 2023 Intel Corporation
+
+Introduction
+============
+
+This file documents the Intel IPU6 (6th generation Image Processing Unit)
+Input System (MIPI CSI2 receiver) drivers located under
+drivers/media/pci/intel/ipu6.
+
+The Intel IPU6 can be found in certain Intel Chipsets but not in all SKUs:
+
+* TigerLake
+* JasperLake
+* AlderLake
+* RaptorLake
+* MeteorLake
+
+Intel IPU6 is made up of two components - Input System (ISYS) and Processing
+System (PSYS).
+
+The Input System mainly works as a MIPI CSI2 receiver which receives and
+processes the imaging data from the sensors and outputs the frames to memory.
+
+There are 2 driver modules - intel_ipu6 and intel_ipu6_isys. intel_ipu6 is an
+IPU6 common driver which does PCI configuration, firmware loading and parsing,
+firmware authentication, DMA mapping and IPU-MMU (internal Memory mapping Unit)
+configuration. intel_ipu6_isys implements V4L2, Media Controller and V4L2
+sub-device interfaces. The IPU6 ISYS driver supports camera sensors connected
+to the IPU6 ISYS through V4L2 sub-device sensor drivers.
+
+.. Note:: See Documentation/driver-api/media/drivers/ipu6.rst for more
+	  information about the IPU6 hardware.
+
+
+Input system driver
+===================
+
+The Input System driver mainly configures the CSI2 D-PHY, constructs the
+firmware stream configuration, sends commands to the firmware, gets responses
+from the hardware and firmware and then returns buffers to the user.
+The ISYS is represented as several V4L2 sub-devices - 'Intel IPU6 CSI2 $port' -
+which provide V4L2 subdev interfaces to user space. There are also several
+video nodes, one for each CSI-2 stream capture - 'Intel IPU6 ISYS Capture $num' -
+which provide the interface to the user to set formats, queue buffers and
+start streaming.
+
+.. kernel-figure::  ipu6_isys_graph.svg
+   :alt: ipu6 isys media graph with multiple streams support
+
+   ipu6 isys media graph with multiple streams support
+
+Capturing frames by IPU6 ISYS
+-----------------------------
+
+IPU6 ISYS is used to capture frames from the camera sensors connected to the
+CSI2 ports. The supported input formats of the ISYS are listed in the table below:
+
+.. tabularcolumns:: |p{0.8cm}|p{4.0cm}|p{4.0cm}|
+
+.. flat-table::
+    :header-rows: 1
+
+    * - IPU6 ISYS supported input formats
+
+    * - RGB565, RGB888
+
+    * - UYVY8, YUYV8
+
+    * - RAW8, RAW10, RAW12
+
+.. _ipu6_isys_capture_examples:
+
+Examples
+~~~~~~~~
+Here is an example of IPU6 ISYS raw capture on a Dell XPS 9315 laptop. On this
+machine, the ov01a10 sensor is connected to IPU6 ISYS CSI2 port 2 and can
+generate images in SBGGR10 format at a resolution of 1280x800.
+
+Using the media controller APIs, we can configure the ov01a10 sensor with
+media-ctl [#f1]_ and use yavta [#f2]_ to capture the frames it transmits to
+the IPU6 ISYS.
+
+.. code-block:: none
+
+    # Example 1 capture frame from ov01a10 camera sensor
+    # This example assumes /dev/media0 as the IPU ISYS media device
+    export MDEV=/dev/media0
+
+    # Establish the link for the media devices using media-ctl
+    media-ctl -d $MDEV -l "\"ov01a10 3-0036\":0 -> \"Intel IPU6 CSI2 2\":0[1]"
+
+    # Set the format for the media devices
+    media-ctl -d $MDEV -V "ov01a10:0 [fmt:SBGGR10/1280x800]"
+    media-ctl -d $MDEV -V "Intel IPU6 CSI2 2:0 [fmt:SBGGR10/1280x800]"
+    media-ctl -d $MDEV -V "Intel IPU6 CSI2 2:1 [fmt:SBGGR10/1280x800]"
+
+Once the media pipeline is configured, the desired sensor specific settings
+(such as exposure and gain) can be set using the yavta tool.
+
+For example:
+
+.. code-block:: none
+
+    # The ov01a10 sensor is connected to i2c bus 3 with address 0x36
+    export SDEV=$(media-ctl -d $MDEV -e "ov01a10 3-0036")
+
+    yavta -w 0x009e0903 400 $SDEV
+    yavta -w 0x009e0913 1000 $SDEV
+    yavta -w 0x009e0911 2000 $SDEV
+
+Once the desired sensor settings are applied, frame captures can be done as
+shown in the following example:
+
+
+.. code-block:: none
+
+    yavta --data-prefix -u -c10 -n5 -I -s 1280x800 --file=/tmp/frame-#.bin \
+          -f SBGGR10 $(media-ctl -d $MDEV -e "Intel IPU6 ISYS Capture 0")
+
+With the above command, 10 frames are captured at 1280x800 resolution in
+SBGGR10 format. The captured frames are available as /tmp/frame-#.bin files.
+
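+The same capture can also be performed directly through the V4L2 multi-planar
+API instead of yavta. The following sketch is only an illustration of that
+flow (it assumes the "Intel IPU6 ISYS Capture 0" entity resolved to
+/dev/video0, and it omits error handling and the mmap of the buffers via
+``VIDIOC_QUERYBUF``):
+
+.. code-block:: c
+
+    #include <fcntl.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    int main(void)
+    {
+        struct v4l2_plane planes[1];
+        struct v4l2_buffer buf;
+        unsigned int i;
+        int fd, type;
+
+        fd = open("/dev/video0", O_RDWR);
+
+        /* 1280x800 SBGGR10, single plane, matching the sensor format */
+        struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE };
+        fmt.fmt.pix_mp.width = 1280;
+        fmt.fmt.pix_mp.height = 800;
+        fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_SBGGR10;
+        fmt.fmt.pix_mp.num_planes = 1;
+        ioctl(fd, VIDIOC_S_FMT, &fmt);
+
+        /* allocate and queue 4 driver-owned (MMAP) buffers */
+        struct v4l2_requestbuffers req = {
+            .count = 4,
+            .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+            .memory = V4L2_MEMORY_MMAP,
+        };
+        ioctl(fd, VIDIOC_REQBUFS, &req);
+
+        for (i = 0; i < req.count; i++) {
+            memset(&buf, 0, sizeof(buf));
+            memset(planes, 0, sizeof(planes));
+            buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+            buf.memory = V4L2_MEMORY_MMAP;
+            buf.index = i;
+            buf.length = 1;
+            buf.m.planes = planes;
+            ioctl(fd, VIDIOC_QBUF, &buf);
+        }
+
+        type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+        ioctl(fd, VIDIOC_STREAMON, &type);
+
+        /* dequeue one filled buffer; the payload size is reported in
+         * buf.m.planes[0].bytesused */
+        memset(&buf, 0, sizeof(buf));
+        memset(planes, 0, sizeof(planes));
+        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+        buf.memory = V4L2_MEMORY_MMAP;
+        buf.length = 1;
+        buf.m.planes = planes;
+        ioctl(fd, VIDIOC_DQBUF, &buf);
+
+        ioctl(fd, VIDIOC_STREAMOFF, &type);
+        return 0;
+    }
+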
+Here is another example of IPU6 ISYS RAW and metadata capture from camera
+sensor ov2740 on Lenovo X1 Yoga laptop.
+
+.. code-block:: none
+
+    media-ctl -l "\"ov2740 14-0036\":0 -> \"Intel IPU6 CSI2 1\":0[1]"
+    media-ctl -l "\"Intel IPU6 CSI2 1\":1 -> \"Intel IPU6 ISYS Capture 0\":0[5]"
+    media-ctl -l "\"Intel IPU6 CSI2 1\":2 -> \"Intel IPU6 ISYS Capture 1\":0[5]"
+
+    # set routing
+    media-ctl -v -R "\"Intel IPU6 CSI2 1\" [0/0->1/0[1],0/1->2/1[1]]"
+
+    media-ctl -v "\"Intel IPU6 CSI2 1\":0/0 [fmt:SGRBG10/1932x1092]"
+    media-ctl -v "\"Intel IPU6 CSI2 1\":0/1 [fmt:GENERIC_8/97x1]"
+    media-ctl -v "\"Intel IPU6 CSI2 1\":1/0 [fmt:SGRBG10/1932x1092]"
+    media-ctl -v "\"Intel IPU6 CSI2 1\":2/1 [fmt:GENERIC_8/97x1]"
+
+    CAPTURE_DEV=$(media-ctl -e "Intel IPU6 ISYS Capture 0")
+    ./yavta --data-prefix -c100 -n5 -I -s1932x1092 --file=/tmp/frame-#.bin \
+    -f SGRBG10 ${CAPTURE_DEV}
+
+    CAPTURE_META=$(media-ctl -e "Intel IPU6 ISYS Capture 1")
+    ./yavta --data-prefix -c100 -n5 -I -s97x1 -B meta-capture \
+    --file=/tmp/meta-#.bin -f GENERIC_8 ${CAPTURE_META}
+
+References
+==========
+
+.. [#f1] https://git.ideasonboard.org/?p=media-ctl.git;a=summary
+.. [#f2] https://git.ideasonboard.org/yavta.git
diff --git a/Documentation/admin-guide/media/ipu6_isys_graph.svg b/Documentation/admin-guide/media/ipu6_isys_graph.svg
new file mode 100644
index 000000000000..707747c75280
--- /dev/null
+++ b/Documentation/admin-guide/media/ipu6_isys_graph.svg
@@ -0,0 +1,174 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
+ "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by graphviz version 2.38.0 (20140413.2041)
+ -->
+<!-- Title: board Pages: 1 -->
+<svg width="559pt" height="810pt"
+ viewBox="0.00 0.00 559.00 809.50" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 805.5)">
+<title>board</title>
+<polygon fill="white" stroke="none" points="-4,4 -4,-805.5 555,-805.5 555,4 -4,4"/>
+<!-- n00000001 -->
+<g id="node1" class="node"><title>n00000001</title>
+<polygon fill="#66cd00" stroke="black" points="551,-192.5 387,-192.5 387,-154.5 551,-154.5 551,-192.5"/>
+<text text-anchor="middle" x="469" y="-177.3" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 0</text>
+<text text-anchor="middle" x="469" y="-162.3" font-family="Times,serif" font-size="14.00">/dev/video0</text>
+</g>
+<!-- n00000002 -->
+<g id="node2" class="node"><title>n00000002</title>
+<polygon fill="#66cd00" stroke="black" points="551,-395.5 387,-395.5 387,-357.5 551,-357.5 551,-395.5"/>
+<text text-anchor="middle" x="469" y="-380.3" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 1</text>
+<text text-anchor="middle" x="469" y="-365.3" font-family="Times,serif" font-size="14.00">/dev/video1</text>
+</g>
+<!-- n00000003 -->
+<g id="node3" class="node"><title>n00000003</title>
+<polygon fill="#66cd00" stroke="black" points="551,-598.5 387,-598.5 387,-560.5 551,-560.5 551,-598.5"/>
+<text text-anchor="middle" x="469" y="-583.3" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 2</text>
+<text text-anchor="middle" x="469" y="-568.3" font-family="Times,serif" font-size="14.00">/dev/video2</text>
+</g>
+<!-- n00000004 -->
+<g id="node4" class="node"><title>n00000004</title>
+<polygon fill="#66cd00" stroke="black" points="551,-801.5 387,-801.5 387,-763.5 551,-763.5 551,-801.5"/>
+<text text-anchor="middle" x="469" y="-786.3" font-family="Times,serif" font-size="14.00">Intel IPU6 ISYS Capture 3</text>
+<text text-anchor="middle" x="469" y="-771.3" font-family="Times,serif" font-size="14.00">/dev/video3</text>
+</g>
+<!-- n0000007d -->
+<g id="node5" class="node"><title>n0000007d</title>
+<path fill="#ffb90f" stroke="black" d="M201,-0.5C201,-0.5 339,-0.5 339,-0.5 345,-0.5 351,-6.5 351,-12.5 351,-12.5 351,-172.5 351,-172.5 351,-178.5 345,-184.5 339,-184.5 339,-184.5 201,-184.5 201,-184.5 195,-184.5 189,-178.5 189,-172.5 189,-172.5 189,-12.5 189,-12.5 189,-6.5 195,-0.5 201,-0.5"/>
+<text text-anchor="middle" x="200.5" y="-88.8" font-family="Times,serif" font-size="14.00">0</text>
+<polyline fill="none" stroke="black" points="212,-0.5 212,-184.5 "/>
+<text text-anchor="middle" x="270" y="-96.3" font-family="Times,serif" font-size="14.00">Intel IPU6 CSI2 0</text>
+<text text-anchor="middle" x="270" y="-81.3" font-family="Times,serif" font-size="14.00">/dev/v4l&#45;subdev0</text>
+<polyline fill="none" stroke="black" points="328,-0.5 328,-184.5 "/>
+<text text-anchor="middle" x="339.5" y="-169.3" font-family="Times,serif" font-size="14.00">1</text>
+<polyline fill="none" stroke="black" points="328,-161.5 351,-161.5 "/>
+<text text-anchor="middle" x="339.5" y="-146.3" font-family="Times,serif" font-size="14.00">2</text>
+<polyline fill="none" stroke="black" points="328,-138.5 351,-138.5 "/>
+<text text-anchor="middle" x="339.5" y="-123.3" font-family="Times,serif" font-size="14.00">3</text>
+<polyline fill="none" stroke="black" points="328,-115.5 351,-115.5 "/>
+<text text-anchor="middle" x="339.5" y="-100.3" font-family="Times,serif" font-size="14.00">4</text>
+<polyline fill="none" stroke="black" points="328,-92.5 351,-92.5 "/>
+<text text-anchor="middle" x="339.5" y="-77.3" font-family="Times,serif" font-size="14.00">5</text>
+<polyline fill="none" stroke="black" points="328,-69.5 351,-69.5 "/>
+<text text-anchor="middle" x="339.5" y="-54.3" font-family="Times,serif" font-size="14.00">6</text>
+<polyline fill="none" stroke="black" points="328,-46.5 351,-46.5 "/>
+<text text-anchor="middle" x="339.5" y="-31.3" font-family="Times,serif" font-size="14.00">7</text>
+<polyline fill="none" stroke="black" points="328,-23.5 351,-23.5 "/>
+<text text-anchor="middle" x="339.5" y="-8.3" font-family="Times,serif" font-size="14.00">8</text>
+</g>
+<!-- n0000007d&#45;&gt;n00000001 -->
+<g id="edge1" class="edge"><title>n0000007d:port1&#45;&gt;n00000001</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M351,-173.5C359.322,-173.5 367.976,-173.5 376.644,-173.5"/>
+<polygon fill="black" stroke="black" points="376.807,-177 386.807,-173.5 376.807,-170 376.807,-177"/>
+</g>
+<!-- n00000087 -->
+<g id="node6" class="node"><title>n00000087</title>
+<path fill="#ffb90f" stroke="black" d="M201,-203.5C201,-203.5 339,-203.5 339,-203.5 345,-203.5 351,-209.5 351,-215.5 351,-215.5 351,-375.5 351,-375.5 351,-381.5 345,-387.5 339,-387.5 339,-387.5 201,-387.5 201,-387.5 195,-387.5 189,-381.5 189,-375.5 189,-375.5 189,-215.5 189,-215.5 189,-209.5 195,-203.5 201,-203.5"/>
+<text text-anchor="middle" x="200.5" y="-291.8" font-family="Times,serif" font-size="14.00">0</text>
+<polyline fill="none" stroke="black" points="212,-203.5 212,-387.5 "/>
+<text text-anchor="middle" x="270" y="-299.3" font-family="Times,serif" font-size="14.00">Intel IPU6 CSI2 1</text>
+<text text-anchor="middle" x="270" y="-284.3" font-family="Times,serif" font-size="14.00">/dev/v4l&#45;subdev1</text>
+<polyline fill="none" stroke="black" points="328,-203.5 328,-387.5 "/>
+<text text-anchor="middle" x="339.5" y="-372.3" font-family="Times,serif" font-size="14.00">1</text>
+<polyline fill="none" stroke="black" points="328,-364.5 351,-364.5 "/>
+<text text-anchor="middle" x="339.5" y="-349.3" font-family="Times,serif" font-size="14.00">2</text>
+<polyline fill="none" stroke="black" points="328,-341.5 351,-341.5 "/>
+<text text-anchor="middle" x="339.5" y="-326.3" font-family="Times,serif" font-size="14.00">3</text>
+<polyline fill="none" stroke="black" points="328,-318.5 351,-318.5 "/>
+<text text-anchor="middle" x="339.5" y="-303.3" font-family="Times,serif" font-size="14.00">4</text>
+<polyline fill="none" stroke="black" points="328,-295.5 351,-295.5 "/>
+<text text-anchor="middle" x="339.5" y="-280.3" font-family="Times,serif" font-size="14.00">5</text>
+<polyline fill="none" stroke="black" points="328,-272.5 351,-272.5 "/>
+<text text-anchor="middle" x="339.5" y="-257.3" font-family="Times,serif" font-size="14.00">6</text>
+<polyline fill="none" stroke="black" points="328,-249.5 351,-249.5 "/>
+<text text-anchor="middle" x="339.5" y="-234.3" font-family="Times,serif" font-size="14.00">7</text>
+<polyline fill="none" stroke="black" points="328,-226.5 351,-226.5 "/>
+<text text-anchor="middle" x="339.5" y="-211.3" font-family="Times,serif" font-size="14.00">8</text>
+</g>
+<!-- n00000087&#45;&gt;n00000002 -->
+<g id="edge2" class="edge"><title>n00000087:port1&#45;&gt;n00000002</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M351,-376.5C359.322,-376.5 367.976,-376.5 376.644,-376.5"/>
+<polygon fill="black" stroke="black" points="376.807,-380 386.807,-376.5 376.807,-373 376.807,-380"/>
+</g>
+<!-- n00000091 -->
+<g id="node7" class="node"><title>n00000091</title>
+<path fill="#ffb90f" stroke="black" d="M201,-406.5C201,-406.5 339,-406.5 339,-406.5 345,-406.5 351,-412.5 351,-418.5 351,-418.5 351,-578.5 351,-578.5 351,-584.5 345,-590.5 339,-590.5 339,-590.5 201,-590.5 201,-590.5 195,-590.5 189,-584.5 189,-578.5 189,-578.5 189,-418.5 189,-418.5 189,-412.5 195,-406.5 201,-406.5"/>
+<text text-anchor="middle" x="200.5" y="-494.8" font-family="Times,serif" font-size="14.00">0</text>
+<polyline fill="none" stroke="black" points="212,-406.5 212,-590.5 "/>
+<text text-anchor="middle" x="270" y="-502.3" font-family="Times,serif" font-size="14.00">Intel IPU6 CSI2 2</text>
+<text text-anchor="middle" x="270" y="-487.3" font-family="Times,serif" font-size="14.00">/dev/v4l&#45;subdev2</text>
+<polyline fill="none" stroke="black" points="328,-406.5 328,-590.5 "/>
+<text text-anchor="middle" x="339.5" y="-575.3" font-family="Times,serif" font-size="14.00">1</text>
+<polyline fill="none" stroke="black" points="328,-567.5 351,-567.5 "/>
+<text text-anchor="middle" x="339.5" y="-552.3" font-family="Times,serif" font-size="14.00">2</text>
+<polyline fill="none" stroke="black" points="328,-544.5 351,-544.5 "/>
+<text text-anchor="middle" x="339.5" y="-529.3" font-family="Times,serif" font-size="14.00">3</text>
+<polyline fill="none" stroke="black" points="328,-521.5 351,-521.5 "/>
+<text text-anchor="middle" x="339.5" y="-506.3" font-family="Times,serif" font-size="14.00">4</text>
+<polyline fill="none" stroke="black" points="328,-498.5 351,-498.5 "/>
+<text text-anchor="middle" x="339.5" y="-483.3" font-family="Times,serif" font-size="14.00">5</text>
+<polyline fill="none" stroke="black" points="328,-475.5 351,-475.5 "/>
+<text text-anchor="middle" x="339.5" y="-460.3" font-family="Times,serif" font-size="14.00">6</text>
+<polyline fill="none" stroke="black" points="328,-452.5 351,-452.5 "/>
+<text text-anchor="middle" x="339.5" y="-437.3" font-family="Times,serif" font-size="14.00">7</text>
+<polyline fill="none" stroke="black" points="328,-429.5 351,-429.5 "/>
+<text text-anchor="middle" x="339.5" y="-414.3" font-family="Times,serif" font-size="14.00">8</text>
+</g>
+<!-- n00000091&#45;&gt;n00000003 -->
+<g id="edge3" class="edge"><title>n00000091:port1&#45;&gt;n00000003</title>
+<path fill="none" stroke="black" d="M351,-579.5C359.322,-579.5 367.976,-579.5 376.644,-579.5"/>
+<polygon fill="black" stroke="black" points="376.807,-583 386.807,-579.5 376.807,-576 376.807,-583"/>
+</g>
+<!-- n0000009b -->
+<g id="node8" class="node"><title>n0000009b</title>
+<path fill="#ffb90f" stroke="black" d="M201,-609.5C201,-609.5 339,-609.5 339,-609.5 345,-609.5 351,-615.5 351,-621.5 351,-621.5 351,-781.5 351,-781.5 351,-787.5 345,-793.5 339,-793.5 339,-793.5 201,-793.5 201,-793.5 195,-793.5 189,-787.5 189,-781.5 189,-781.5 189,-621.5 189,-621.5 189,-615.5 195,-609.5 201,-609.5"/>
+<text text-anchor="middle" x="200.5" y="-697.8" font-family="Times,serif" font-size="14.00">0</text>
+<polyline fill="none" stroke="black" points="212,-609.5 212,-793.5 "/>
+<text text-anchor="middle" x="270" y="-705.3" font-family="Times,serif" font-size="14.00">Intel IPU6 CSI2 3</text>
+<text text-anchor="middle" x="270" y="-690.3" font-family="Times,serif" font-size="14.00">/dev/v4l&#45;subdev3</text>
+<polyline fill="none" stroke="black" points="328,-609.5 328,-793.5 "/>
+<text text-anchor="middle" x="339.5" y="-778.3" font-family="Times,serif" font-size="14.00">1</text>
+<polyline fill="none" stroke="black" points="328,-770.5 351,-770.5 "/>
+<text text-anchor="middle" x="339.5" y="-755.3" font-family="Times,serif" font-size="14.00">2</text>
+<polyline fill="none" stroke="black" points="328,-747.5 351,-747.5 "/>
+<text text-anchor="middle" x="339.5" y="-732.3" font-family="Times,serif" font-size="14.00">3</text>
+<polyline fill="none" stroke="black" points="328,-724.5 351,-724.5 "/>
+<text text-anchor="middle" x="339.5" y="-709.3" font-family="Times,serif" font-size="14.00">4</text>
+<polyline fill="none" stroke="black" points="328,-701.5 351,-701.5 "/>
+<text text-anchor="middle" x="339.5" y="-686.3" font-family="Times,serif" font-size="14.00">5</text>
+<polyline fill="none" stroke="black" points="328,-678.5 351,-678.5 "/>
+<text text-anchor="middle" x="339.5" y="-663.3" font-family="Times,serif" font-size="14.00">6</text>
+<polyline fill="none" stroke="black" points="328,-655.5 351,-655.5 "/>
+<text text-anchor="middle" x="339.5" y="-640.3" font-family="Times,serif" font-size="14.00">7</text>
+<polyline fill="none" stroke="black" points="328,-632.5 351,-632.5 "/>
+<text text-anchor="middle" x="339.5" y="-617.3" font-family="Times,serif" font-size="14.00">8</text>
+</g>
+<!-- n0000009b&#45;&gt;n00000004 -->
+<g id="edge4" class="edge"><title>n0000009b:port1&#45;&gt;n00000004</title>
+<path fill="none" stroke="black" stroke-dasharray="5,2" d="M351,-782.5C359.322,-782.5 367.976,-782.5 376.644,-782.5"/>
+<polygon fill="black" stroke="black" points="376.807,-786 386.807,-782.5 376.807,-779 376.807,-786"/>
+</g>
+<!-- n00000865 -->
+<g id="node9" class="node"><title>n00000865</title>
+<path fill="cornflowerblue" stroke="black" d="M12,-479.5C12,-479.5 141,-479.5 141,-479.5 147,-479.5 153,-485.5 153,-491.5 153,-491.5 153,-505.5 153,-505.5 153,-511.5 147,-517.5 141,-517.5 141,-517.5 12,-517.5 12,-517.5 6,-517.5 0,-511.5 0,-505.5 0,-505.5 0,-491.5 0,-491.5 0,-485.5 6,-479.5 12,-479.5"/>
+<text text-anchor="middle" x="10" y="-494.8" font-family="Times,serif" font-size="14.00"> </text>
+<polyline fill="none" stroke="black" points="20,-479.5 20,-517.5 "/>
+<text text-anchor="middle" x="75" y="-502.3" font-family="Times,serif" font-size="14.00">ov01a10 3&#45;0036</text>
+<text text-anchor="middle" x="75" y="-487.3" font-family="Times,serif" font-size="14.00">/dev/v4l&#45;subdev4</text>
+<polyline fill="none" stroke="black" points="130,-479.5 130,-517.5 "/>
+<text text-anchor="middle" x="141.5" y="-494.8" font-family="Times,serif" font-size="14.00">0</text>
+</g>
+<!-- n00000865&#45;&gt;n00000091 -->
+<g id="edge5" class="edge"><title>n00000865:port0&#45;&gt;n00000091:port0</title>
+<path fill="none" stroke="black" d="M153,-498.5C165,-498.5 170.25,-498.5 178.875,-498.5"/>
+<polygon fill="black" stroke="black" points="179,-502 189,-498.5 179,-495 179,-502"/>
+</g>
+<!-- n00000866 -->
+<!-- n00000866&#45;&gt;n0000007d -->
+<!-- n00000867 -->
+<!-- n00000867&#45;&gt;n00000087 -->
+<!-- n00000868 -->
+<!-- n00000868&#45;&gt;n0000009b -->
+</g>
+</svg>
diff --git a/Documentation/admin-guide/media/v4l-drivers.rst b/Documentation/admin-guide/media/v4l-drivers.rst
index 61283d67ceef..50bdef2d1762 100644
--- a/Documentation/admin-guide/media/v4l-drivers.rst
+++ b/Documentation/admin-guide/media/v4l-drivers.rst
@@ -16,6 +16,7 @@ Video4Linux (V4L) driver-specific documentation
 	imx
 	imx7
 	ipu3
+	ipu6-isys
 	ivtv
 	mgb4
 	omap3isp
-- 
2.43.0

From 878747fcd0bb4f27933d3d32525472b8a0425724 Mon Sep 17 00:00:00 2001
From: Bingbu Cao <bingbu.cao@intel.com>
Date: Thu, 11 Jan 2024 14:55:29 +0800
Subject: [PATCH 15/31] Documentation: add documentation of Intel IPU6 driver
 and hardware overview

Add documentation giving an overview of the IPU6 hardware and describing
the main components of the IPU6 driver.

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Link: https://lore.kernel.org/r/20240111065531.2418836-16-bingbu.cao@intel.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 .../driver-api/media/drivers/index.rst        |   1 +
 .../driver-api/media/drivers/ipu6.rst         | 205 ++++++++++++++++++
 2 files changed, 206 insertions(+)
 create mode 100644 Documentation/driver-api/media/drivers/ipu6.rst

diff --git a/Documentation/driver-api/media/drivers/index.rst b/Documentation/driver-api/media/drivers/index.rst
index c4123a16b5f9..7f6f3dcd5c90 100644
--- a/Documentation/driver-api/media/drivers/index.rst
+++ b/Documentation/driver-api/media/drivers/index.rst
@@ -26,6 +26,7 @@ Video4Linux (V4L) drivers
 	vimc-devel
 	zoran
 	ccs/ccs
+	ipu6
 
 
 Digital TV drivers
diff --git a/Documentation/driver-api/media/drivers/ipu6.rst b/Documentation/driver-api/media/drivers/ipu6.rst
new file mode 100644
index 000000000000..b6357155c13b
--- /dev/null
+++ b/Documentation/driver-api/media/drivers/ipu6.rst
@@ -0,0 +1,205 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================
+Intel IPU6 Driver
+==================
+
+Author: Bingbu Cao <bingbu.cao@intel.com>
+
+Overview
+=========
+
+Intel IPU6 is the sixth generation of the Intel Image Processing Unit, used in
+some Intel chipsets such as Tiger Lake, Jasper Lake, Alder Lake, Raptor Lake
+and Meteor Lake. IPU6 consists of two major systems: the Input System (IS) and
+the Processing System (PS). The IPU6 is visible on the PCI bus as a single
+device and can be found with ``lspci``:
+
+``0000:00:05.0 Multimedia controller: Intel Corporation Device xxxx (rev xx)``
+
+The IPU6 has a 16 MB BAR in PCI configuration space for the MMIO registers
+that are visible to the driver.
+
+Buttress
+=========
+
+The IPU6 is connected to the system fabric through the ``Buttress``, which
+enables the host driver to control the IPU6 and also allows the IPU6 to access
+system memory to store and load frame pixel streams and any other metadata.
+
+``Buttress`` mainly manages several system functionalities - power management,
+interrupt handling, firmware authentication and global timer sync.
+
+IS and PS Power flow
+---------------------------
+
+The IPU6 driver initiates the IS and PS power up or down request by setting
+the Buttress frequency control registers for IS and PS -
+``IPU6_BUTTRESS_REG_IS_FREQ_CTL`` and ``IPU6_BUTTRESS_REG_PS_FREQ_CTL`` - in
+the function:
+
+.. c:function:: int ipu6_buttress_power(..., bool on)
+
+The Buttress forwards the request to the Punit; after the Punit executes the
+power up flow, the Buttress indicates to the driver that the IS or PS is
+powered up by updating the power status registers.
+
+.. Note:: IS power up needs to take place prior to PS power up and IS power
+	  down needs to take place after PS power down due to a hardware limitation.
+
+
+Interrupt
+------------
+
+The IPU6 interrupt can be generated as MSI or INTA. An interrupt is triggered
+when an IS, PS or Buttress event or error happens. The driver gets the
+interrupt cause by reading the interrupt status register
+``BUTTRESS_REG_ISR_STATUS``; it first clears the irq status and then calls the
+specific IS or PS irq handler.
+
+.. c:function:: irqreturn_t ipu6_buttress_isr(int irq, ...)
+
+Security and firmware authentication
+-------------------------------------
+To address the IPU6 firmware security concerns, the IPU6 firmware needs to
+undergo an authentication process before it is allowed to execute on the IPU6
+internal processors. The driver works with the Converged Security Engine (CSE)
+to complete the authentication process. The CSE is responsible for
+authenticating the IPU6 firmware; the authenticated firmware binary is copied
+into an isolated memory region. The firmware authentication process is
+implemented by the CSE following an IPC handshake with the driver. Several
+Buttress registers are used by the CSE and the driver to exchange IPC messages.
+
+.. c:function:: int ipu6_buttress_authenticate(...)
+
+Global timer sync
+------------------
+The IPU driver initiates a Hammock Harbor synchronization flow each time it
+starts a camera operation. The IPU synchronizes an internal counter in the
+Buttress with a copy of the SoC time; this counter keeps the time updated until
+the camera operation is stopped. The driver can use this time counter to
+calibrate timestamps based on the timestamp in the response events from the
+firmware.
+
+.. c:function:: int ipu6_buttress_start_tsc_sync(...)
+
+
+DMA and MMU
+============
+
+The IPU6 has its own scalar processor on which the firmware runs; it has an
+internal 32-bit virtual address space. The IPU6 has MMU address translation
+hardware to allow the scalar processor to access internal memory and external
+system memory through IPU6 virtual addresses. The address translation is
+based on two levels of page lookup tables stored in system memory which are
+maintained by the IPU6 driver. The IPU6 driver sets the level-1 page table base
+address in the MMU register and lets the MMU look up the page tables.
+
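+As an illustration of such a two-level lookup (the exact IPU6 table layout is
+implemented in ``ipu6-mmu.c``; a 4 KB page size and 1024-entry tables are only
+assumed here for the example), the level-1 and level-2 indices of a 32-bit IPU
+virtual address could be derived as follows:
+
+.. code-block:: c
+
+    #include <stdint.h>
+
+    #define EXAMPLE_PAGE_SHIFT  12      /* assumed 4 KB pages */
+    #define EXAMPLE_PT_ENTRIES  1024    /* assumed entries per table */
+
+    /* bits 31..22 select the level-1 entry (points to a level-2 table) */
+    static uint32_t l1_index(uint32_t iova)
+    {
+        return iova >> (EXAMPLE_PAGE_SHIFT + 10);
+    }
+
+    /* bits 21..12 select the level-2 entry (physical page address) */
+    static uint32_t l2_index(uint32_t iova)
+    {
+        return (iova >> EXAMPLE_PAGE_SHIFT) & (EXAMPLE_PT_ENTRIES - 1);
+    }
+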
+The IPU6 driver exports its own DMA operations. The driver updates the page
+table entries for each DMA operation and invalidates the MMU TLB after each
+unmap and free.
+
+.. code-block:: none
+
+    const struct dma_map_ops ipu6_dma_ops = {
+	   .alloc = ipu6_dma_alloc,
+	   .free = ipu6_dma_free,
+	   .mmap = ipu6_dma_mmap,
+	   .map_sg = ipu6_dma_map_sg,
+	   .unmap_sg = ipu6_dma_unmap_sg,
+	   ...
+    };
+
+.. Note:: The IPU6 MMU works behind the IOMMU, so for each IPU6 DMA operation
+	  the driver calls the generic PCI DMA ops to ask the IOMMU to do the
+	  additional mapping if VT-d is enabled.
+
+
+Firmware file format
+=====================
+
+The IPU6 firmware is released in the Code Partition Directory (CPD) file
+format. The CPD firmware contains a CPD header, several CPD entries and CPD
+components. A CPD component includes 3 entries - manifest, metadata and module
+data. Manifest and metadata are defined by the CSE and used by the CSE for
+authentication. Module data is defined by the IPU6 and holds the binary data of
+the firmware, called the package directory. The IPU6 driver (``ipu6-cpd.c``)
+parses and validates the CPD firmware file, gets the package directory binary
+data of the IPU6 firmware, copies it to a specific DMA buffer and sets its base
+address in the Buttress ``FW_SOURCE_BASE`` register; the CSE then authenticates
+this firmware binary.
+
+
+Syscom interface
+================
+
+The IPU6 driver communicates with the firmware via the syscom ABI. Syscom is an
+inter-processor communication mechanism between the IPU scalar processor and
+the CPU. A number of resources are shared between firmware and software: a
+system memory region where the message queues reside, which the firmware can
+access via the IPU MMU, and a common set of IPU MMIO registers where the queue
+read and write indices reside. Syscom queues are fixed-depth FIFO queues of
+configurable elements called ``tokens`` (messages). Software and firmware act
+as producer and consumer of the tokens in a queue, and update the write and
+read indices separately when sending or receiving each message.
+
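+The index handling follows the usual single-producer, single-consumer ring
+pattern. The snippet below is a generic illustration of that pattern only;
+the ``example_queue`` structure, the queue depth and the token size are made
+up for this sketch and are not the IPU6 syscom ABI:
+
+.. code-block:: c
+
+    #include <stdint.h>
+    #include <string.h>
+
+    #define EXAMPLE_QUEUE_DEPTH  8    /* tokens per queue, fixed at init */
+    #define EXAMPLE_TOKEN_SIZE   64   /* bytes per token, fixed at init */
+
+    struct example_queue {
+        uint32_t wr_index;            /* advanced by the producer only */
+        uint32_t rd_index;            /* advanced by the consumer only */
+        uint8_t tokens[EXAMPLE_QUEUE_DEPTH][EXAMPLE_TOKEN_SIZE];
+    };
+
+    /* producer side: returns 0 on success, -1 if the queue is full */
+    static int example_send(struct example_queue *q, const void *token)
+    {
+        uint32_t next = (q->wr_index + 1) % EXAMPLE_QUEUE_DEPTH;
+
+        if (next == q->rd_index)      /* full: one slot is kept unused */
+            return -1;
+        memcpy(q->tokens[q->wr_index], token, EXAMPLE_TOKEN_SIZE);
+        q->wr_index = next;           /* publish only after the copy */
+        return 0;
+    }
+
+    /* consumer side: returns 0 on success, -1 if the queue is empty */
+    static int example_receive(struct example_queue *q, void *token)
+    {
+        if (q->rd_index == q->wr_index)
+            return -1;
+        memcpy(token, q->tokens[q->rd_index], EXAMPLE_TOKEN_SIZE);
+        q->rd_index = (q->rd_index + 1) % EXAMPLE_QUEUE_DEPTH;
+        return 0;
+    }
+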
+The IPU6 driver must prepare and configure the number of input and output
+queues, the count of tokens per queue and the size of each token before
+initiating and starting the communication with the firmware; firmware and
+software must use the same configuration. The IPU6 Buttress has a number of
+firmware boot parameter registers which can be used to store the address of the
+configuration and initialise the syscom state; the driver can then request the
+firmware to start and run by setting the scalar processor control status
+register.
+
+
+Input System
+==============
+
+The IPU6 Input System consists of a MIPI D-PHY and several CSI receiver
+controllers. It can capture image pixel data from camera sensors or other MIPI
+CSI output devices.
+
+D-PHYs and CSI-2 ports lane mapping
+-----------------------------------
+
+IPU6 integrates different D-PHY IPs on different SoCs: on Tiger Lake and Alder
+Lake, IPU6 integrates an MCD10 D-PHY, IPU6SE on Jasper Lake integrates a JSL
+D-PHY and IPU6EP on Meteor Lake integrates a Synopsys DWC D-PHY. There is an
+adaptation layer between the D-PHY and the CSI receiver controller which
+includes port configuration, a PHY wrapper or private test interfaces for the
+D-PHY. Three D-PHY drivers - ``ipu6-isys-mcd-phy.c``, ``ipu6-isys-jsl-phy.c``
+and ``ipu6-isys-dwc-phy.c`` - program the above 3 D-PHYs in IPU6.
+
+Different IPU6 versions have different D-PHY lane mappings. On Tiger Lake,
+there are 12 data lanes and 8 clock lanes and IPU6 supports a maximum of 8
+CSI-2 ports; see the PPI mapping in ``ipu6-isys-mcd-phy.c`` for more
+information. On Jasper Lake and Alder Lake, the D-PHY has 8 data lanes and 4
+clock lanes and IPU6 supports a maximum of 4 CSI-2 ports. For Meteor Lake, the
+D-PHY has 12 data lanes and 6 clock lanes and IPU6 supports a maximum of 6
+CSI-2 ports.
+
+.. Note:: Adjacent CSI ports work as a pair and share the data lanes.
+	  For example, for CSI ports 0 and 1, CSI port 0 supports a maximum of
+	  4 data lanes and CSI port 1 supports a maximum of 2 data lanes; CSI
+	  port 0 with 2 data lanes can work together with CSI port 1 with 2
+	  data lanes. If CSI port 0 is used with 4 lanes, CSI port 1 is not
+	  available as the 4 data lanes are shared by CSI ports 0 and 1. The
+	  same applies to CSI port pairs 2/3, 4/5 and 6/7.
+
+IS firmware ABIs
+----------------
+
+The IPU6 firmware defines a series of ABIs for software. In general, software
+first prepares the stream configuration
+``struct ipu6_fw_isys_stream_cfg_data_abi`` and sends the configuration to the
+firmware via the ``STREAM_OPEN`` command. The stream configuration includes
+input pins and output pins: the input pin
+``struct ipu6_fw_isys_input_pin_info_abi`` defines the resolution and data type
+of the input source, and the output pin
+``struct ipu6_fw_isys_output_pin_info_abi`` defines the output resolution,
+stride and frame format, etc. Once the driver gets the interrupt from the
+firmware indicating that the stream was opened successfully, it sends the
+``STREAM_START`` and ``STREAM_CAPTURE`` commands to request the firmware to
+start capturing image frames. The ``STREAM_CAPTURE`` command queues the buffers
+to the firmware with ``struct ipu6_fw_isys_frame_buff_set``; software then
+waits for the interrupt and response from the firmware. ``PIN_DATA_READY``
+means data is ready on a specific output pin, and software then returns the
+buffers to the user.
+
+.. Note:: See :ref:`Examples<ipu6_isys_capture_examples>` about how to do
+	  capture by IPU6 IS driver.
+
+
-- 
2.43.0

From 7103b8fabe3158d203b72036abc2a964687d46b8 Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Mon, 15 Jan 2024 15:57:06 +0100
Subject: [PATCH 16/31] media: intel/ipu6: Disable packed bayer v4l2-buffer
 formats on TGL

Using CSI2 packing to store 10bpp bayer data in the v4l2-buffers does not
work on Tiger Lake when testing with an ov01a1s sensor.

Disable packed bayer formats on Tiger Lake for now.

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 .../media/pci/intel/ipu6/ipu6-isys-video.c    | 60 +++++++++++++------
 1 file changed, 43 insertions(+), 17 deletions(-)

diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
index 847eac26bcd6..16d15f05a6dc 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
@@ -61,6 +61,17 @@ const struct ipu6_isys_pixelformat ipu6_isys_pfmts[] = {
 	 IPU6_FW_ISYS_FRAME_FORMAT_RAW8},
 	{V4L2_PIX_FMT_SRGGB8, 8, 8, MEDIA_BUS_FMT_SRGGB8_1X8,
 	 IPU6_FW_ISYS_FRAME_FORMAT_RAW8},
+	{V4L2_PIX_FMT_UYVY, 16, 16, MEDIA_BUS_FMT_UYVY8_1X16,
+	 IPU6_FW_ISYS_FRAME_FORMAT_UYVY},
+	{V4L2_PIX_FMT_YUYV, 16, 16, MEDIA_BUS_FMT_YUYV8_1X16,
+	 IPU6_FW_ISYS_FRAME_FORMAT_YUYV},
+	{V4L2_PIX_FMT_RGB565, 16, 16, MEDIA_BUS_FMT_RGB565_1X16,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RGB565},
+	{V4L2_PIX_FMT_BGR24, 24, 24, MEDIA_BUS_FMT_RGB888_1X24,
+	 IPU6_FW_ISYS_FRAME_FORMAT_RGBA888},
+};
+
+const struct ipu6_isys_pixelformat ipu6_isys_pfmts_packed[] = {
 	{V4L2_PIX_FMT_SBGGR12P, 12, 12, MEDIA_BUS_FMT_SBGGR12_1X12,
 	 IPU6_FW_ISYS_FRAME_FORMAT_RAW12},
 	{V4L2_PIX_FMT_SGBRG12P, 12, 12, MEDIA_BUS_FMT_SGBRG12_1X12,
@@ -77,14 +88,6 @@ const struct ipu6_isys_pixelformat ipu6_isys_pfmts[] = {
 	 IPU6_FW_ISYS_FRAME_FORMAT_RAW10},
 	{V4L2_PIX_FMT_SRGGB10P, 10, 10, MEDIA_BUS_FMT_SRGGB10_1X10,
 	 IPU6_FW_ISYS_FRAME_FORMAT_RAW10},
-	{V4L2_PIX_FMT_UYVY, 16, 16, MEDIA_BUS_FMT_UYVY8_1X16,
-	 IPU6_FW_ISYS_FRAME_FORMAT_UYVY},
-	{V4L2_PIX_FMT_YUYV, 16, 16, MEDIA_BUS_FMT_YUYV8_1X16,
-	 IPU6_FW_ISYS_FRAME_FORMAT_YUYV},
-	{V4L2_PIX_FMT_RGB565, 16, 16, MEDIA_BUS_FMT_RGB565_1X16,
-	 IPU6_FW_ISYS_FRAME_FORMAT_RGB565},
-	{V4L2_PIX_FMT_BGR24, 24, 24, MEDIA_BUS_FMT_RGB888_1X24,
-	 IPU6_FW_ISYS_FRAME_FORMAT_RGBA888},
 };
 
 static int video_open(struct file *file)
@@ -109,14 +112,27 @@ static int video_release(struct file *file)
 	return vb2_fop_release(file);
 }
 
+static const struct ipu6_isys_pixelformat *
+ipu6_isys_get_pixelformat_by_idx(unsigned int idx)
+{
+	if (idx < ARRAY_SIZE(ipu6_isys_pfmts))
+		return &ipu6_isys_pfmts[idx];
+
+	idx -= ARRAY_SIZE(ipu6_isys_pfmts);
+
+	if (idx < ARRAY_SIZE(ipu6_isys_pfmts_packed))
+		return &ipu6_isys_pfmts_packed[idx];
+
+	return NULL;
+}
+
 static const struct ipu6_isys_pixelformat *
 ipu6_isys_get_pixelformat(u32 pixelformat)
 {
+	const struct ipu6_isys_pixelformat *pfmt;
 	unsigned int i;
 
-	for (i = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) {
-		const struct ipu6_isys_pixelformat *pfmt = &ipu6_isys_pfmts[i];
-
+	for (i = 0; (pfmt = ipu6_isys_get_pixelformat_by_idx(i)); i++) {
 		if (pfmt->pixelformat == pixelformat)
 			return pfmt;
 	}
@@ -138,24 +154,34 @@ int ipu6_isys_vidioc_querycap(struct file *file, void *fh,
 int ipu6_isys_vidioc_enum_fmt(struct file *file, void *fh,
 			      struct v4l2_fmtdesc *f)
 {
-	unsigned int i, found = 0;
+	struct ipu6_isys_video *av = video_drvdata(file);
+	const struct ipu6_isys_pixelformat *fmt;
+	unsigned int i, nfmts, found = 0;
 
-	if (f->index >= ARRAY_SIZE(ipu6_isys_pfmts))
+	nfmts = ARRAY_SIZE(ipu6_isys_pfmts);
+	/* Disable packed formats on TGL for now, TGL has 8 CSI ports */
+	if (av->isys->pdata->ipdata->csi2.nports != 8)
+		nfmts += ARRAY_SIZE(ipu6_isys_pfmts_packed);
+
+	if (f->index >= nfmts)
 		return -EINVAL;
 
 	if (!f->mbus_code) {
+		fmt = ipu6_isys_get_pixelformat_by_idx(f->index);
 		f->flags = 0;
-		f->pixelformat = ipu6_isys_pfmts[f->index].pixelformat;
+		f->pixelformat = fmt->pixelformat;
 		return 0;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(ipu6_isys_pfmts); i++) {
-		if (f->mbus_code != ipu6_isys_pfmts[i].code)
+	for (i = 0; i < nfmts; i++) {
+		fmt = ipu6_isys_get_pixelformat_by_idx(i);
+
+		if (f->mbus_code != fmt->code)
 			continue;
 
 		if (f->index == found) {
 			f->flags = 0;
-			f->pixelformat = ipu6_isys_pfmts[i].pixelformat;
+			f->pixelformat = fmt->pixelformat;
 			return 0;
 		}
 		found++;
-- 
2.43.0

From ddd2826369e8ee4ba5b04502e1e20869590742da Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Mon, 6 Nov 2023 12:13:42 +0100
Subject: [PATCH 17/31] media: Add ov01a1s driver

Add ov01a1s driver from:
https://github.com/intel/ipu6-drivers/

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 drivers/media/i2c/Kconfig   |    9 +
 drivers/media/i2c/Makefile  |    1 +
 drivers/media/i2c/ov01a1s.c | 1191 +++++++++++++++++++++++++++++++++++
 3 files changed, 1201 insertions(+)
 create mode 100644 drivers/media/i2c/ov01a1s.c

diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 59ee0ca2c978..d5a516e4fce2 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -283,6 +283,15 @@ config VIDEO_OV01A10
 	  To compile this driver as a module, choose M here: the
 	  module will be called ov01a10.
 
+config VIDEO_OV01A1S
+	tristate "OmniVision OV01A1S sensor support"
+	help
+	  This is a Video4Linux2 sensor driver for the OmniVision
+	  OV01A1S camera.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ov01a1s.
+
 config VIDEO_OV02A10
 	tristate "OmniVision OV02A10 sensor support"
 	help
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index f5010f80a21f..ed5aa45e3506 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
 obj-$(CONFIG_VIDEO_MT9V111) += mt9v111.o
 obj-$(CONFIG_VIDEO_OG01A1B) += og01a1b.o
 obj-$(CONFIG_VIDEO_OV01A10) += ov01a10.o
+obj-$(CONFIG_VIDEO_OV01A1S) += ov01a1s.o
 obj-$(CONFIG_VIDEO_OV02A10) += ov02a10.o
 obj-$(CONFIG_VIDEO_OV08D10) += ov08d10.o
 obj-$(CONFIG_VIDEO_OV08X40) += ov08x40.o
diff --git a/drivers/media/i2c/ov01a1s.c b/drivers/media/i2c/ov01a1s.c
new file mode 100644
index 000000000000..0dcce8b492b4
--- /dev/null
+++ b/drivers/media/i2c/ov01a1s.c
@@ -0,0 +1,1191 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020-2022 Intel Corporation.
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/version.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#if IS_ENABLED(CONFIG_INTEL_SKL_INT3472)
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#elif IS_ENABLED(CONFIG_POWER_CTRL_LOGIC)
+#include "power_ctrl_logic.h"
+#endif
+#if IS_ENABLED(CONFIG_INTEL_VSC)
+#include <linux/vsc.h>
+#endif
+
+#define OV01A1S_LINK_FREQ_400MHZ	400000000ULL
+#define OV01A1S_SCLK			40000000LL
+#define OV01A1S_MCLK			19200000
+#define OV01A1S_DATA_LANES		1
+#define OV01A1S_RGB_DEPTH		10
+
+#define OV01A1S_REG_CHIP_ID		0x300a
+#define OV01A1S_CHIP_ID			0x560141
+
+#define OV01A1S_REG_MODE_SELECT		0x0100
+#define OV01A1S_MODE_STANDBY		0x00
+#define OV01A1S_MODE_STREAMING		0x01
+
+/* vertical-timings from sensor */
+#define OV01A1S_REG_VTS			0x380e
+#define OV01A1S_VTS_DEF			0x0380
+#define OV01A1S_VTS_MIN			0x0380
+#define OV01A1S_VTS_MAX			0xffff
+
+/* Exposure controls from sensor */
+#define OV01A1S_REG_EXPOSURE		0x3501
+#define OV01A1S_EXPOSURE_MIN		4
+#define OV01A1S_EXPOSURE_MAX_MARGIN	8
+#define OV01A1S_EXPOSURE_STEP		1
+
+/* Analog gain controls from sensor */
+#define OV01A1S_REG_ANALOG_GAIN		0x3508
+#define OV01A1S_ANAL_GAIN_MIN		0x100
+#define OV01A1S_ANAL_GAIN_MAX		0xffff
+#define OV01A1S_ANAL_GAIN_STEP		1
+
+/* Digital gain controls from sensor */
+#define OV01A1S_REG_DIGILAL_GAIN_B	0x350A
+#define OV01A1S_REG_DIGITAL_GAIN_GB	0x3510
+#define OV01A1S_REG_DIGITAL_GAIN_GR	0x3513
+#define OV01A1S_REG_DIGITAL_GAIN_R	0x3516
+#define OV01A1S_DGTL_GAIN_MIN		0
+#define OV01A1S_DGTL_GAIN_MAX		0x3ffff
+#define OV01A1S_DGTL_GAIN_STEP		1
+#define OV01A1S_DGTL_GAIN_DEFAULT	1024
+
+/* Test Pattern Control */
+#define OV01A1S_REG_TEST_PATTERN		0x4503
+#define OV01A1S_TEST_PATTERN_ENABLE	BIT(7)
+#define OV01A1S_TEST_PATTERN_BAR_SHIFT	0
+
+enum {
+	OV01A1S_LINK_FREQ_400MHZ_INDEX,
+};
+
+struct ov01a1s_reg {
+	u16 address;
+	u8 val;
+};
+
+struct ov01a1s_reg_list {
+	u32 num_of_regs;
+	const struct ov01a1s_reg *regs;
+};
+
+struct ov01a1s_link_freq_config {
+	const struct ov01a1s_reg_list reg_list;
+};
+
+struct ov01a1s_mode {
+	/* Frame width in pixels */
+	u32 width;
+
+	/* Frame height in pixels */
+	u32 height;
+
+	/* Horizontal timing size */
+	u32 hts;
+
+	/* Default vertical timing size */
+	u32 vts_def;
+
+	/* Min vertical timing size */
+	u32 vts_min;
+
+	/* Link frequency needed for this resolution */
+	u32 link_freq_index;
+
+	/* Sensor register settings for this resolution */
+	const struct ov01a1s_reg_list reg_list;
+};
+
+static const struct ov01a1s_reg mipi_data_rate_720mbps[] = {
+};
+
+static const struct ov01a1s_reg sensor_1296x800_setting[] = {
+	{0x0103, 0x01},
+	{0x0302, 0x00},
+	{0x0303, 0x06},
+	{0x0304, 0x01},
+	{0x0305, 0x90},
+	{0x0306, 0x00},
+	{0x0308, 0x01},
+	{0x0309, 0x00},
+	{0x030c, 0x01},
+	{0x0322, 0x01},
+	{0x0323, 0x06},
+	{0x0324, 0x01},
+	{0x0325, 0x68},
+	{0x3002, 0xa1},
+	{0x301e, 0xf0},
+	{0x3022, 0x01},
+	{0x3501, 0x03},
+	{0x3502, 0x78},
+	{0x3504, 0x0c},
+	{0x3508, 0x01},
+	{0x3509, 0x00},
+	{0x3601, 0xc0},
+	{0x3603, 0x71},
+	{0x3610, 0x68},
+	{0x3611, 0x86},
+	{0x3640, 0x10},
+	{0x3641, 0x80},
+	{0x3642, 0xdc},
+	{0x3646, 0x55},
+	{0x3647, 0x57},
+	{0x364b, 0x00},
+	{0x3653, 0x10},
+	{0x3655, 0x00},
+	{0x3656, 0x00},
+	{0x365f, 0x0f},
+	{0x3661, 0x45},
+	{0x3662, 0x24},
+	{0x3663, 0x11},
+	{0x3664, 0x07},
+	{0x3709, 0x34},
+	{0x370b, 0x6f},
+	{0x3714, 0x22},
+	{0x371b, 0x27},
+	{0x371c, 0x67},
+	{0x371d, 0xa7},
+	{0x371e, 0xe7},
+	{0x3730, 0x81},
+	{0x3733, 0x10},
+	{0x3734, 0x40},
+	{0x3737, 0x04},
+	{0x3739, 0x1c},
+	{0x3767, 0x00},
+	{0x376c, 0x81},
+	{0x3772, 0x14},
+	{0x37c2, 0x04},
+	{0x37d8, 0x03},
+	{0x37d9, 0x0c},
+	{0x37e0, 0x00},
+	{0x37e1, 0x08},
+	{0x37e2, 0x10},
+	{0x37e3, 0x04},
+	{0x37e4, 0x04},
+	{0x37e5, 0x03},
+	{0x37e6, 0x04},
+	{0x3800, 0x00},
+	{0x3801, 0x00},
+	{0x3802, 0x00},
+	{0x3803, 0x00},
+	{0x3804, 0x05},
+	{0x3805, 0x0f},
+	{0x3806, 0x03},
+	{0x3807, 0x2f},
+	{0x3808, 0x05},
+	{0x3809, 0x00},
+	{0x380a, 0x03},
+	{0x380b, 0x1e},
+	{0x380c, 0x05},
+	{0x380d, 0xd0},
+	{0x380e, 0x03},
+	{0x380f, 0x80},
+	{0x3810, 0x00},
+	{0x3811, 0x09},
+	{0x3812, 0x00},
+	{0x3813, 0x08},
+	{0x3814, 0x01},
+	{0x3815, 0x01},
+	{0x3816, 0x01},
+	{0x3817, 0x01},
+	{0x3820, 0xa8},
+	{0x3822, 0x03},
+	{0x3832, 0x28},
+	{0x3833, 0x10},
+	{0x3b00, 0x00},
+	{0x3c80, 0x00},
+	{0x3c88, 0x02},
+	{0x3c8c, 0x07},
+	{0x3c8d, 0x40},
+	{0x3cc7, 0x80},
+	{0x4000, 0xc3},
+	{0x4001, 0xe0},
+	{0x4003, 0x40},
+	{0x4008, 0x02},
+	{0x4009, 0x19},
+	{0x400a, 0x01},
+	{0x400b, 0x6c},
+	{0x4011, 0x00},
+	{0x4041, 0x00},
+	{0x4300, 0xff},
+	{0x4301, 0x00},
+	{0x4302, 0x0f},
+	{0x4503, 0x00},
+	{0x4601, 0x50},
+	{0x481f, 0x34},
+	{0x4825, 0x33},
+	{0x4837, 0x14},
+	{0x4881, 0x40},
+	{0x4883, 0x01},
+	{0x4890, 0x00},
+	{0x4901, 0x00},
+	{0x4902, 0x00},
+	{0x4b00, 0x2a},
+	{0x4b0d, 0x00},
+	{0x450a, 0x04},
+	{0x450b, 0x00},
+	{0x5000, 0x65},
+	{0x5004, 0x00},
+	{0x5080, 0x40},
+	{0x5200, 0x18},
+	{0x4837, 0x14},
+	{0x0305, 0xf4},
+	{0x0325, 0xc2},
+	{0x3808, 0x05},
+	{0x3809, 0x10},
+	{0x380a, 0x03},
+	{0x380b, 0x1e},
+	{0x3810, 0x00},
+	{0x3811, 0x00},
+	{0x3812, 0x00},
+	{0x3813, 0x09},
+	{0x3820, 0x88},
+	{0x373d, 0x24},
+};
+
+static const char * const ov01a1s_test_pattern_menu[] = {
+	"Disabled",
+	"Color Bar",
+	"Top-Bottom Darker Color Bar",
+	"Right-Left Darker Color Bar",
+	"Color Bar type 4",
+};
+
+static const s64 link_freq_menu_items[] = {
+	OV01A1S_LINK_FREQ_400MHZ,
+};
+
+static const struct ov01a1s_link_freq_config link_freq_configs[] = {
+	[OV01A1S_LINK_FREQ_400MHZ_INDEX] = {
+		.reg_list = {
+			.num_of_regs = ARRAY_SIZE(mipi_data_rate_720mbps),
+			.regs = mipi_data_rate_720mbps,
+		}
+	},
+};
+
+static const struct ov01a1s_mode supported_modes[] = {
+	{
+		.width = 1296,
+		.height = 798,
+		.hts = 1488,
+		.vts_def = OV01A1S_VTS_DEF,
+		.vts_min = OV01A1S_VTS_MIN,
+		.reg_list = {
+			.num_of_regs = ARRAY_SIZE(sensor_1296x800_setting),
+			.regs = sensor_1296x800_setting,
+		},
+		.link_freq_index = OV01A1S_LINK_FREQ_400MHZ_INDEX,
+	},
+};
+
+struct ov01a1s {
+	struct v4l2_subdev sd;
+	struct media_pad pad;
+	struct v4l2_ctrl_handler ctrl_handler;
+
+	/* V4L2 Controls */
+	struct v4l2_ctrl *link_freq;
+	struct v4l2_ctrl *pixel_rate;
+	struct v4l2_ctrl *vblank;
+	struct v4l2_ctrl *hblank;
+	struct v4l2_ctrl *exposure;
+#if IS_ENABLED(CONFIG_INTEL_VSC)
+	struct v4l2_ctrl *privacy_status;
+
+	/* VSC settings */
+	struct vsc_mipi_config conf;
+	struct vsc_camera_status status;
+#endif
+
+	/* Current mode */
+	const struct ov01a1s_mode *cur_mode;
+
+	/* To serialize asynchronous callbacks */
+	struct mutex mutex;
+
+	/* i2c client */
+	struct i2c_client *client;
+
+#if IS_ENABLED(CONFIG_INTEL_SKL_INT3472)
+	/* GPIO for reset */
+	struct gpio_desc *reset_gpio;
+	/* GPIO for powerdown */
+	struct gpio_desc *powerdown_gpio;
+	/* Power enable */
+	struct regulator *avdd;
+	/* Clock provider */
+	struct clk *clk;
+#endif
+
+	enum {
+		OV01A1S_USE_DEFAULT = 0,
+#if IS_ENABLED(CONFIG_INTEL_SKL_INT3472) || IS_ENABLED(CONFIG_POWER_CTRL_LOGIC)
+		OV01A1S_USE_INT3472 = 1,
+#endif
+#if IS_ENABLED(CONFIG_INTEL_VSC)
+		OV01A1S_USE_INTEL_VSC = 2,
+#endif
+	} power_type;
+
+	/* Streaming on/off */
+	bool streaming;
+};
+
+static inline struct ov01a1s *to_ov01a1s(struct v4l2_subdev *subdev)
+{
+	return container_of(subdev, struct ov01a1s, sd);
+}
+
+static int ov01a1s_read_reg(struct ov01a1s *ov01a1s, u16 reg, u16 len, u32 *val)
+{
+	struct i2c_client *client = ov01a1s->client;
+	struct i2c_msg msgs[2];
+	u8 addr_buf[2];
+	u8 data_buf[4] = {0};
+	int ret = 0;
+
+	if (len > sizeof(data_buf))
+		return -EINVAL;
+
+	put_unaligned_be16(reg, addr_buf);
+	msgs[0].addr = client->addr;
+	msgs[0].flags = 0;
+	msgs[0].len = sizeof(addr_buf);
+	msgs[0].buf = addr_buf;
+	msgs[1].addr = client->addr;
+	msgs[1].flags = I2C_M_RD;
+	msgs[1].len = len;
+	msgs[1].buf = &data_buf[sizeof(data_buf) - len];
+
+	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+	if (ret != ARRAY_SIZE(msgs))
+		return ret < 0 ? ret : -EIO;
+
+	*val = get_unaligned_be32(data_buf);
+
+	return 0;
+}
+
+static int ov01a1s_write_reg(struct ov01a1s *ov01a1s, u16 reg, u16 len, u32 val)
+{
+	struct i2c_client *client = ov01a1s->client;
+	u8 buf[6];
+	int ret = 0;
+
+	if (len > 4)
+		return -EINVAL;
+
+	put_unaligned_be16(reg, buf);
+	put_unaligned_be32(val << 8 * (4 - len), buf + 2);
+
+	ret = i2c_master_send(client, buf, len + 2);
+	if (ret != len + 2)
+		return ret < 0 ? ret : -EIO;
+
+	return 0;
+}
+
+static int ov01a1s_write_reg_list(struct ov01a1s *ov01a1s,
+				  const struct ov01a1s_reg_list *r_list)
+{
+	struct i2c_client *client = ov01a1s->client;
+	unsigned int i;
+	int ret = 0;
+
+	for (i = 0; i < r_list->num_of_regs; i++) {
+		ret = ov01a1s_write_reg(ov01a1s, r_list->regs[i].address, 1,
+					r_list->regs[i].val);
+		if (ret) {
+			dev_err_ratelimited(&client->dev,
+					    "write reg 0x%4.4x return err = %d",
+					    r_list->regs[i].address, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ov01a1s_update_digital_gain(struct ov01a1s *ov01a1s, u32 d_gain)
+{
+	struct i2c_client *client = ov01a1s->client;
+	u32 real = d_gain << 6;
+	int ret = 0;
+
+	ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_DIGILAL_GAIN_B, 3, real);
+	if (ret) {
+		dev_err(&client->dev, "failed to set OV01A1S_REG_DIGITAL_GAIN_B");
+		return ret;
+	}
+	ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_DIGITAL_GAIN_GB, 3, real);
+	if (ret) {
+		dev_err(&client->dev, "failed to set OV01A1S_REG_DIGITAL_GAIN_GB");
+		return ret;
+	}
+	ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_DIGITAL_GAIN_GR, 3, real);
+	if (ret) {
+		dev_err(&client->dev, "failed to set OV01A1S_REG_DIGITAL_GAIN_GR");
+		return ret;
+	}
+
+	ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_DIGITAL_GAIN_R, 3, real);
+	if (ret) {
+		dev_err(&client->dev, "failed to set OV01A1S_REG_DIGITAL_GAIN_R");
+		return ret;
+	}
+	return ret;
+}
+
+static int ov01a1s_test_pattern(struct ov01a1s *ov01a1s, u32 pattern)
+{
+	if (pattern)
+		pattern = (pattern - 1) << OV01A1S_TEST_PATTERN_BAR_SHIFT |
+			  OV01A1S_TEST_PATTERN_ENABLE;
+
+	return ov01a1s_write_reg(ov01a1s, OV01A1S_REG_TEST_PATTERN, 1, pattern);
+}
+
+static int ov01a1s_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct ov01a1s *ov01a1s = container_of(ctrl->handler,
+					     struct ov01a1s, ctrl_handler);
+	struct i2c_client *client = ov01a1s->client;
+	s64 exposure_max;
+	int ret = 0;
+
+	/* Propagate change of current control to all related controls */
+	if (ctrl->id == V4L2_CID_VBLANK) {
+		/* Update max exposure while meeting expected vblanking */
+		exposure_max = ov01a1s->cur_mode->height + ctrl->val -
+			       OV01A1S_EXPOSURE_MAX_MARGIN;
+		__v4l2_ctrl_modify_range(ov01a1s->exposure,
+					 ov01a1s->exposure->minimum,
+					 exposure_max, ov01a1s->exposure->step,
+					 exposure_max);
+	}
+
+	/* V4L2 controls values will be applied only when power is already up */
+	if (!pm_runtime_get_if_in_use(&client->dev))
+		return 0;
+
+	switch (ctrl->id) {
+	case V4L2_CID_ANALOGUE_GAIN:
+		ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_ANALOG_GAIN, 2,
+					ctrl->val);
+		break;
+
+	case V4L2_CID_DIGITAL_GAIN:
+		ret = ov01a1s_update_digital_gain(ov01a1s, ctrl->val);
+		break;
+
+	case V4L2_CID_EXPOSURE:
+		ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_EXPOSURE, 2,
+					ctrl->val);
+		break;
+
+	case V4L2_CID_VBLANK:
+		ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_VTS, 2,
+					ov01a1s->cur_mode->height + ctrl->val);
+		break;
+
+	case V4L2_CID_TEST_PATTERN:
+		ret = ov01a1s_test_pattern(ov01a1s, ctrl->val);
+		break;
+
+#if IS_ENABLED(CONFIG_INTEL_VSC)
+	case V4L2_CID_PRIVACY:
+		dev_dbg(&client->dev, "set privacy to %d", ctrl->val);
+		break;
+
+#endif
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	pm_runtime_put(&client->dev);
+
+	return ret;
+}
+
+static const struct v4l2_ctrl_ops ov01a1s_ctrl_ops = {
+	.s_ctrl = ov01a1s_set_ctrl,
+};
+
+static int ov01a1s_init_controls(struct ov01a1s *ov01a1s)
+{
+	struct v4l2_ctrl_handler *ctrl_hdlr;
+	const struct ov01a1s_mode *cur_mode;
+	s64 exposure_max, h_blank;
+	u32 vblank_min, vblank_max, vblank_default;
+	int size;
+	int ret = 0;
+
+	ctrl_hdlr = &ov01a1s->ctrl_handler;
+#if IS_ENABLED(CONFIG_INTEL_VSC)
+	ret = v4l2_ctrl_handler_init(ctrl_hdlr, 9);
+#else
+	ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+#endif
+	if (ret)
+		return ret;
+
+	ctrl_hdlr->lock = &ov01a1s->mutex;
+	cur_mode = ov01a1s->cur_mode;
+	size = ARRAY_SIZE(link_freq_menu_items);
+
+	ov01a1s->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
+						    &ov01a1s_ctrl_ops,
+						    V4L2_CID_LINK_FREQ,
+						    size - 1, 0,
+						    link_freq_menu_items);
+	if (ov01a1s->link_freq)
+		ov01a1s->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+	ov01a1s->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops,
+						V4L2_CID_PIXEL_RATE, 0,
+						OV01A1S_SCLK, 1, OV01A1S_SCLK);
+
+	vblank_min = cur_mode->vts_min - cur_mode->height;
+	vblank_max = OV01A1S_VTS_MAX - cur_mode->height;
+	vblank_default = cur_mode->vts_def - cur_mode->height;
+	ov01a1s->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops,
+					    V4L2_CID_VBLANK, vblank_min,
+					    vblank_max, 1, vblank_default);
+
+	h_blank = cur_mode->hts - cur_mode->width;
+	ov01a1s->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops,
+					    V4L2_CID_HBLANK, h_blank, h_blank,
+					    1, h_blank);
+	if (ov01a1s->hblank)
+		ov01a1s->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+#if IS_ENABLED(CONFIG_INTEL_VSC)
+	ov01a1s->privacy_status = v4l2_ctrl_new_std(ctrl_hdlr,
+						    &ov01a1s_ctrl_ops,
+						    V4L2_CID_PRIVACY,
+						    0, 1, 1, 0);
+#endif
+
+	v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+			  OV01A1S_ANAL_GAIN_MIN, OV01A1S_ANAL_GAIN_MAX,
+			  OV01A1S_ANAL_GAIN_STEP, OV01A1S_ANAL_GAIN_MIN);
+	v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+			  OV01A1S_DGTL_GAIN_MIN, OV01A1S_DGTL_GAIN_MAX,
+			  OV01A1S_DGTL_GAIN_STEP, OV01A1S_DGTL_GAIN_DEFAULT);
+	exposure_max = cur_mode->vts_def - OV01A1S_EXPOSURE_MAX_MARGIN;
+	ov01a1s->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops,
+					      V4L2_CID_EXPOSURE,
+					      OV01A1S_EXPOSURE_MIN,
+					      exposure_max,
+					      OV01A1S_EXPOSURE_STEP,
+					      exposure_max);
+	v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov01a1s_ctrl_ops,
+				     V4L2_CID_TEST_PATTERN,
+				     ARRAY_SIZE(ov01a1s_test_pattern_menu) - 1,
+				     0, 0, ov01a1s_test_pattern_menu);
+	if (ctrl_hdlr->error)
+		return ctrl_hdlr->error;
+
+	ov01a1s->sd.ctrl_handler = ctrl_hdlr;
+
+	return 0;
+}
+
+static void ov01a1s_update_pad_format(const struct ov01a1s_mode *mode,
+				      struct v4l2_mbus_framefmt *fmt)
+{
+	fmt->width = mode->width;
+	fmt->height = mode->height;
+	fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+	fmt->field = V4L2_FIELD_NONE;
+}
+
+#if IS_ENABLED(CONFIG_INTEL_VSC)
+static void ov01a1s_vsc_privacy_callback(void *handle,
+				       enum vsc_privacy_status status)
+{
+	struct ov01a1s *ov01a1s = handle;
+
+	v4l2_ctrl_s_ctrl(ov01a1s->privacy_status, !status);
+}
+
+#endif
+static int ov01a1s_start_streaming(struct ov01a1s *ov01a1s)
+{
+	struct i2c_client *client = ov01a1s->client;
+	const struct ov01a1s_reg_list *reg_list;
+	int link_freq_index;
+	int ret = 0;
+
+	link_freq_index = ov01a1s->cur_mode->link_freq_index;
+	reg_list = &link_freq_configs[link_freq_index].reg_list;
+	ret = ov01a1s_write_reg_list(ov01a1s, reg_list);
+	if (ret) {
+		dev_err(&client->dev, "failed to set plls");
+		return ret;
+	}
+
+	reg_list = &ov01a1s->cur_mode->reg_list;
+	ret = ov01a1s_write_reg_list(ov01a1s, reg_list);
+	if (ret) {
+		dev_err(&client->dev, "failed to set mode");
+		return ret;
+	}
+
+	ret = __v4l2_ctrl_handler_setup(ov01a1s->sd.ctrl_handler);
+	if (ret)
+		return ret;
+
+	ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_MODE_SELECT, 1,
+				OV01A1S_MODE_STREAMING);
+	if (ret)
+		dev_err(&client->dev, "failed to start streaming");
+
+	return ret;
+}
+
+static void ov01a1s_stop_streaming(struct ov01a1s *ov01a1s)
+{
+	struct i2c_client *client = ov01a1s->client;
+	int ret = 0;
+
+	ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_MODE_SELECT, 1,
+				OV01A1S_MODE_STANDBY);
+	if (ret)
+		dev_err(&client->dev, "failed to stop streaming");
+}
+
+static int ov01a1s_set_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+	struct i2c_client *client = ov01a1s->client;
+	int ret = 0;
+
+	if (ov01a1s->streaming == enable)
+		return 0;
+
+	mutex_lock(&ov01a1s->mutex);
+	if (enable) {
+		ret = pm_runtime_get_sync(&client->dev);
+		if (ret < 0) {
+			pm_runtime_put_noidle(&client->dev);
+			mutex_unlock(&ov01a1s->mutex);
+			return ret;
+		}
+
+		ret = ov01a1s_start_streaming(ov01a1s);
+		if (ret) {
+			enable = 0;
+			ov01a1s_stop_streaming(ov01a1s);
+			pm_runtime_put(&client->dev);
+		}
+	} else {
+		ov01a1s_stop_streaming(ov01a1s);
+		pm_runtime_put(&client->dev);
+	}
+
+	ov01a1s->streaming = enable;
+	mutex_unlock(&ov01a1s->mutex);
+
+	return ret;
+}
+
+static int ov01a1s_power_off(struct device *dev)
+{
+	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+	struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+	int ret = 0;
+
+#if IS_ENABLED(CONFIG_INTEL_SKL_INT3472)
+	if (ov01a1s->power_type == OV01A1S_USE_INT3472) {
+		gpiod_set_value_cansleep(ov01a1s->reset_gpio, 1);
+		gpiod_set_value_cansleep(ov01a1s->powerdown_gpio, 1);
+		if (ov01a1s->avdd)
+			ret = regulator_disable(ov01a1s->avdd);
+		clk_disable_unprepare(ov01a1s->clk);
+		msleep(20);
+	}
+#elif IS_ENABLED(CONFIG_POWER_CTRL_LOGIC)
+	if (ov01a1s->power_type == OV01A1S_USE_INT3472)
+		ret = power_ctrl_logic_set_power(0);
+#endif
+#if IS_ENABLED(CONFIG_INTEL_VSC)
+	if (ov01a1s->power_type == OV01A1S_USE_INTEL_VSC) {
+		ret = vsc_release_camera_sensor(&ov01a1s->status);
+		if (ret && ret != -EAGAIN)
+			dev_err(dev, "Release VSC failed");
+	}
+#endif
+
+	return ret;
+}
+
+static int ov01a1s_power_on(struct device *dev)
+{
+	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+	struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+	int ret = 0;
+
+#if IS_ENABLED(CONFIG_INTEL_SKL_INT3472)
+	if (ov01a1s->power_type == OV01A1S_USE_INT3472) {
+		ret = clk_prepare_enable(ov01a1s->clk);
+		if (ret)
+			return ret;
+		if (ov01a1s->avdd)
+			ret = regulator_enable(ov01a1s->avdd);
+		if (ret)
+			return ret;
+		gpiod_set_value_cansleep(ov01a1s->powerdown_gpio, 0);
+		gpiod_set_value_cansleep(ov01a1s->reset_gpio, 0);
+		msleep(20);
+	}
+#elif IS_ENABLED(CONFIG_POWER_CTRL_LOGIC)
+	if (ov01a1s->power_type == OV01A1S_USE_INT3472)
+		ret = power_ctrl_logic_set_power(1);
+#endif
+#if IS_ENABLED(CONFIG_INTEL_VSC)
+	if (ov01a1s->power_type == OV01A1S_USE_INTEL_VSC) {
+		ret = vsc_acquire_camera_sensor(&ov01a1s->conf,
+						ov01a1s_vsc_privacy_callback,
+						ov01a1s, &ov01a1s->status);
+		if (ret && ret != -EAGAIN) {
+			dev_err(dev, "Acquire VSC failed");
+			return ret;
+		}
+		__v4l2_ctrl_s_ctrl(ov01a1s->privacy_status,
+				   !(ov01a1s->status.status));
+	}
+#endif
+
+	return ret;
+}
+
+static int __maybe_unused ov01a1s_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+
+	mutex_lock(&ov01a1s->mutex);
+	if (ov01a1s->streaming)
+		ov01a1s_stop_streaming(ov01a1s);
+
+	mutex_unlock(&ov01a1s->mutex);
+
+	return 0;
+}
+
+static int __maybe_unused ov01a1s_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+	int ret = 0;
+
+	mutex_lock(&ov01a1s->mutex);
+	if (!ov01a1s->streaming)
+		goto exit;
+
+	ret = ov01a1s_start_streaming(ov01a1s);
+	if (ret) {
+		ov01a1s->streaming = false;
+		ov01a1s_stop_streaming(ov01a1s);
+	}
+
+exit:
+	mutex_unlock(&ov01a1s->mutex);
+	return ret;
+}
+
+static int ov01a1s_set_format(struct v4l2_subdev *sd,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
+			      struct v4l2_subdev_pad_config *cfg,
+#else
+			      struct v4l2_subdev_state *sd_state,
+#endif
+			      struct v4l2_subdev_format *fmt)
+{
+	struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+	const struct ov01a1s_mode *mode;
+	s32 vblank_def, h_blank;
+
+	mode = v4l2_find_nearest_size(supported_modes,
+				      ARRAY_SIZE(supported_modes), width,
+				      height, fmt->format.width,
+				      fmt->format.height);
+
+	mutex_lock(&ov01a1s->mutex);
+	ov01a1s_update_pad_format(mode, &fmt->format);
+	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
+		*v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+#else
+		*v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
+#endif
+	} else {
+		ov01a1s->cur_mode = mode;
+		__v4l2_ctrl_s_ctrl(ov01a1s->link_freq, mode->link_freq_index);
+		__v4l2_ctrl_s_ctrl_int64(ov01a1s->pixel_rate, OV01A1S_SCLK);
+
+		/* Update limits and set FPS to default */
+		vblank_def = mode->vts_def - mode->height;
+		__v4l2_ctrl_modify_range(ov01a1s->vblank,
+					 mode->vts_min - mode->height,
+					 OV01A1S_VTS_MAX - mode->height, 1,
+					 vblank_def);
+		__v4l2_ctrl_s_ctrl(ov01a1s->vblank, vblank_def);
+		h_blank = mode->hts - mode->width;
+		__v4l2_ctrl_modify_range(ov01a1s->hblank, h_blank, h_blank, 1,
+					 h_blank);
+	}
+	mutex_unlock(&ov01a1s->mutex);
+
+	return 0;
+}
+
+static int ov01a1s_get_format(struct v4l2_subdev *sd,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
+			      struct v4l2_subdev_pad_config *cfg,
+#else
+			      struct v4l2_subdev_state *sd_state,
+#endif
+			      struct v4l2_subdev_format *fmt)
+{
+	struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+
+	mutex_lock(&ov01a1s->mutex);
+	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
+		fmt->format = *v4l2_subdev_get_try_format(&ov01a1s->sd, cfg,
+							  fmt->pad);
+#else
+		fmt->format = *v4l2_subdev_get_try_format(&ov01a1s->sd,
+							  sd_state, fmt->pad);
+#endif
+	else
+		ov01a1s_update_pad_format(ov01a1s->cur_mode, &fmt->format);
+
+	mutex_unlock(&ov01a1s->mutex);
+
+	return 0;
+}
+
+static int ov01a1s_enum_mbus_code(struct v4l2_subdev *sd,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
+				  struct v4l2_subdev_pad_config *cfg,
+#else
+				  struct v4l2_subdev_state *sd_state,
+#endif
+				  struct v4l2_subdev_mbus_code_enum *code)
+{
+	if (code->index > 0)
+		return -EINVAL;
+
+	code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+	return 0;
+}
+
+static int ov01a1s_enum_frame_size(struct v4l2_subdev *sd,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
+				   struct v4l2_subdev_pad_config *cfg,
+#else
+				   struct v4l2_subdev_state *sd_state,
+#endif
+				   struct v4l2_subdev_frame_size_enum *fse)
+{
+	if (fse->index >= ARRAY_SIZE(supported_modes))
+		return -EINVAL;
+
+	if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+		return -EINVAL;
+
+	fse->min_width = supported_modes[fse->index].width;
+	fse->max_width = fse->min_width;
+	fse->min_height = supported_modes[fse->index].height;
+	fse->max_height = fse->min_height;
+
+	return 0;
+}
+
+static int ov01a1s_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+
+	mutex_lock(&ov01a1s->mutex);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
+	ov01a1s_update_pad_format(&supported_modes[0],
+				  v4l2_subdev_get_try_format(sd, fh->pad, 0));
+#else
+	ov01a1s_update_pad_format(&supported_modes[0],
+				  v4l2_subdev_get_try_format(sd, fh->state, 0));
+#endif
+	mutex_unlock(&ov01a1s->mutex);
+
+	return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov01a1s_video_ops = {
+	.s_stream = ov01a1s_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov01a1s_pad_ops = {
+	.set_fmt = ov01a1s_set_format,
+	.get_fmt = ov01a1s_get_format,
+	.enum_mbus_code = ov01a1s_enum_mbus_code,
+	.enum_frame_size = ov01a1s_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops ov01a1s_subdev_ops = {
+	.video = &ov01a1s_video_ops,
+	.pad = &ov01a1s_pad_ops,
+};
+
+static const struct media_entity_operations ov01a1s_subdev_entity_ops = {
+	.link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops ov01a1s_internal_ops = {
+	.open = ov01a1s_open,
+};
+
+static int ov01a1s_identify_module(struct ov01a1s *ov01a1s)
+{
+	struct i2c_client *client = ov01a1s->client;
+	int ret;
+	u32 val;
+
+	ret = ov01a1s_read_reg(ov01a1s, OV01A1S_REG_CHIP_ID, 3, &val);
+	if (ret)
+		return ret;
+
+	if (val != OV01A1S_CHIP_ID) {
+		dev_err(&client->dev, "chip id mismatch: %x!=%x",
+			OV01A1S_CHIP_ID, val);
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)
+static int ov01a1s_remove(struct i2c_client *client)
+#else
+static void ov01a1s_remove(struct i2c_client *client)
+#endif
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+
+	v4l2_async_unregister_subdev(sd);
+	media_entity_cleanup(&sd->entity);
+	v4l2_ctrl_handler_free(sd->ctrl_handler);
+	pm_runtime_disable(&client->dev);
+	mutex_destroy(&ov01a1s->mutex);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)
+	return 0;
+#endif
+}
+
+#if IS_ENABLED(CONFIG_INTEL_SKL_INT3472)
+static int ov01a1s_parse_gpio(struct ov01a1s *ov01a1s)
+{
+	struct device *dev = &ov01a1s->client->dev;
+
+	ov01a1s->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(ov01a1s->reset_gpio)) {
+		dev_warn(dev, "error while getting reset gpio: %ld\n",
+			 PTR_ERR(ov01a1s->reset_gpio));
+		ov01a1s->reset_gpio = NULL;
+		return -EPROBE_DEFER;
+	}
+
+	/* Optional; don't fail the probe or warn if it can't be acquired */
+	ov01a1s->powerdown_gpio =
+		devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_LOW);
+	if (IS_ERR(ov01a1s->powerdown_gpio)) {
+		dev_dbg(dev, "no powerdown gpio: %ld\n",
+			PTR_ERR(ov01a1s->powerdown_gpio));
+		ov01a1s->powerdown_gpio = NULL;
+	}
+
+	ov01a1s->avdd = devm_regulator_get_optional(dev, "avdd");
+	if (IS_ERR(ov01a1s->avdd)) {
+		dev_dbg(dev, "no regulator avdd: %ld\n",
+			PTR_ERR(ov01a1s->avdd));
+		ov01a1s->avdd = NULL;
+	}
+
+	ov01a1s->clk = devm_clk_get_optional(dev, "clk");
+	if (IS_ERR(ov01a1s->clk)) {
+		dev_dbg(dev, "no clk: %ld\n", PTR_ERR(ov01a1s->clk));
+		ov01a1s->clk = NULL;
+	}
+
+	return 0;
+}
+#endif
+
+static int ov01a1s_parse_power(struct ov01a1s *ov01a1s)
+{
+	int ret = 0;
+
+#if IS_ENABLED(CONFIG_INTEL_VSC)
+	ov01a1s->conf.lane_num = OV01A1S_DATA_LANES;
+	/* frequency unit 100k */
+	ov01a1s->conf.freq = OV01A1S_LINK_FREQ_400MHZ / 100000;
+	ret = vsc_acquire_camera_sensor(&ov01a1s->conf, NULL, NULL, &ov01a1s->status);
+	if (!ret) {
+		ov01a1s->power_type = OV01A1S_USE_INTEL_VSC;
+		return 0;
+	} else if (ret != -EAGAIN) {
+		return ret;
+	}
+#endif
+#if IS_ENABLED(CONFIG_INTEL_SKL_INT3472)
+	ret = ov01a1s_parse_gpio(ov01a1s);
+#elif IS_ENABLED(CONFIG_POWER_CTRL_LOGIC)
+	ret = power_ctrl_logic_set_power(1);
+#endif
+#if IS_ENABLED(CONFIG_INTEL_SKL_INT3472) || IS_ENABLED(CONFIG_POWER_CTRL_LOGIC)
+	if (!ret) {
+		ov01a1s->power_type = OV01A1S_USE_INT3472;
+		return 0;
+	}
+#endif
+	if (ret == -EAGAIN)
+		return -EPROBE_DEFER;
+
+	return ret;
+}
+
+static int ov01a1s_probe(struct i2c_client *client)
+{
+	struct ov01a1s *ov01a1s;
+	int ret = 0;
+
+	ov01a1s = devm_kzalloc(&client->dev, sizeof(*ov01a1s), GFP_KERNEL);
+	if (!ov01a1s)
+		return -ENOMEM;
+
+	ov01a1s->client = client;
+	ret = ov01a1s_parse_power(ov01a1s);
+	if (ret)
+		return ret;
+
+	v4l2_i2c_subdev_init(&ov01a1s->sd, client, &ov01a1s_subdev_ops);
+#if IS_ENABLED(CONFIG_INTEL_SKL_INT3472)
+	/* In other cases, power is up in ov01a1s_parse_power */
+	if (ov01a1s->power_type == OV01A1S_USE_INT3472)
+		ov01a1s_power_on(&client->dev);
+#endif
+	ret = ov01a1s_identify_module(ov01a1s);
+	if (ret) {
+		dev_err(&client->dev, "failed to find sensor: %d", ret);
+		goto probe_error_power_off;
+	}
+
+	mutex_init(&ov01a1s->mutex);
+	ov01a1s->cur_mode = &supported_modes[0];
+	ret = ov01a1s_init_controls(ov01a1s);
+	if (ret) {
+		dev_err(&client->dev, "failed to init controls: %d", ret);
+		goto probe_error_v4l2_ctrl_handler_free;
+	}
+
+	ov01a1s->sd.internal_ops = &ov01a1s_internal_ops;
+	ov01a1s->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	ov01a1s->sd.entity.ops = &ov01a1s_subdev_entity_ops;
+	ov01a1s->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+	ov01a1s->pad.flags = MEDIA_PAD_FL_SOURCE;
+	ret = media_entity_pads_init(&ov01a1s->sd.entity, 1, &ov01a1s->pad);
+	if (ret) {
+		dev_err(&client->dev, "failed to init entity pads: %d", ret);
+		goto probe_error_v4l2_ctrl_handler_free;
+	}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)
+	ret = v4l2_async_register_subdev_sensor_common(&ov01a1s->sd);
+#else
+	ret = v4l2_async_register_subdev_sensor(&ov01a1s->sd);
+#endif
+	if (ret < 0) {
+		dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+			ret);
+		goto probe_error_media_entity_cleanup;
+	}
+
+	/*
+	 * Device is already turned on by i2c-core with ACPI domain PM.
+	 * Enable runtime PM and turn off the device.
+	 */
+	pm_runtime_set_active(&client->dev);
+	pm_runtime_enable(&client->dev);
+	pm_runtime_idle(&client->dev);
+
+	return 0;
+
+probe_error_media_entity_cleanup:
+	media_entity_cleanup(&ov01a1s->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+	v4l2_ctrl_handler_free(ov01a1s->sd.ctrl_handler);
+	mutex_destroy(&ov01a1s->mutex);
+
+probe_error_power_off:
+	ov01a1s_power_off(&client->dev);
+
+	return ret;
+}
+
+static const struct dev_pm_ops ov01a1s_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(ov01a1s_suspend, ov01a1s_resume)
+	SET_RUNTIME_PM_OPS(ov01a1s_power_off, ov01a1s_power_on, NULL)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ov01a1s_acpi_ids[] = {
+	{ "OVTI01AS" },
+	{}
+};
+
+MODULE_DEVICE_TABLE(acpi, ov01a1s_acpi_ids);
+#endif
+
+static struct i2c_driver ov01a1s_i2c_driver = {
+	.driver = {
+		.name = "ov01a1s",
+		.pm = &ov01a1s_pm_ops,
+		.acpi_match_table = ACPI_PTR(ov01a1s_acpi_ids),
+	},
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)
+	.probe_new = ov01a1s_probe,
+#else
+	.probe = ov01a1s_probe,
+#endif
+	.remove = ov01a1s_remove,
+};
+
+module_i2c_driver(ov01a1s_i2c_driver);
+
+MODULE_AUTHOR("Xu, Chongyang <chongyang.xu@intel.com>");
+MODULE_AUTHOR("Lai, Jim <jim.lai@intel.com>");
+MODULE_AUTHOR("Qiu, Tianshu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_DESCRIPTION("OmniVision OV01A1S sensor driver");
+MODULE_LICENSE("GPL v2");
-- 
2.43.0

From 07d707663a69f65c366d3ac75c2bf41749f33224 Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Mon, 6 Nov 2023 12:33:56 +0100
Subject: [PATCH 18/31] media: ov01a1s: Remove non-upstream iVSC support

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 drivers/media/i2c/ov01a1s.c | 71 -------------------------------------
 1 file changed, 71 deletions(-)

diff --git a/drivers/media/i2c/ov01a1s.c b/drivers/media/i2c/ov01a1s.c
index 0dcce8b492b4..c97c1a661022 100644
--- a/drivers/media/i2c/ov01a1s.c
+++ b/drivers/media/i2c/ov01a1s.c
@@ -17,9 +17,6 @@
 #elif IS_ENABLED(CONFIG_POWER_CTRL_LOGIC)
 #include "power_ctrl_logic.h"
 #endif
-#if IS_ENABLED(CONFIG_INTEL_VSC)
-#include <linux/vsc.h>
-#endif
 
 #define OV01A1S_LINK_FREQ_400MHZ	400000000ULL
 #define OV01A1S_SCLK			40000000LL
@@ -302,13 +299,6 @@ struct ov01a1s {
 	struct v4l2_ctrl *vblank;
 	struct v4l2_ctrl *hblank;
 	struct v4l2_ctrl *exposure;
-#if IS_ENABLED(CONFIG_INTEL_VSC)
-	struct v4l2_ctrl *privacy_status;
-
-	/* VSC settings */
-	struct vsc_mipi_config conf;
-	struct vsc_camera_status status;
-#endif
 
 	/* Current mode */
 	const struct ov01a1s_mode *cur_mode;
@@ -334,9 +324,6 @@ struct ov01a1s {
 		OV01A1S_USE_DEFAULT = 0,
 #if IS_ENABLED(CONFIG_INTEL_SKL_INT3472) || IS_ENABLED(CONFIG_POWER_CTRL_LOGIC)
 		OV01A1S_USE_INT3472 = 1,
-#endif
-#if IS_ENABLED(CONFIG_INTEL_VSC)
-		OV01A1S_USE_INTEL_VSC = 2,
 #endif
 	} power_type;
 
@@ -505,12 +492,6 @@ static int ov01a1s_set_ctrl(struct v4l2_ctrl *ctrl)
 		ret = ov01a1s_test_pattern(ov01a1s, ctrl->val);
 		break;
 
-#if IS_ENABLED(CONFIG_INTEL_VSC)
-	case V4L2_CID_PRIVACY:
-		dev_dbg(&client->dev, "set privacy to %d", ctrl->val);
-		break;
-
-#endif
 	default:
 		ret = -EINVAL;
 		break;
@@ -535,11 +516,7 @@ static int ov01a1s_init_controls(struct ov01a1s *ov01a1s)
 	int ret = 0;
 
 	ctrl_hdlr = &ov01a1s->ctrl_handler;
-#if IS_ENABLED(CONFIG_INTEL_VSC)
-	ret = v4l2_ctrl_handler_init(ctrl_hdlr, 9);
-#else
 	ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
-#endif
 	if (ret)
 		return ret;
 
@@ -572,12 +549,6 @@ static int ov01a1s_init_controls(struct ov01a1s *ov01a1s)
 					    1, h_blank);
 	if (ov01a1s->hblank)
 		ov01a1s->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
-#if IS_ENABLED(CONFIG_INTEL_VSC)
-	ov01a1s->privacy_status = v4l2_ctrl_new_std(ctrl_hdlr,
-						    &ov01a1s_ctrl_ops,
-						    V4L2_CID_PRIVACY,
-						    0, 1, 1, 0);
-#endif
 
 	v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
 			  OV01A1S_ANAL_GAIN_MIN, OV01A1S_ANAL_GAIN_MAX,
@@ -613,16 +584,6 @@ static void ov01a1s_update_pad_format(const struct ov01a1s_mode *mode,
 	fmt->field = V4L2_FIELD_NONE;
 }
 
-#if IS_ENABLED(CONFIG_INTEL_VSC)
-static void ov01a1s_vsc_privacy_callback(void *handle,
-				       enum vsc_privacy_status status)
-{
-	struct ov01a1s *ov01a1s = handle;
-
-	v4l2_ctrl_s_ctrl(ov01a1s->privacy_status, !status);
-}
-
-#endif
 static int ov01a1s_start_streaming(struct ov01a1s *ov01a1s)
 {
 	struct i2c_client *client = ov01a1s->client;
@@ -722,13 +683,6 @@ static int ov01a1s_power_off(struct device *dev)
 	if (ov01a1s->power_type == OV01A1S_USE_INT3472)
 		ret = power_ctrl_logic_set_power(0);
 #endif
-#if IS_ENABLED(CONFIG_INTEL_VSC)
-	if (ov01a1s->power_type == OV01A1S_USE_INTEL_VSC) {
-		ret = vsc_release_camera_sensor(&ov01a1s->status);
-		if (ret && ret != -EAGAIN)
-			dev_err(dev, "Release VSC failed");
-	}
-#endif
 
 	return ret;
 }
@@ -756,19 +710,6 @@ static int ov01a1s_power_on(struct device *dev)
 	if (ov01a1s->power_type == OV01A1S_USE_INT3472)
 		ret = power_ctrl_logic_set_power(1);
 #endif
-#if IS_ENABLED(CONFIG_INTEL_VSC)
-	if (ov01a1s->power_type == OV01A1S_USE_INTEL_VSC) {
-		ret = vsc_acquire_camera_sensor(&ov01a1s->conf,
-						ov01a1s_vsc_privacy_callback,
-						ov01a1s, &ov01a1s->status);
-		if (ret && ret != -EAGAIN) {
-			dev_err(dev, "Acquire VSC failed");
-			return ret;
-		}
-		__v4l2_ctrl_s_ctrl(ov01a1s->privacy_status,
-				   !(ov01a1s->status.status));
-	}
-#endif
 
 	return ret;
 }
@@ -1044,18 +985,6 @@ static int ov01a1s_parse_power(struct ov01a1s *ov01a1s)
 {
 	int ret = 0;
 
-#if IS_ENABLED(CONFIG_INTEL_VSC)
-	ov01a1s->conf.lane_num = OV01A1S_DATA_LANES;
-	/* frequency unit 100k */
-	ov01a1s->conf.freq = OV01A1S_LINK_FREQ_400MHZ / 100000;
-	ret = vsc_acquire_camera_sensor(&ov01a1s->conf, NULL, NULL, &ov01a1s->status);
-	if (!ret) {
-		ov01a1s->power_type = OV01A1S_USE_INTEL_VSC;
-		return 0;
-	} else if (ret != -EAGAIN) {
-		return ret;
-	}
-#endif
 #if IS_ENABLED(CONFIG_INTEL_SKL_INT3472)
 	ret = ov01a1s_parse_gpio(ov01a1s);
 #elif IS_ENABLED(CONFIG_POWER_CTRL_LOGIC)
-- 
2.43.0

From 3f59512af8a39b5d22694e2996d8441d5e723423 Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Mon, 4 Dec 2023 13:39:38 +0100
Subject: [PATCH 19/31] media: ov2740: Add support for reset GPIO

On some ACPI platforms, such as Chromebooks, the ACPI methods to
change the power-state (_PS0 and _PS3) fully take care of powering
the sensor on/off.

On other ACPI platforms, such as various ThinkPad models with an
IPU6 + ov2740 sensor, the sensor driver must control the reset GPIO
and the sensor's clock itself.

Add support for having the driver control an optional reset GPIO.

Reviewed-by: Bingbu Cao <bingbu.cao@intel.com>
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
---
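As an illustration of the pattern this patch introduces (not the literal
driver code), here is a minimal sketch of acquiring an optional, active-high
reset line and releasing the sensor from reset; the helper names are
hypothetical:

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/*
 * Hypothetical helpers mirroring the approach: request the reset line as
 * GPIOD_OUT_HIGH (logically asserted, i.e. sensor held in reset) at probe
 * time, then deassert it on resume and assert it again on suspend.  When
 * ACPI does not describe a reset GPIO, devm_gpiod_get_optional() returns
 * NULL and the gpiod_set_value_cansleep() calls become no-ops.
 */
static int sensor_get_reset_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return dev_err_probe(dev, PTR_ERR(reset),
				     "failed to get reset GPIO\n");

	*out = reset;	/* NULL when no reset line is described */
	return 0;
}

static void sensor_leave_reset(struct gpio_desc *reset)
{
	gpiod_set_value_cansleep(reset, 0);	/* deassert reset */
	msleep(20);				/* allow the sensor to boot */
}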
 drivers/media/i2c/ov2740.c | 48 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 46 insertions(+), 2 deletions(-)

diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 24e468485fbf..e5f9569a229d 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -4,6 +4,7 @@
 #include <asm/unaligned.h>
 #include <linux/acpi.h>
 #include <linux/delay.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
@@ -333,6 +334,9 @@ struct ov2740 {
 	struct v4l2_ctrl *hblank;
 	struct v4l2_ctrl *exposure;
 
+	/* GPIOs, clocks */
+	struct gpio_desc *reset_gpio;
+
 	/* Current mode */
 	const struct ov2740_mode *cur_mode;
 
@@ -1058,6 +1062,26 @@ static int ov2740_register_nvmem(struct i2c_client *client,
 	return 0;
 }
 
+static int ov2740_suspend(struct device *dev)
+{
+	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+	struct ov2740 *ov2740 = to_ov2740(sd);
+
+	gpiod_set_value_cansleep(ov2740->reset_gpio, 1);
+	return 0;
+}
+
+static int ov2740_resume(struct device *dev)
+{
+	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+	struct ov2740 *ov2740 = to_ov2740(sd);
+
+	gpiod_set_value_cansleep(ov2740->reset_gpio, 0);
+	msleep(20);
+
+	return 0;
+}
+
 static int ov2740_probe(struct i2c_client *client)
 {
 	struct device *dev = &client->dev;
@@ -1073,12 +1097,24 @@ static int ov2740_probe(struct i2c_client *client)
 	if (!ov2740)
 		return -ENOMEM;
 
+	ov2740->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(ov2740->reset_gpio))
+		return dev_err_probe(dev, PTR_ERR(ov2740->reset_gpio),
+				     "failed to get reset GPIO\n");
+
 	v4l2_i2c_subdev_init(&ov2740->sd, client, &ov2740_subdev_ops);
 	full_power = acpi_dev_state_d0(&client->dev);
 	if (full_power) {
-		ret = ov2740_identify_module(ov2740);
+		/* ACPI does not always clear the reset GPIO / enable the clock */
+		ret = ov2740_resume(dev);
 		if (ret)
-			return dev_err_probe(dev, ret, "failed to find sensor\n");
+			return dev_err_probe(dev, ret, "failed to power on sensor\n");
+
+		ret = ov2740_identify_module(ov2740);
+		if (ret) {
+			dev_err_probe(dev, ret, "failed to find sensor\n");
+			goto probe_error_power_off;
+		}
 	}
 
 	ov2740->cur_mode = &supported_modes[0];
@@ -1132,9 +1168,16 @@ static int ov2740_probe(struct i2c_client *client)
 probe_error_v4l2_ctrl_handler_free:
 	v4l2_ctrl_handler_free(ov2740->sd.ctrl_handler);
 
+probe_error_power_off:
+	if (full_power)
+		ov2740_suspend(dev);
+
 	return ret;
 }
 
+static DEFINE_RUNTIME_DEV_PM_OPS(ov2740_pm_ops, ov2740_suspend, ov2740_resume,
+				 NULL);
+
 static const struct acpi_device_id ov2740_acpi_ids[] = {
 	{"INT3474"},
 	{}
@@ -1146,6 +1189,7 @@ static struct i2c_driver ov2740_i2c_driver = {
 	.driver = {
 		.name = "ov2740",
 		.acpi_match_table = ov2740_acpi_ids,
+		.pm = pm_sleep_ptr(&ov2740_pm_ops),
 	},
 	.probe = ov2740_probe,
 	.remove = ov2740_remove,
-- 
2.43.0

From ddde5ef9b19c4e4755be62c588209db986e57400 Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Mon, 4 Dec 2023 13:39:39 +0100
Subject: [PATCH 20/31] media: ov2740: Add support for external clock

On some ACPI platforms, such as Chromebooks, the ACPI methods to
change the power-state (_PS0 and _PS3) fully take care of powering
the sensor on/off.

On other ACPI platforms, such as various ThinkPad models with an
IPU6 + ov2740 sensor, the sensor driver must control the reset GPIO
and the sensor's clock itself.

Add support for having the driver control an optional clock.

Reviewed-by: Bingbu Cao <bingbu.cao@intel.com>
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
---
 drivers/media/i2c/ov2740.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index e5f9569a229d..0396e40419ca 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -3,6 +3,7 @@
 
 #include <asm/unaligned.h>
 #include <linux/acpi.h>
+#include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
@@ -336,6 +337,7 @@ struct ov2740 {
 
 	/* GPIOs, clocks */
 	struct gpio_desc *reset_gpio;
+	struct clk *clk;
 
 	/* Current mode */
 	const struct ov2740_mode *cur_mode;
@@ -1068,6 +1070,7 @@ static int ov2740_suspend(struct device *dev)
 	struct ov2740 *ov2740 = to_ov2740(sd);
 
 	gpiod_set_value_cansleep(ov2740->reset_gpio, 1);
+	clk_disable_unprepare(ov2740->clk);
 	return 0;
 }
 
@@ -1075,6 +1078,11 @@ static int ov2740_resume(struct device *dev)
 {
 	struct v4l2_subdev *sd = dev_get_drvdata(dev);
 	struct ov2740 *ov2740 = to_ov2740(sd);
+	int ret;
+
+	ret = clk_prepare_enable(ov2740->clk);
+	if (ret)
+		return ret;
 
 	gpiod_set_value_cansleep(ov2740->reset_gpio, 0);
 	msleep(20);
@@ -1102,6 +1110,11 @@ static int ov2740_probe(struct i2c_client *client)
 		return dev_err_probe(dev, PTR_ERR(ov2740->reset_gpio),
 				     "failed to get reset GPIO\n");
 
+	ov2740->clk = devm_clk_get_optional(dev, "clk");
+	if (IS_ERR(ov2740->clk))
+		return dev_err_probe(dev, PTR_ERR(ov2740->clk),
+				     "failed to get clock\n");
+
 	v4l2_i2c_subdev_init(&ov2740->sd, client, &ov2740_subdev_ops);
 	full_power = acpi_dev_state_d0(&client->dev);
 	if (full_power) {
-- 
2.43.0

From be361f059239a5444a436a73888cc1c1665d90a7 Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Mon, 4 Dec 2023 13:39:40 +0100
Subject: [PATCH 21/31] media: ov2740: Move fwnode_graph_get_next_endpoint()
 call up

If the bridge has not yet set up the fwnode graph, then the
fwnode_property_read_u32("clock-frequency") call will fail.

Move the fwnode_graph_get_next_endpoint() call to before reading the
clock-frequency, so that probing is deferred instead of failing.

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
---
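A condensed sketch (simplified, with hypothetical function and constant
names) of the ordering this patch establishes: probe the graph endpoint
first, so a bridge that has not populated the graph yet maps to
-EPROBE_DEFER rather than a hard failure from the property read:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/property.h>
#include <linux/types.h>

#define SENSOR_MCLK	19200000	/* expected external clock, example value */

static int sensor_check_hwcfg(struct device *dev)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	struct fwnode_handle *ep;
	u32 mclk;
	int ret;

	/*
	 * The fwnode graph may still be under construction by the bridge
	 * driver; a missing endpoint means "retry later", not "bad config".
	 */
	ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
	if (!ep)
		return -EPROBE_DEFER;

	ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
	if (ret || mclk != SENSOR_MCLK) {
		fwnode_handle_put(ep);
		return ret ?: -EINVAL;
	}

	/* ...endpoint/bus parsing would continue from here... */
	fwnode_handle_put(ep);
	return 0;
}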
 drivers/media/i2c/ov2740.c | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 0396e40419ca..35b2f43bd3e5 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -926,19 +926,27 @@ static int ov2740_check_hwcfg(struct device *dev)
 	int ret;
 	unsigned int i, j;
 
-	ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
-	if (ret)
-		return ret;
-
-	if (mclk != OV2740_MCLK)
-		return dev_err_probe(dev, -EINVAL,
-				     "external clock %d is not supported\n",
-				     mclk);
-
+	/*
+	 * Sometimes the fwnode graph is initialized by the bridge driver,
+	 * wait for this.
+	 */
 	ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
 	if (!ep)
 		return -EPROBE_DEFER;
 
+	ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
+	if (ret) {
+		fwnode_handle_put(ep);
+		return ret;
+	}
+
+	if (mclk != OV2740_MCLK) {
+		fwnode_handle_put(ep);
+		return dev_err_probe(dev, -EINVAL,
+				     "external clock %d is not supported\n",
+				     mclk);
+	}
+
 	ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
 	fwnode_handle_put(ep);
 	if (ret)
-- 
2.43.0

From 4d499411eccc2db42d0d21365039f479c9367214 Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Mon, 4 Dec 2023 13:39:41 +0100
Subject: [PATCH 22/31] media: ov2740: Improve ov2740_check_hwcfg() error
 reporting

Make ov2740_check_hwcfg() report an error on failure in all error paths,
so that it is always clear why the probe() failed.

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
---
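For background on the helper being used (a summary, not part of the patch):
dev_err_probe() returns the error passed to it, prints it for real failures,
and for -EPROBE_DEFER only records the deferral reason instead of spamming
the log, which is why every early return can safely carry a message. A
small, hypothetical fragment:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical probe fragment: each failure path states why it failed. */
static int example_check_clock(struct device *dev, u32 mclk, u32 expected)
{
	if (!mclk)
		/* quiet for -EPROBE_DEFER: only stored as deferral reason */
		return dev_err_probe(dev, -EPROBE_DEFER,
				     "clock-frequency not available yet\n");

	if (mclk != expected)
		return dev_err_probe(dev, -EINVAL,
				     "external clock %u is not supported\n",
				     mclk);

	return 0;
}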
 drivers/media/i2c/ov2740.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 35b2f43bd3e5..87176948f766 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -937,7 +937,8 @@ static int ov2740_check_hwcfg(struct device *dev)
 	ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
 	if (ret) {
 		fwnode_handle_put(ep);
-		return ret;
+		return dev_err_probe(dev, ret,
+				     "reading clock-frequency property\n");
 	}
 
 	if (mclk != OV2740_MCLK) {
@@ -950,7 +951,7 @@ static int ov2740_check_hwcfg(struct device *dev)
 	ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
 	fwnode_handle_put(ep);
 	if (ret)
-		return ret;
+		return dev_err_probe(dev, ret, "parsing endpoint failed\n");
 
 	if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV2740_DATA_LANES) {
 		ret = dev_err_probe(dev, -EINVAL,
-- 
2.43.0

From 28c5209404274ded2f2fb2c93611da32e03a2538 Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Mon, 4 Dec 2023 13:39:43 +0100
Subject: [PATCH 24/31] media: ov2740: Check hwcfg after allocating the ov2740
 struct

Allocate the ov2740 struct and set up the drvdata pointer before
calling ov2740_check_hwcfg().

This is a preparation patch to allow ov2740_check_hwcfg()
to store some of the parsed data in the ov2740 struct.

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
---
 drivers/media/i2c/ov2740.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index a646be427ab2..28f4659a6bfb 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -1095,14 +1095,16 @@ static int ov2740_probe(struct i2c_client *client)
 	bool full_power;
 	int ret;
 
-	ret = ov2740_check_hwcfg(&client->dev);
-	if (ret)
-		return dev_err_probe(dev, ret, "failed to check HW configuration\n");
-
 	ov2740 = devm_kzalloc(&client->dev, sizeof(*ov2740), GFP_KERNEL);
 	if (!ov2740)
 		return -ENOMEM;
 
+	v4l2_i2c_subdev_init(&ov2740->sd, client, &ov2740_subdev_ops);
+
+	ret = ov2740_check_hwcfg(dev);
+	if (ret)
+		return dev_err_probe(dev, ret, "failed to check HW configuration\n");
+
 	ov2740->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
 	if (IS_ERR(ov2740->reset_gpio))
 		return dev_err_probe(dev, PTR_ERR(ov2740->reset_gpio),
@@ -1113,7 +1115,6 @@ static int ov2740_probe(struct i2c_client *client)
 		return dev_err_probe(dev, PTR_ERR(ov2740->clk),
 				     "failed to get clock\n");
 
-	v4l2_i2c_subdev_init(&ov2740->sd, client, &ov2740_subdev_ops);
 	full_power = acpi_dev_state_d0(&client->dev);
 	if (full_power) {
 		/* ACPI does not always clear the reset GPIO / enable the clock */
-- 
2.43.0

From 5e06f2a1cbbaf0ceffd62475ad4170f7224372be Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Mon, 4 Dec 2023 13:39:44 +0100
Subject: [PATCH 25/31] media: ov2740: Add support for 180 MHz link frequency

On various Lenovo ThinkPad models with an ov2740 sensor, the 360 MHz
link frequency is not supported.

Add support for the 180 MHz link frequency. Even though this has half
the pixel clock, it supports the same framerate by using half the VTS
value (significantly reducing the number of empty lines sent during
vblank).

Normally, if there are multiple link-frequencies, the sensor driver
chooses the lowest link-frequency still usable for the chosen resolution.

In this case the board supports only one link-frequency. Which
frequency is supported is checked in ov2740_check_hwcfg(), which then
selects a matching set of supported_modes (using only that link-freq).

The register settings for this were taken from the ov2740 sensor
driver in the out-of-tree IPU6 driver:

https://github.com/intel/ipu6-drivers/

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
---
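A quick sanity check of the "same framerate at half the pixel clock" claim,
using the HTS/VTS values from the modes added below. The pixel-rate formula
(link_freq * 2 * lanes / 10 for RAW10 over 2 CSI-2 lanes) is an assumption
made here for illustration, not something stated in this patch:

#include <stdio.h>

int main(void)
{
	const double lanes = 2.0, bits_per_pixel = 10.0;
	/* pixel rate = link frequency * 2 (DDR) * lanes / bits per pixel */
	const double rate_360 = 360e6 * 2.0 * lanes / bits_per_pixel; /* 144 MHz */
	const double rate_180 = 180e6 * 2.0 * lanes / bits_per_pixel; /*  72 MHz */

	/* hts = 2160 for both modes; vts_def = 2186 (360 MHz) vs 1110 (180 MHz) */
	printf("360 MHz: %.2f fps\n", rate_360 / (2160.0 * 2186.0)); /* ~30.5 */
	printf("180 MHz: %.2f fps\n", rate_180 / (2160.0 * 1110.0)); /* ~30.0 */
	return 0;
}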
 drivers/media/i2c/ov2740.c | 263 +++++++++++++++++++++++++++++++++----
 1 file changed, 239 insertions(+), 24 deletions(-)

diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 28f4659a6bfb..85629992d3aa 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -16,6 +16,7 @@
 #include <media/v4l2-fwnode.h>
 
 #define OV2740_LINK_FREQ_360MHZ		360000000ULL
+#define OV2740_LINK_FREQ_180MHZ		180000000ULL
 #define OV2740_SCLK			72000000LL
 #define OV2740_MCLK			19200000
 #define OV2740_DATA_LANES		2
@@ -30,9 +31,6 @@
 
 /* vertical-timings from sensor */
 #define OV2740_REG_VTS			0x380e
-#define OV2740_VTS_DEF			0x088a
-#define OV2740_VTS_MIN			0x0460
-#define OV2740_VTS_MAX			0x7fff
 
 /* horizontal-timings from sensor */
 #define OV2740_REG_HTS			0x380c
@@ -86,6 +84,7 @@ struct nvm_data {
 
 enum {
 	OV2740_LINK_FREQ_360MHZ_INDEX,
+	OV2740_LINK_FREQ_180MHZ_INDEX,
 };
 
 struct ov2740_reg {
@@ -118,6 +117,9 @@ struct ov2740_mode {
 	/* Min vertical timining size */
 	u32 vts_min;
 
+	/* Max vertical timining size */
+	u32 vts_max;
+
 	/* Link frequency needed for this resolution */
 	u32 link_freq_index;
 
@@ -134,7 +136,18 @@ static const struct ov2740_reg mipi_data_rate_720mbps[] = {
 	{0x0312, 0x11},
 };
 
-static const struct ov2740_reg mode_1932x1092_regs[] = {
+static const struct ov2740_reg mipi_data_rate_360mbps[] = {
+	{0x0103, 0x01},
+	{0x0302, 0x4b},
+	{0x0303, 0x01},
+	{0x030d, 0x4b},
+	{0x030e, 0x02},
+	{0x030a, 0x01},
+	{0x0312, 0x11},
+	{0x4837, 0x2c},
+};
+
+static const struct ov2740_reg mode_1932x1092_regs_360mhz[] = {
 	{0x3000, 0x00},
 	{0x3018, 0x32},
 	{0x3031, 0x0a},
@@ -287,6 +300,159 @@ static const struct ov2740_reg mode_1932x1092_regs[] = {
 	{0x3813, 0x01},
 };
 
+static const struct ov2740_reg mode_1932x1092_regs_180mhz[] = {
+	{0x3000, 0x00},
+	{0x3018, 0x32},	/* 0x32 for 2 lanes, 0x12 for 1 lane */
+	{0x3031, 0x0a},
+	{0x3080, 0x08},
+	{0x3083, 0xB4},
+	{0x3103, 0x00},
+	{0x3104, 0x01},
+	{0x3106, 0x01},
+	{0x3500, 0x00},
+	{0x3501, 0x44},
+	{0x3502, 0x40},
+	{0x3503, 0x88},
+	{0x3507, 0x00},
+	{0x3508, 0x00},
+	{0x3509, 0x80},
+	{0x350c, 0x00},
+	{0x350d, 0x80},
+	{0x3510, 0x00},
+	{0x3511, 0x00},
+	{0x3512, 0x20},
+	{0x3632, 0x00},
+	{0x3633, 0x10},
+	{0x3634, 0x10},
+	{0x3635, 0x10},
+	{0x3645, 0x13},
+	{0x3646, 0x81},
+	{0x3636, 0x10},
+	{0x3651, 0x0a},
+	{0x3656, 0x02},
+	{0x3659, 0x04},
+	{0x365a, 0xda},
+	{0x365b, 0xa2},
+	{0x365c, 0x04},
+	{0x365d, 0x1d},
+	{0x365e, 0x1a},
+	{0x3662, 0xd7},
+	{0x3667, 0x78},
+	{0x3669, 0x0a},
+	{0x366a, 0x92},
+	{0x3700, 0x54},
+	{0x3702, 0x10},
+	{0x3706, 0x42},
+	{0x3709, 0x30},
+	{0x370b, 0xc2},
+	{0x3714, 0x63},
+	{0x3715, 0x01},
+	{0x3716, 0x00},
+	{0x371a, 0x3e},
+	{0x3732, 0x0e},
+	{0x3733, 0x10},
+	{0x375f, 0x0e},
+	{0x3768, 0x30},
+	{0x3769, 0x44},
+	{0x376a, 0x22},
+	{0x377b, 0x20},
+	{0x377c, 0x00},
+	{0x377d, 0x0c},
+	{0x3798, 0x00},
+	{0x37a1, 0x55},
+	{0x37a8, 0x6d},
+	{0x37c2, 0x04},
+	{0x37c5, 0x00},
+	{0x37c8, 0x00},
+	{0x3800, 0x00},
+	{0x3801, 0x00},
+	{0x3802, 0x00},
+	{0x3803, 0x00},
+	{0x3804, 0x07},
+	{0x3805, 0x8f},
+	{0x3806, 0x04},
+	{0x3807, 0x47},
+	{0x3808, 0x07},
+	{0x3809, 0x88},
+	{0x380a, 0x04},
+	{0x380b, 0x40},
+	{0x380c, 0x08},
+	{0x380d, 0x70},
+	{0x380e, 0x04},
+	{0x380f, 0x56},
+	{0x3810, 0x00},
+	{0x3811, 0x04},
+	{0x3812, 0x00},
+	{0x3813, 0x04},
+	{0x3814, 0x01},
+	{0x3815, 0x01},
+	{0x3820, 0x80},
+	{0x3821, 0x46},
+	{0x3822, 0x84},
+	{0x3829, 0x00},
+	{0x382a, 0x01},
+	{0x382b, 0x01},
+	{0x3830, 0x04},
+	{0x3836, 0x01},
+	{0x3837, 0x08},
+	{0x3839, 0x01},
+	{0x383a, 0x00},
+	{0x383b, 0x08},
+	{0x383c, 0x00},
+	{0x3f0b, 0x00},
+	{0x4001, 0x20},
+	{0x4009, 0x07},
+	{0x4003, 0x10},
+	{0x4010, 0xe0},
+	{0x4016, 0x00},
+	{0x4017, 0x10},
+	{0x4044, 0x02},
+	{0x4304, 0x08},
+	{0x4307, 0x30},
+	{0x4320, 0x80},
+	{0x4322, 0x00},
+	{0x4323, 0x00},
+	{0x4324, 0x00},
+	{0x4325, 0x00},
+	{0x4326, 0x00},
+	{0x4327, 0x00},
+	{0x4328, 0x00},
+	{0x4329, 0x00},
+	{0x432c, 0x03},
+	{0x432d, 0x81},
+	{0x4501, 0x84},
+	{0x4502, 0x40},
+	{0x4503, 0x18},
+	{0x4504, 0x04},
+	{0x4508, 0x02},
+	{0x4601, 0x10},
+	{0x4800, 0x00},
+	{0x4816, 0x52},
+	{0x5000, 0x73},	/* 0x7f enable DPC */
+	{0x5001, 0x00},
+	{0x5005, 0x38},
+	{0x501e, 0x0d},
+	{0x5040, 0x00},
+	{0x5901, 0x00},
+	{0x3800, 0x00},
+	{0x3801, 0x00},
+	{0x3802, 0x00},
+	{0x3803, 0x00},
+	{0x3804, 0x07},
+	{0x3805, 0x8f},
+	{0x3806, 0x04},
+	{0x3807, 0x47},
+	{0x3808, 0x07},
+	{0x3809, 0x8c},
+	{0x380a, 0x04},
+	{0x380b, 0x44},
+	{0x3810, 0x00},
+	{0x3811, 0x00},
+	{0x3812, 0x00},
+	{0x3813, 0x01},
+	{0x4003, 0x40},	/* set Black level to 0x40 */
+};
+
 static const char * const ov2740_test_pattern_menu[] = {
 	"Disabled",
 	"Color Bar",
@@ -297,6 +463,7 @@ static const char * const ov2740_test_pattern_menu[] = {
 
 static const s64 link_freq_menu_items[] = {
 	OV2740_LINK_FREQ_360MHZ,
+	OV2740_LINK_FREQ_180MHZ,
 };
 
 static const struct ov2740_link_freq_config link_freq_configs[] = {
@@ -306,23 +473,46 @@ static const struct ov2740_link_freq_config link_freq_configs[] = {
 			.regs = mipi_data_rate_720mbps,
 		}
 	},
+	[OV2740_LINK_FREQ_180MHZ_INDEX] = {
+		.reg_list = {
+			.num_of_regs = ARRAY_SIZE(mipi_data_rate_360mbps),
+			.regs = mipi_data_rate_360mbps,
+		}
+	},
 };
 
-static const struct ov2740_mode supported_modes[] = {
+static const struct ov2740_mode supported_modes_360mhz[] = {
 	{
 		.width = 1932,
 		.height = 1092,
 		.hts = 2160,
-		.vts_def = OV2740_VTS_DEF,
-		.vts_min = OV2740_VTS_MIN,
+		.vts_min = 1120,
+		.vts_def = 2186,
+		.vts_max = 32767,
 		.reg_list = {
-			.num_of_regs = ARRAY_SIZE(mode_1932x1092_regs),
-			.regs = mode_1932x1092_regs,
+			.num_of_regs = ARRAY_SIZE(mode_1932x1092_regs_360mhz),
+			.regs = mode_1932x1092_regs_360mhz,
 		},
 		.link_freq_index = OV2740_LINK_FREQ_360MHZ_INDEX,
 	},
 };
 
+static const struct ov2740_mode supported_modes_180mhz[] = {
+	{
+		.width = 1932,
+		.height = 1092,
+		.hts = 2160,
+		.vts_min = 1110,
+		.vts_def = 1110,
+		.vts_max = 2047,
+		.reg_list = {
+			.num_of_regs = ARRAY_SIZE(mode_1932x1092_regs_180mhz),
+			.regs = mode_1932x1092_regs_180mhz,
+		},
+		.link_freq_index = OV2740_LINK_FREQ_180MHZ_INDEX,
+	},
+};
+
 struct ov2740 {
 	struct v4l2_subdev sd;
 	struct media_pad pad;
@@ -345,6 +535,10 @@ struct ov2740 {
 	/* NVM data inforamtion */
 	struct nvm_data *nvm;
 
+	/* Supported modes */
+	const struct ov2740_mode *supported_modes;
+	int supported_modes_count;
+
 	/* True if the device has been identified */
 	bool identified;
 };
@@ -589,7 +783,7 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
 					       pixel_rate, 1, pixel_rate);
 
 	vblank_min = cur_mode->vts_min - cur_mode->height;
-	vblank_max = OV2740_VTS_MAX - cur_mode->height;
+	vblank_max = cur_mode->vts_max - cur_mode->height;
 	vblank_default = cur_mode->vts_def - cur_mode->height;
 	ov2740->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops,
 					   V4L2_CID_VBLANK, vblank_min,
@@ -816,10 +1010,10 @@ static int ov2740_set_format(struct v4l2_subdev *sd,
 	const struct ov2740_mode *mode;
 	s32 vblank_def, h_blank;
 
-	mode = v4l2_find_nearest_size(supported_modes,
-				      ARRAY_SIZE(supported_modes), width,
-				      height, fmt->format.width,
-				      fmt->format.height);
+	mode = v4l2_find_nearest_size(ov2740->supported_modes,
+				      ov2740->supported_modes_count,
+				      width, height,
+				      fmt->format.width, fmt->format.height);
 
 	ov2740_update_pad_format(mode, &fmt->format);
 	*v4l2_subdev_get_pad_format(sd, sd_state, fmt->pad) = fmt->format;
@@ -836,7 +1030,7 @@ static int ov2740_set_format(struct v4l2_subdev *sd,
 	vblank_def = mode->vts_def - mode->height;
 	__v4l2_ctrl_modify_range(ov2740->vblank,
 				 mode->vts_min - mode->height,
-				 OV2740_VTS_MAX - mode->height, 1, vblank_def);
+				 mode->vts_max - mode->height, 1, vblank_def);
 	__v4l2_ctrl_s_ctrl(ov2740->vblank, vblank_def);
 	h_blank = mode->hts - mode->width;
 	__v4l2_ctrl_modify_range(ov2740->hblank, h_blank, h_blank, 1, h_blank);
@@ -860,7 +1054,10 @@ static int ov2740_enum_frame_size(struct v4l2_subdev *sd,
 				  struct v4l2_subdev_state *sd_state,
 				  struct v4l2_subdev_frame_size_enum *fse)
 {
-	if (fse->index >= ARRAY_SIZE(supported_modes))
+	struct ov2740 *ov2740 = to_ov2740(sd);
+	const struct ov2740_mode *supported_modes = ov2740->supported_modes;
+
+	if (fse->index >= ov2740->supported_modes_count)
 		return -EINVAL;
 
 	if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
@@ -877,9 +1074,10 @@ static int ov2740_enum_frame_size(struct v4l2_subdev *sd,
 static int ov2740_init_cfg(struct v4l2_subdev *sd,
 			   struct v4l2_subdev_state *sd_state)
 {
-	ov2740_update_pad_format(&supported_modes[0],
-				 v4l2_subdev_get_pad_format(sd, sd_state, 0));
+	struct ov2740 *ov2740 = to_ov2740(sd);
 
+	ov2740_update_pad_format(&ov2740->supported_modes[0],
+				 v4l2_subdev_get_pad_format(sd, sd_state, 0));
 	return 0;
 }
 
@@ -906,6 +1104,8 @@ static const struct media_entity_operations ov2740_subdev_entity_ops = {
 
 static int ov2740_check_hwcfg(struct device *dev)
 {
+	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+	struct ov2740 *ov2740 = to_ov2740(sd);
 	struct fwnode_handle *ep;
 	struct fwnode_handle *fwnode = dev_fwnode(dev);
 	struct v4l2_fwnode_endpoint bus_cfg = {
@@ -961,14 +1161,29 @@ static int ov2740_check_hwcfg(struct device *dev)
 				break;
 		}
 
-		if (j == bus_cfg.nr_of_link_frequencies) {
-			ret = dev_err_probe(dev, -EINVAL,
-					    "no link frequency %lld supported\n",
-					    link_freq_menu_items[i]);
-			goto check_hwcfg_error;
+		if (j == bus_cfg.nr_of_link_frequencies)
+			continue;
+
+		switch (i) {
+		case OV2740_LINK_FREQ_360MHZ_INDEX:
+			ov2740->supported_modes = supported_modes_360mhz;
+			ov2740->supported_modes_count =
+				ARRAY_SIZE(supported_modes_360mhz);
+			break;
+		case OV2740_LINK_FREQ_180MHZ_INDEX:
+			ov2740->supported_modes = supported_modes_180mhz;
+			ov2740->supported_modes_count =
+				ARRAY_SIZE(supported_modes_180mhz);
+			break;
 		}
+
+		break; /* Prefer modes from first available link-freq */
 	}
 
+	if (!ov2740->supported_modes)
+		ret = dev_err_probe(dev, -EINVAL,
+				    "no supported link frequencies\n");
+
 check_hwcfg_error:
 	v4l2_fwnode_endpoint_free(&bus_cfg);
 
@@ -1129,7 +1344,7 @@ static int ov2740_probe(struct i2c_client *client)
 		}
 	}
 
-	ov2740->cur_mode = &supported_modes[0];
+	ov2740->cur_mode = &ov2740->supported_modes[0];
 	ret = ov2740_init_controls(ov2740);
 	if (ret) {
 		dev_err_probe(dev, ret, "failed to init controls\n");
-- 
2.43.0

From b8c88a81135741a39f56d63673bfd2d2f5f12b3a Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Mon, 4 Dec 2023 13:39:45 +0100
Subject: [PATCH 26/31] media: ov2740: Add a sleep after resetting the sensor

Split the resetting of the sensor out of the link_freq_config reg_list
and add a delay after this.

This hopefully fixes the stream sometimes not starting, this was
taken from the ov2740 sensor driver in the out of tree IPU6 driver:

https://github.com/intel/ipu6-drivers/

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
---
 drivers/media/i2c/ov2740.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 85629992d3aa..9666f6e293d9 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -128,7 +128,6 @@ struct ov2740_mode {
 };
 
 static const struct ov2740_reg mipi_data_rate_720mbps[] = {
-	{0x0103, 0x01},
 	{0x0302, 0x4b},
 	{0x030d, 0x4b},
 	{0x030e, 0x02},
@@ -137,7 +136,6 @@ static const struct ov2740_reg mipi_data_rate_720mbps[] = {
 };
 
 static const struct ov2740_reg mipi_data_rate_360mbps[] = {
-	{0x0103, 0x01},
 	{0x0302, 0x4b},
 	{0x0303, 0x01},
 	{0x030d, 0x4b},
@@ -935,6 +933,15 @@ static int ov2740_start_streaming(struct ov2740 *ov2740)
 	if (ov2740->nvm)
 		ov2740_load_otp_data(ov2740->nvm);
 
+	/* Reset the sensor */
+	ret = ov2740_write_reg(ov2740, 0x0103, 1, 0x01);
+	if (ret) {
+		dev_err(&client->dev, "failed to reset\n");
+		return ret;
+	}
+
+	usleep_range(10000, 15000);
+
 	link_freq_index = ov2740->cur_mode->link_freq_index;
 	reg_list = &link_freq_configs[link_freq_index].reg_list;
 	ret = ov2740_write_reg_list(ov2740, reg_list);
-- 
2.43.0

From e807d266be092024e9963ffb6c7b9ace1153ec15 Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Mon, 4 Dec 2023 13:39:46 +0100
Subject: [PATCH 27/31] media: ipu-bridge: Change ov2740 link-frequency to 180
 MHz

The only known devices that use an ov2740 sensor in combination with
the ipu-bridge code are various Lenovo ThinkPad models, which all
need the link-frequency to be 180 MHz for things to work properly.

The ov2740 driver used to only support 360 MHz link-frequency,
which is why the ipu-bridge entry used 360 MHz, but now the
ov2740 driver has been extended to also support 180 MHz.

The ov2740 is actually used with 360 MHz link-frequency on Chromebooks.
On Chromebooks the camera/sensor fwnode graph is part of the ACPI tables.
The ipu-bridge code is used to dynamically generate the graph when it is
missing, so it is not used on Chromebooks and the ov2740 will keep using
360 MHz link-frequency there as before.

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
---
 drivers/media/pci/intel/ipu-bridge.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
index e38198e259c0..f980e3125a7b 100644
--- a/drivers/media/pci/intel/ipu-bridge.c
+++ b/drivers/media/pci/intel/ipu-bridge.c
@@ -53,7 +53,7 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = {
 	/* Omnivision ov8856 */
 	IPU_SENSOR_CONFIG("OVTI8856", 3, 180000000, 360000000, 720000000),
 	/* Omnivision ov2740 */
-	IPU_SENSOR_CONFIG("INT3474", 1, 360000000),
+	IPU_SENSOR_CONFIG("INT3474", 1, 180000000),
 	/* Hynix hi556 */
 	IPU_SENSOR_CONFIG("INT3537", 1, 437000000),
 	/* Omnivision ov13b10 */
-- 
2.43.0

From d1c04b1284f0ce4d32ea73f6a325e08b31b2a323 Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Tue, 23 Jan 2024 14:58:35 +0100
Subject: [PATCH 28/31] media: hi556: Return -EPROBE_DEFER if no endpoint is
 found

With ipu-bridge, endpoints may only be created once ipu-bridge has
initialised. This may happen after the sensor driver has first probed.

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 drivers/media/i2c/hi556.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index f6ea9b7b9700..258614b33f51 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -1207,8 +1207,13 @@ static int hi556_check_hwcfg(struct device *dev)
 	int ret = 0;
 	unsigned int i, j;
 
-	if (!fwnode)
-		return -ENXIO;
+	/*
+	 * Sometimes the fwnode graph is initialized by the bridge driver,
+	 * wait for this.
+	 */
+	ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+	if (!ep)
+		return -EPROBE_DEFER;
 
 	ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
 	if (ret) {
@@ -1221,10 +1226,6 @@ static int hi556_check_hwcfg(struct device *dev)
 		return -EINVAL;
 	}
 
-	ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
-	if (!ep)
-		return -ENXIO;
-
 	ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
 	fwnode_handle_put(ep);
 	if (ret)
-- 
2.43.0

From 109e64438f05a0cd791e5d84084672b2fcf12cf2 Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Tue, 23 Jan 2024 14:48:26 +0100
Subject: [PATCH 29/31] media: hi556: Add support for reset GPIO

On some ACPI platforms, such as Chromebooks, the ACPI methods to
change the power-state (_PS0 and _PS3) fully take care of powering
the sensor on/off.

On other ACPI platforms, such as various HP models with an IPU6 +
hi556 sensor, the sensor driver must control the reset GPIO itself.

Add support for having the driver control an optional reset GPIO.

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 drivers/media/i2c/hi556.c | 45 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 44 insertions(+), 1 deletion(-)

diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index 258614b33f51..7398391989ea 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -4,6 +4,7 @@
 #include <asm/unaligned.h>
 #include <linux/acpi.h>
 #include <linux/delay.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
@@ -633,6 +634,9 @@ struct hi556 {
 	struct v4l2_ctrl *hblank;
 	struct v4l2_ctrl *exposure;
 
+	/* GPIOs, clocks, etc. */
+	struct gpio_desc *reset_gpio;
+
 	/* Current mode */
 	const struct hi556_mode *cur_mode;
 
@@ -1277,6 +1281,25 @@ static void hi556_remove(struct i2c_client *client)
 	mutex_destroy(&hi556->mutex);
 }
 
+static int hi556_suspend(struct device *dev)
+{
+	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+	struct hi556 *hi556 = to_hi556(sd);
+
+	gpiod_set_value_cansleep(hi556->reset_gpio, 1);
+	return 0;
+}
+
+static int hi556_resume(struct device *dev)
+{
+	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+	struct hi556 *hi556 = to_hi556(sd);
+
+	gpiod_set_value_cansleep(hi556->reset_gpio, 0);
+	usleep_range(5000, 5500);
+	return 0;
+}
+
 static int hi556_probe(struct i2c_client *client)
 {
 	struct hi556 *hi556;
@@ -1296,12 +1319,24 @@ static int hi556_probe(struct i2c_client *client)
 
 	v4l2_i2c_subdev_init(&hi556->sd, client, &hi556_subdev_ops);
 
+	hi556->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+						    GPIOD_OUT_HIGH);
+	if (IS_ERR(hi556->reset_gpio))
+		return dev_err_probe(&client->dev, PTR_ERR(hi556->reset_gpio),
+				     "failed to get reset GPIO\n");
+
 	full_power = acpi_dev_state_d0(&client->dev);
 	if (full_power) {
+		/* Ensure non ACPI managed resources are enabled */
+		ret = hi556_resume(&client->dev);
+		if (ret)
+			return dev_err_probe(&client->dev, ret,
+					     "failed to power on sensor\n");
+
 		ret = hi556_identify_module(hi556);
 		if (ret) {
 			dev_err(&client->dev, "failed to find sensor: %d", ret);
-			return ret;
+			goto probe_error_power_off;
 		}
 	}
 
@@ -1346,9 +1381,16 @@ static int hi556_probe(struct i2c_client *client)
 	v4l2_ctrl_handler_free(hi556->sd.ctrl_handler);
 	mutex_destroy(&hi556->mutex);
 
+probe_error_power_off:
+	if (full_power)
+		hi556_suspend(&client->dev);
+
 	return ret;
 }
 
+static DEFINE_RUNTIME_DEV_PM_OPS(hi556_pm_ops, hi556_suspend, hi556_resume,
+				 NULL);
+
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id hi556_acpi_ids[] = {
 	{"INT3537"},
@@ -1362,6 +1404,7 @@ static struct i2c_driver hi556_i2c_driver = {
 	.driver = {
 		.name = "hi556",
 		.acpi_match_table = ACPI_PTR(hi556_acpi_ids),
+		.pm = pm_sleep_ptr(&hi556_pm_ops),
 	},
 	.probe = hi556_probe,
 	.remove = hi556_remove,
-- 
2.43.0

From 23a74772614fcba8ad2ed9370db613ab0540072e Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Tue, 23 Jan 2024 14:54:22 +0100
Subject: [PATCH 30/31] media: hi556: Add support for external clock

On some ACPI platforms, such as Chromebooks, the ACPI methods to
change the power-state (_PS0 and _PS3) fully take care of powering
the sensor on/off.

On other ACPI platforms, such as various HP models with an IPU6 +
hi556 sensor, the sensor driver must control the sensor's clock itself.

Add support for having the driver control an optional clock.

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 drivers/media/i2c/hi556.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index 7398391989ea..fb6ba6984e38 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -3,6 +3,7 @@
 
 #include <asm/unaligned.h>
 #include <linux/acpi.h>
+#include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
@@ -636,6 +637,7 @@ struct hi556 {
 
 	/* GPIOs, clocks, etc. */
 	struct gpio_desc *reset_gpio;
+	struct clk *clk;
 
 	/* Current mode */
 	const struct hi556_mode *cur_mode;
@@ -1287,6 +1289,7 @@ static int hi556_suspend(struct device *dev)
 	struct hi556 *hi556 = to_hi556(sd);
 
 	gpiod_set_value_cansleep(hi556->reset_gpio, 1);
+	clk_disable_unprepare(hi556->clk);
 	return 0;
 }
 
@@ -1294,6 +1297,11 @@ static int hi556_resume(struct device *dev)
 {
 	struct v4l2_subdev *sd = dev_get_drvdata(dev);
 	struct hi556 *hi556 = to_hi556(sd);
+	int ret;
+
+	ret = clk_prepare_enable(hi556->clk);
+	if (ret)
+		return ret;
 
 	gpiod_set_value_cansleep(hi556->reset_gpio, 0);
 	usleep_range(5000, 5500);
@@ -1325,6 +1333,11 @@ static int hi556_probe(struct i2c_client *client)
 		return dev_err_probe(&client->dev, PTR_ERR(hi556->reset_gpio),
 				     "failed to get reset GPIO\n");
 
+	hi556->clk = devm_clk_get_optional(&client->dev, "clk");
+	if (IS_ERR(hi556->clk))
+		return dev_err_probe(&client->dev, PTR_ERR(hi556->clk),
+				     "failed to get clock\n");
+
 	full_power = acpi_dev_state_d0(&client->dev);
 	if (full_power) {
 		/* Ensure non ACPI managed resources are enabled */
-- 
2.43.0
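
The clock handling introduced by this patch follows the usual optional-clock
consumer pattern: enable the clock before releasing reset, disable it after
asserting reset again. Below is a minimal stand-alone sketch of that pattern;
it is not part of the patch, all "mysensor_*" names are illustrative, and it
assumes drvdata points directly at the private struct rather than at a
v4l2_subdev as in the real driver.

/*
 * Illustrative sketch only (not part of the patch): optional external
 * clock plus reset GPIO power sequencing.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>

struct mysensor {
	struct gpio_desc *reset_gpio;	/* may be NULL (optional) */
	struct clk *clk;		/* may be NULL (optional) */
};

static int mysensor_power_on(struct device *dev)
{
	struct mysensor *sensor = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(sensor->clk);	/* no-op for a NULL clk */
	if (ret)
		return ret;

	gpiod_set_value_cansleep(sensor->reset_gpio, 0); /* release reset */
	usleep_range(5000, 5500);	/* settle time used by the driver */
	return 0;
}

static int mysensor_power_off(struct device *dev)
{
	struct mysensor *sensor = dev_get_drvdata(dev);

	gpiod_set_value_cansleep(sensor->reset_gpio, 1); /* assert reset */
	clk_disable_unprepare(sensor->clk);		 /* NULL-safe */
	return 0;
}

In probe the clock would be looked up with devm_clk_get_optional(dev, "clk"),
which returns NULL rather than an error when the platform describes no clock;
since clk_prepare_enable() and clk_disable_unprepare() accept a NULL clk, the
power paths above need no conditionals.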

From 6c3b454ee3ab07dd2e10011d685f6bd9f03bee71 Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Wed, 24 Jan 2024 18:45:02 +0100
Subject: [PATCH 31/31] media: hi556: Add support for avdd regulator

On some ACPI platforms, such as Chromebooks, the ACPI methods to
change the power state (_PS0 and _PS3) fully take care of powering
the sensor on and off.

On other ACPI platforms, such as various HP models with an IPU6 +
hi556 sensor, the sensor driver must control the avdd regulator
itself.

Add support for having the driver control the sensor's avdd regulator.
Note this relies on the regulator-core providing a dummy regulator
(which it does by default) on platforms where Linux is not aware of
the avdd regulator.

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 drivers/media/i2c/hi556.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index fb6ba6984e38..90eff282a6e2 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -9,6 +9,7 @@
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-fwnode.h>
@@ -638,6 +639,7 @@ struct hi556 {
 	/* GPIOs, clocks, etc. */
 	struct gpio_desc *reset_gpio;
 	struct clk *clk;
+	struct regulator *avdd;
 
 	/* Current mode */
 	const struct hi556_mode *cur_mode;
@@ -1287,8 +1289,17 @@ static int hi556_suspend(struct device *dev)
 {
 	struct v4l2_subdev *sd = dev_get_drvdata(dev);
 	struct hi556 *hi556 = to_hi556(sd);
+	int ret;
 
 	gpiod_set_value_cansleep(hi556->reset_gpio, 1);
+
+	ret = regulator_disable(hi556->avdd);
+	if (ret) {
+		dev_err(dev, "failed to disable avdd: %d\n", ret);
+		gpiod_set_value_cansleep(hi556->reset_gpio, 0);
+		return ret;
+	}
+
 	clk_disable_unprepare(hi556->clk);
 	return 0;
 }
@@ -1303,6 +1314,13 @@ static int hi556_resume(struct device *dev)
 	if (ret)
 		return ret;
 
+	ret = regulator_enable(hi556->avdd);
+	if (ret) {
+		dev_err(dev, "failed to enable avdd: %d\n", ret);
+		clk_disable_unprepare(hi556->clk);
+		return ret;
+	}
+
 	gpiod_set_value_cansleep(hi556->reset_gpio, 0);
 	usleep_range(5000, 5500);
 	return 0;
@@ -1338,6 +1356,12 @@ static int hi556_probe(struct i2c_client *client)
 		return dev_err_probe(&client->dev, PTR_ERR(hi556->clk),
 				     "failed to get clock\n");
 
+	/* The regulator core will provide a "dummy" regulator if necessary */
+	hi556->avdd = devm_regulator_get(&client->dev, "avdd");
+	if (IS_ERR(hi556->avdd))
+		return dev_err_probe(&client->dev, PTR_ERR(hi556->avdd),
+				     "failed to get avdd regulator\n");
+
 	full_power = acpi_dev_state_d0(&client->dev);
 	if (full_power) {
 		/* Ensure non ACPI managed resources are enabled */
-- 
2.43.0
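
The notable detail in this last patch is the use of devm_regulator_get()
rather than an "optional" variant: when the platform does not describe an
"avdd" supply, the regulator core hands back a dummy regulator, so the
enable/disable calls in the resume/suspend paths stay balanced without any
conditionals. A minimal sketch of the probe-side lookup under that
assumption ("mysensor" is an illustrative name, not part of the driver):

/*
 * Illustrative sketch only: acquiring the avdd supply. The regulator
 * core substitutes a dummy regulator when "avdd" is not described, so
 * callers can unconditionally enable/disable it.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int mysensor_get_avdd(struct device *dev, struct regulator **avdd)
{
	*avdd = devm_regulator_get(dev, "avdd");
	if (IS_ERR(*avdd))
		return dev_err_probe(dev, PTR_ERR(*avdd),
				     "failed to get avdd regulator\n");

	return 0;	/* *avdd is valid, possibly a dummy supply */
}

During power-up the supply is then enabled before the reset line is released,
and during power-down it is disabled after reset is asserted again, matching
the ordering in the hunks above; on platforms without a real avdd supply
those calls act on the dummy regulator and are effectively no-ops.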