author     Jan200101 <sentrycraft123@gmail.com>  2023-10-28 15:28:59 +0200
committer  Jan200101 <sentrycraft123@gmail.com>  2023-10-28 15:28:59 +0200
commit     2a7bf10872e046e007f851a5314d2432389dc0c8 (patch)
tree       8ca5b90e1d6bb9b94fc5f1371b4df4b209a60479
parent     6ffe0fb37ea4136ee9252bcce5ab3d1c1e235378 (diff)
download   kernel-fsync-2a7bf10872e046e007f851a5314d2432389dc0c8.tar.gz
           kernel-fsync-2a7bf10872e046e007f851a5314d2432389dc0c8.zip
kernel 6.5.8
-rw-r--r--  SOURCES/linux-surface.patch    | 1760
-rw-r--r--  SOURCES/patch-6.5-redhat.patch |    6
-rw-r--r--  SPECS/kernel.spec              |   15
3 files changed, 1677 insertions(+), 104 deletions(-)
diff --git a/SOURCES/linux-surface.patch b/SOURCES/linux-surface.patch
index b209613..ac6bcf5 100644
--- a/SOURCES/linux-surface.patch
+++ b/SOURCES/linux-surface.patch
@@ -1,4 +1,4 @@
-From d83d6478931989d4a211372e92c44b4020be48de Mon Sep 17 00:00:00 2001
+From 38f9bee60e9c7c742358e862c6c9422964f1d41a Mon Sep 17 00:00:00 2001
From: Tsuchiya Yuto <kitakar@gmail.com>
Date: Sun, 18 Oct 2020 16:42:44 +0900
Subject: [PATCH] (surface3-oemb) add DMI matches for Surface 3 with broken DMI
@@ -99,7 +99,7 @@ index cdcbf04b8832..958305779b12 100644
--
2.42.0
-From 8f3df38a8fb044dc63d15d1aa7eeec13a0ec4cfd Mon Sep 17 00:00:00 2001
+From b8dec23a399dc5deb88ac30d71dd6270c1794ba0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonas=20Dre=C3=9Fler?= <verdre@v0yd.nl>
Date: Tue, 3 Nov 2020 13:28:04 +0100
Subject: [PATCH] mwifiex: Add quirk resetting the PCI bridge on MS Surface
@@ -266,7 +266,7 @@ index d6ff964aec5b..5d30ae39d65e 100644
--
2.42.0
-From 15d63e166f619c8a5baf139e376fe2285f4c2ff6 Mon Sep 17 00:00:00 2001
+From 44360e255e0cfebd6e0584e75e13c2cc69c7d41a Mon Sep 17 00:00:00 2001
From: Tsuchiya Yuto <kitakar@gmail.com>
Date: Sun, 4 Oct 2020 00:11:49 +0900
Subject: [PATCH] mwifiex: pcie: disable bridge_d3 for Surface gen4+
@@ -421,7 +421,7 @@ index 5d30ae39d65e..c14eb56eb911 100644
--
2.42.0
-From d91266ba1b8afcdcc2a628e9f1bb400c86286bd8 Mon Sep 17 00:00:00 2001
+From 9203ef88e0067699c993d4715c48ff9deaea92be Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonas=20Dre=C3=9Fler?= <verdre@v0yd.nl>
Date: Thu, 25 Mar 2021 11:33:02 +0100
Subject: [PATCH] Bluetooth: btusb: Lower passive lescan interval on Marvell
@@ -499,7 +499,7 @@ index dfdfb72d350f..44ef02efba46 100644
--
2.42.0
-From a77ffb3a1080facd893088070bf6e5474fd9ee58 Mon Sep 17 00:00:00 2001
+From 4ec07cef92d6b7ce42ccc5e1b0e73678cf023dcb Mon Sep 17 00:00:00 2001
From: Maximilian Luz <luzmaximilian@gmail.com>
Date: Sat, 27 Feb 2021 00:45:52 +0100
Subject: [PATCH] ath10k: Add module parameters to override board files
@@ -620,7 +620,7 @@ index 6cdb225b7eac..19c036751fb1 100644
--
2.42.0
-From 45e0d4511653ae109942a4250ef05c2f3661ce18 Mon Sep 17 00:00:00 2001
+From 6e7929685df7d87379ad03942e364d7e22122624 Mon Sep 17 00:00:00 2001
From: Dorian Stoll <dorian.stoll@tmsp.io>
Date: Thu, 30 Jul 2020 13:21:53 +0200
Subject: [PATCH] misc: mei: Add missing IPTS device IDs
@@ -658,7 +658,7 @@ index 676d566f38dd..6b37dd1f8b2a 100644
--
2.42.0
-From 3a7f8d3d21d3b841607e8af4437b16fd677f5679 Mon Sep 17 00:00:00 2001
+From 390bee34de9e6382d9a0e0af9e515cb1f114b210 Mon Sep 17 00:00:00 2001
From: Liban Hannan <liban.p@gmail.com>
Date: Tue, 12 Apr 2022 23:31:12 +0100
Subject: [PATCH] iommu: ipts: use IOMMU passthrough mode for IPTS
@@ -680,7 +680,7 @@ Patchset: ipts
1 file changed, 24 insertions(+)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
-index 5c8c5cdc36cf..fc4799415c3c 100644
+index 4a9d9e82847d..6387f3a6eccf 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -37,6 +37,8 @@
@@ -727,7 +727,7 @@ index 5c8c5cdc36cf..fc4799415c3c 100644
check_tylersburg_isoch();
ret = si_domain_init(hw_pass_through);
-@@ -4771,6 +4781,17 @@ static void quirk_iommu_igfx(struct pci_dev *dev)
+@@ -4755,6 +4765,17 @@ static void quirk_iommu_igfx(struct pci_dev *dev)
dmar_map_gfx = 0;
}
@@ -745,7 +745,7 @@ index 5c8c5cdc36cf..fc4799415c3c 100644
/* G4x/GM45 integrated gfx dmar support is totally busted. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
-@@ -4806,6 +4827,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
+@@ -4790,6 +4811,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
@@ -758,7 +758,7 @@ index 5c8c5cdc36cf..fc4799415c3c 100644
--
2.42.0
-From c504c6923f2b5239436f990f443e0af9d7462c4a Mon Sep 17 00:00:00 2001
+From 13b6ad2c284cf321144851a527dda96e6d4b9064 Mon Sep 17 00:00:00 2001
From: Dorian Stoll <dorian.stoll@tmsp.io>
Date: Sun, 11 Dec 2022 12:00:59 +0100
Subject: [PATCH] hid: Add support for Intel Precise Touch and Stylus
@@ -825,10 +825,10 @@ Patchset: ipts
create mode 100644 drivers/hid/ipts/thread.h
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
-index e11c1c803676..54f45bdf663e 100644
+index dc456c86e956..b35203b9a7d8 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
-@@ -1334,4 +1334,6 @@ source "drivers/hid/amd-sfh-hid/Kconfig"
+@@ -1335,4 +1335,6 @@ source "drivers/hid/amd-sfh-hid/Kconfig"
source "drivers/hid/surface-hid/Kconfig"
@@ -3850,7 +3850,7 @@ index 000000000000..1f966b8b32c4
--
2.42.0
-From 15bcb09157266ad8e90e5ee5f954a9760ac40e4f Mon Sep 17 00:00:00 2001
+From cc8157a9538ba31fb72482b9fa52803241f0887d Mon Sep 17 00:00:00 2001
From: Dorian Stoll <dorian.stoll@tmsp.io>
Date: Sun, 11 Dec 2022 12:03:38 +0100
Subject: [PATCH] iommu: intel: Disable source id verification for ITHC
@@ -3891,7 +3891,7 @@ index 08f56326e2f8..75218b38995c 100644
--
2.42.0
-From 4d815045b207589057c1dfc6eba542d232e9654b Mon Sep 17 00:00:00 2001
+From b835496e4ba0bc567b8e83fe1fcb5c4da34c1289 Mon Sep 17 00:00:00 2001
From: Dorian Stoll <dorian.stoll@tmsp.io>
Date: Sun, 11 Dec 2022 12:10:54 +0100
Subject: [PATCH] hid: Add support for Intel Touch Host Controller
@@ -3924,10 +3924,10 @@ Patchset: ithc
create mode 100644 drivers/hid/ithc/ithc.h
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
-index 54f45bdf663e..0b58bd30a22b 100644
+index b35203b9a7d8..3259f2764dc4 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
-@@ -1336,4 +1336,6 @@ source "drivers/hid/surface-hid/Kconfig"
+@@ -1337,4 +1337,6 @@ source "drivers/hid/surface-hid/Kconfig"
source "drivers/hid/ipts/Kconfig"
@@ -5283,61 +5283,1631 @@ index 000000000000..6a9b0d480bc1
--
2.42.0
-From 89b8b8c121fe201862929709d451fecb24947676 Mon Sep 17 00:00:00 2001
-From: Tony Lindgren <tony@atomide.com>
-Date: Thu, 5 Oct 2023 10:56:42 +0300
-Subject: [PATCH] serial: core: Fix checks for tx runtime PM state
-
-Maximilian reported that surface_serial_hub serdev tx does not work during
-system suspend. During system suspend, runtime PM gets disabled in
-__device_suspend_late(), and tx is unable to wake-up the serial core port
-device that we use to check if tx is safe to start. Johan summarized the
-regression noting that serdev tx no longer always works as earlier when the
-serdev device is runtime PM active.
-
-The serdev device and the serial core controller devices are siblings of
-the serial port hardware device. The runtime PM usage count from serdev
-device does not propagate to the serial core device siblings, it only
-propagates to the parent.
-
-In addition to the tx issue for suspend, testing for the serial core port
-device can cause an unnecessary delay in enabling tx while waiting for the
-serial core port device to wake-up. The serial core port device wake-up is
-only needed to flush pending tx when the serial port hardware device was
-in runtime PM suspended state.
-
-To fix the regression, we need to check the runtime PM state of the parent
-serial port hardware device for tx instead of the serial core port device.
-
-As the serial port device drivers may or may not implement runtime PM, we
-need to also add a check for pm_runtime_enabled().
-
-Reported-by: Maximilian Luz <luzmaximilian@gmail.com>
-Fixes: 84a9582fd203 ("serial: core: Start managing serial controllers to enable runtime PM")
-Signed-off-by: Tony Lindgren <tony@atomide.com>
-Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+From 7edaa4190fe42f701948b6320b82dce4bf0e0a42 Mon Sep 17 00:00:00 2001
+From: quo <tuple@list.ru>
+Date: Mon, 23 Oct 2023 10:15:29 +0200
+Subject: [PATCH] Update ITHC from module repo
+
+Changes:
+ - Added some comments and fixed a few checkpatch warnings
+ - Improved CPU latency QoS handling
+ - Retry reading the report descriptor on error / timeout
+
+Based on https://github.com/quo/ithc-linux/commit/0b8b45d9775e756d6bd3a699bfaf9f5bd7b9b10b
+
+Signed-off-by: Dorian Stoll <dorian.stoll@tmsp.io>
+Patchset: ithc
+---
+ drivers/hid/ithc/ithc-debug.c | 94 +++++---
+ drivers/hid/ithc/ithc-dma.c | 231 +++++++++++++-----
+ drivers/hid/ithc/ithc-dma.h | 4 +-
+ drivers/hid/ithc/ithc-main.c | 430 ++++++++++++++++++++++++----------
+ drivers/hid/ithc/ithc-regs.c | 68 ++++--
+ drivers/hid/ithc/ithc-regs.h | 19 +-
+ drivers/hid/ithc/ithc.h | 13 +-
+ 7 files changed, 623 insertions(+), 236 deletions(-)
+
+diff --git a/drivers/hid/ithc/ithc-debug.c b/drivers/hid/ithc/ithc-debug.c
+index 57bf125c45bd..1f1f1e33f2e5 100644
+--- a/drivers/hid/ithc/ithc-debug.c
++++ b/drivers/hid/ithc/ithc-debug.c
+@@ -1,10 +1,14 @@
++// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
++
+ #include "ithc.h"
+
+-void ithc_log_regs(struct ithc *ithc) {
+- if (!ithc->prev_regs) return;
+- u32 __iomem *cur = (__iomem void*)ithc->regs;
+- u32 *prev = (void*)ithc->prev_regs;
+- for (int i = 1024; i < sizeof *ithc->regs / 4; i++) {
++void ithc_log_regs(struct ithc *ithc)
++{
++ if (!ithc->prev_regs)
++ return;
++ u32 __iomem *cur = (__iomem void *)ithc->regs;
++ u32 *prev = (void *)ithc->prev_regs;
++ for (int i = 1024; i < sizeof(*ithc->regs) / 4; i++) {
+ u32 x = readl(cur + i);
+ if (x != prev[i]) {
+ pci_info(ithc->pci, "reg %04x: %08x -> %08x\n", i * 4, prev[i], x);
+@@ -13,55 +17,79 @@ void ithc_log_regs(struct ithc *ithc) {
+ }
+ }
+
+-static ssize_t ithc_debugfs_cmd_write(struct file *f, const char __user *buf, size_t len, loff_t *offset) {
++static ssize_t ithc_debugfs_cmd_write(struct file *f, const char __user *buf, size_t len,
++ loff_t *offset)
++{
++ // Debug commands consist of a single letter followed by a list of numbers (decimal or
++ // hexadecimal, space-separated).
+ struct ithc *ithc = file_inode(f)->i_private;
+ char cmd[256];
+- if (!ithc || !ithc->pci) return -ENODEV;
+- if (!len) return -EINVAL;
+- if (len >= sizeof cmd) return -EINVAL;
+- if (copy_from_user(cmd, buf, len)) return -EFAULT;
++ if (!ithc || !ithc->pci)
++ return -ENODEV;
++ if (!len)
++ return -EINVAL;
++ if (len >= sizeof(cmd))
++ return -EINVAL;
++ if (copy_from_user(cmd, buf, len))
++ return -EFAULT;
+ cmd[len] = 0;
+- if (cmd[len-1] == '\n') cmd[len-1] = 0;
++ if (cmd[len-1] == '\n')
++ cmd[len-1] = 0;
+ pci_info(ithc->pci, "debug command: %s\n", cmd);
++
++ // Parse the list of arguments into a u32 array.
+ u32 n = 0;
+ const char *s = cmd + 1;
+ u32 a[32];
+ while (*s && *s != '\n') {
+- if (n >= ARRAY_SIZE(a)) return -EINVAL;
+- if (*s++ != ' ') return -EINVAL;
++ if (n >= ARRAY_SIZE(a))
++ return -EINVAL;
++ if (*s++ != ' ')
++ return -EINVAL;
+ char *e;
+ a[n++] = simple_strtoul(s, &e, 0);
+- if (e == s) return -EINVAL;
++ if (e == s)
++ return -EINVAL;
+ s = e;
+ }
+ ithc_log_regs(ithc);
+- switch(cmd[0]) {
++
++ // Execute the command.
++ switch (cmd[0]) {
+ case 'x': // reset
+ ithc_reset(ithc);
+ break;
+ case 'w': // write register: offset mask value
+- if (n != 3 || (a[0] & 3)) return -EINVAL;
+- pci_info(ithc->pci, "debug write 0x%04x = 0x%08x (mask 0x%08x)\n", a[0], a[2], a[1]);
++ if (n != 3 || (a[0] & 3))
++ return -EINVAL;
++ pci_info(ithc->pci, "debug write 0x%04x = 0x%08x (mask 0x%08x)\n",
++ a[0], a[2], a[1]);
+ bitsl(((__iomem u32 *)ithc->regs) + a[0] / 4, a[1], a[2]);
+ break;
+ case 'r': // read register: offset
+- if (n != 1 || (a[0] & 3)) return -EINVAL;
+- pci_info(ithc->pci, "debug read 0x%04x = 0x%08x\n", a[0], readl(((__iomem u32 *)ithc->regs) + a[0] / 4));
++ if (n != 1 || (a[0] & 3))
++ return -EINVAL;
++ pci_info(ithc->pci, "debug read 0x%04x = 0x%08x\n", a[0],
++ readl(((__iomem u32 *)ithc->regs) + a[0] / 4));
+ break;
+ case 's': // spi command: cmd offset len data...
+ // read config: s 4 0 64 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ // set touch cfg: s 6 12 4 XX
+- if (n < 3 || a[2] > (n - 3) * 4) return -EINVAL;
++ if (n < 3 || a[2] > (n - 3) * 4)
++ return -EINVAL;
+ pci_info(ithc->pci, "debug spi command %u with %u bytes of data\n", a[0], a[2]);
+ if (!CHECK(ithc_spi_command, ithc, a[0], a[1], a[2], a + 3))
+- for (u32 i = 0; i < (a[2] + 3) / 4; i++) pci_info(ithc->pci, "resp %u = 0x%08x\n", i, a[3+i]);
++ for (u32 i = 0; i < (a[2] + 3) / 4; i++)
++ pci_info(ithc->pci, "resp %u = 0x%08x\n", i, a[3+i]);
+ break;
+ case 'd': // dma command: cmd len data...
+ // get report descriptor: d 7 8 0 0
+ // enable multitouch: d 3 2 0x0105
+- if (n < 2 || a[1] > (n - 2) * 4) return -EINVAL;
++ if (n < 2 || a[1] > (n - 2) * 4)
++ return -EINVAL;
+ pci_info(ithc->pci, "debug dma command %u with %u bytes of data\n", a[0], a[1]);
+- if (ithc_dma_tx(ithc, a[0], a[1], a + 2)) pci_err(ithc->pci, "dma tx failed\n");
++ if (ithc_dma_tx(ithc, a[0], a[1], a + 2))
++ pci_err(ithc->pci, "dma tx failed\n");
+ break;
+ default:
+ return -EINVAL;
+@@ -75,21 +103,27 @@ static const struct file_operations ithc_debugfops_cmd = {
+ .write = ithc_debugfs_cmd_write,
+ };
+
+-static void ithc_debugfs_devres_release(struct device *dev, void *res) {
++static void ithc_debugfs_devres_release(struct device *dev, void *res)
++{
+ struct dentry **dbgm = res;
+- if (*dbgm) debugfs_remove_recursive(*dbgm);
++ if (*dbgm)
++ debugfs_remove_recursive(*dbgm);
+ }
+
+-int ithc_debug_init(struct ithc *ithc) {
+- struct dentry **dbgm = devres_alloc(ithc_debugfs_devres_release, sizeof *dbgm, GFP_KERNEL);
+- if (!dbgm) return -ENOMEM;
++int ithc_debug_init(struct ithc *ithc)
++{
++ struct dentry **dbgm = devres_alloc(ithc_debugfs_devres_release, sizeof(*dbgm), GFP_KERNEL);
++ if (!dbgm)
++ return -ENOMEM;
+ devres_add(&ithc->pci->dev, dbgm);
+ struct dentry *dbg = debugfs_create_dir(DEVNAME, NULL);
+- if (IS_ERR(dbg)) return PTR_ERR(dbg);
++ if (IS_ERR(dbg))
++ return PTR_ERR(dbg);
+ *dbgm = dbg;
+
+ struct dentry *cmd = debugfs_create_file("cmd", 0220, dbg, ithc, &ithc_debugfops_cmd);
+- if (IS_ERR(cmd)) return PTR_ERR(cmd);
++ if (IS_ERR(cmd))
++ return PTR_ERR(cmd);
+
+ return 0;
+ }
+diff --git a/drivers/hid/ithc/ithc-dma.c b/drivers/hid/ithc/ithc-dma.c
+index 7e89b3496918..ffb8689b8a78 100644
+--- a/drivers/hid/ithc/ithc-dma.c
++++ b/drivers/hid/ithc/ithc-dma.c
+@@ -1,59 +1,91 @@
++// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
++
+ #include "ithc.h"
+
+-static int ithc_dma_prd_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *p, unsigned num_buffers, unsigned num_pages, enum dma_data_direction dir) {
++// The THC uses tables of PRDs (physical region descriptors) to describe the TX and RX data buffers.
++// Each PRD contains the DMA address and size of a block of DMA memory, and some status flags.
++// This allows each data buffer to consist of multiple non-contiguous blocks of memory.
++
++static int ithc_dma_prd_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *p,
++ unsigned int num_buffers, unsigned int num_pages, enum dma_data_direction dir)
++{
+ p->num_pages = num_pages;
+ p->dir = dir;
++ // We allocate enough space to have one PRD per data buffer page, however if the data
++ // buffer pages happen to be contiguous, we can describe the buffer using fewer PRDs, so
++ // some will remain unused (which is fine).
+ p->size = round_up(num_buffers * num_pages * sizeof(struct ithc_phys_region_desc), PAGE_SIZE);
+ p->addr = dmam_alloc_coherent(&ithc->pci->dev, p->size, &p->dma_addr, GFP_KERNEL);
+- if (!p->addr) return -ENOMEM;
+- if (p->dma_addr & (PAGE_SIZE - 1)) return -EFAULT;
++ if (!p->addr)
++ return -ENOMEM;
++ if (p->dma_addr & (PAGE_SIZE - 1))
++ return -EFAULT;
+ return 0;
+ }
+
++// Devres managed sg_table wrapper.
+ struct ithc_sg_table {
+ void *addr;
+ struct sg_table sgt;
+ enum dma_data_direction dir;
+ };
+-static void ithc_dma_sgtable_free(struct sg_table *sgt) {
++static void ithc_dma_sgtable_free(struct sg_table *sgt)
++{
+ struct scatterlist *sg;
+ int i;
+ for_each_sgtable_sg(sgt, sg, i) {
+ struct page *p = sg_page(sg);
+- if (p) __free_page(p);
++ if (p)
++ __free_page(p);
+ }
+ sg_free_table(sgt);
+ }
+-static void ithc_dma_data_devres_release(struct device *dev, void *res) {
++static void ithc_dma_data_devres_release(struct device *dev, void *res)
++{
+ struct ithc_sg_table *sgt = res;
+- if (sgt->addr) vunmap(sgt->addr);
++ if (sgt->addr)
++ vunmap(sgt->addr);
+ dma_unmap_sgtable(dev, &sgt->sgt, sgt->dir, 0);
+ ithc_dma_sgtable_free(&sgt->sgt);
+ }
+
+-static int ithc_dma_data_alloc(struct ithc* ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b) {
+- // We don't use dma_alloc_coherent for data buffers, because they don't have to be contiguous (we can use one PRD per page) or coherent (they are unidirectional).
+- // Instead we use an sg_table of individually allocated pages (5.13 has dma_alloc_noncontiguous for this, but we'd like to support 5.10 for now).
++static int ithc_dma_data_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *prds,
++ struct ithc_dma_data_buffer *b)
++{
++ // We don't use dma_alloc_coherent() for data buffers, because they don't have to be
++ // coherent (they are unidirectional) or contiguous (we can use one PRD per page).
++ // We could use dma_alloc_noncontiguous(), however this still always allocates a single
++ // DMA mapped segment, which is more restrictive than what we need.
++ // Instead we use an sg_table of individually allocated pages.
+ struct page *pages[16];
+- if (prds->num_pages == 0 || prds->num_pages > ARRAY_SIZE(pages)) return -EINVAL;
++ if (prds->num_pages == 0 || prds->num_pages > ARRAY_SIZE(pages))
++ return -EINVAL;
+ b->active_idx = -1;
+- struct ithc_sg_table *sgt = devres_alloc(ithc_dma_data_devres_release, sizeof *sgt, GFP_KERNEL);
+- if (!sgt) return -ENOMEM;
++ struct ithc_sg_table *sgt = devres_alloc(
++ ithc_dma_data_devres_release, sizeof(*sgt), GFP_KERNEL);
++ if (!sgt)
++ return -ENOMEM;
+ sgt->dir = prds->dir;
++
+ if (!sg_alloc_table(&sgt->sgt, prds->num_pages, GFP_KERNEL)) {
+ struct scatterlist *sg;
+ int i;
+ bool ok = true;
+ for_each_sgtable_sg(&sgt->sgt, sg, i) {
+- struct page *p = pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); // don't need __GFP_DMA for PCI DMA
+- if (!p) { ok = false; break; }
++ // NOTE: don't need __GFP_DMA for PCI DMA
++ struct page *p = pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
++ if (!p) {
++ ok = false;
++ break;
++ }
+ sg_set_page(sg, p, PAGE_SIZE, 0);
+ }
+ if (ok && !dma_map_sgtable(&ithc->pci->dev, &sgt->sgt, prds->dir, 0)) {
+ devres_add(&ithc->pci->dev, sgt);
+ b->sgt = &sgt->sgt;
+ b->addr = sgt->addr = vmap(pages, prds->num_pages, 0, PAGE_KERNEL);
+- if (!b->addr) return -ENOMEM;
++ if (!b->addr)
++ return -ENOMEM;
+ return 0;
+ }
+ ithc_dma_sgtable_free(&sgt->sgt);
+@@ -62,17 +94,29 @@ static int ithc_dma_data_alloc(struct ithc* ithc, struct ithc_dma_prd_buffer *pr
+ return -ENOMEM;
+ }
+
+-static int ithc_dma_data_buffer_put(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) {
++static int ithc_dma_data_buffer_put(struct ithc *ithc, struct ithc_dma_prd_buffer *prds,
++ struct ithc_dma_data_buffer *b, unsigned int idx)
++{
++ // Give a buffer to the THC.
+ struct ithc_phys_region_desc *prd = prds->addr;
+ prd += idx * prds->num_pages;
+- if (b->active_idx >= 0) { pci_err(ithc->pci, "buffer already active\n"); return -EINVAL; }
++ if (b->active_idx >= 0) {
++ pci_err(ithc->pci, "buffer already active\n");
++ return -EINVAL;
++ }
+ b->active_idx = idx;
+ if (prds->dir == DMA_TO_DEVICE) {
+- if (b->data_size > PAGE_SIZE) return -EINVAL;
++ // TX buffer: Caller should have already filled the data buffer, so just fill
++ // the PRD and flush.
++ // (TODO: Support multi-page TX buffers. So far no device seems to use or need
++ // these though.)
++ if (b->data_size > PAGE_SIZE)
++ return -EINVAL;
+ prd->addr = sg_dma_address(b->sgt->sgl) >> 10;
+ prd->size = b->data_size | PRD_FLAG_END;
+ flush_kernel_vmap_range(b->addr, b->data_size);
+ } else if (prds->dir == DMA_FROM_DEVICE) {
++ // RX buffer: Reset PRDs.
+ struct scatterlist *sg;
+ int i;
+ for_each_sgtable_dma_sg(b->sgt, sg, i) {
+@@ -87,21 +131,34 @@ static int ithc_dma_data_buffer_put(struct ithc *ithc, struct ithc_dma_prd_buffe
+ return 0;
+ }
+
+-static int ithc_dma_data_buffer_get(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) {
++static int ithc_dma_data_buffer_get(struct ithc *ithc, struct ithc_dma_prd_buffer *prds,
++ struct ithc_dma_data_buffer *b, unsigned int idx)
++{
++ // Take a buffer from the THC.
+ struct ithc_phys_region_desc *prd = prds->addr;
+ prd += idx * prds->num_pages;
+- if (b->active_idx != idx) { pci_err(ithc->pci, "wrong buffer index\n"); return -EINVAL; }
++ // This is purely a sanity check. We don't strictly need the idx parameter for this
++ // function, because it should always be the same as active_idx, unless we have a bug.
++ if (b->active_idx != idx) {
++ pci_err(ithc->pci, "wrong buffer index\n");
++ return -EINVAL;
++ }
+ b->active_idx = -1;
+ if (prds->dir == DMA_FROM_DEVICE) {
++ // RX buffer: Calculate actual received data size from PRDs.
+ dma_rmb(); // for the prds
+ b->data_size = 0;
+ struct scatterlist *sg;
+ int i;
+ for_each_sgtable_dma_sg(b->sgt, sg, i) {
+- unsigned size = prd->size;
++ unsigned int size = prd->size;
+ b->data_size += size & PRD_SIZE_MASK;
+- if (size & PRD_FLAG_END) break;
+- if ((size & PRD_SIZE_MASK) != sg_dma_len(sg)) { pci_err(ithc->pci, "truncated prd\n"); break; }
++ if (size & PRD_FLAG_END)
++ break;
++ if ((size & PRD_SIZE_MASK) != sg_dma_len(sg)) {
++ pci_err(ithc->pci, "truncated prd\n");
++ break;
++ }
+ prd++;
+ }
+ invalidate_kernel_vmap_range(b->addr, b->data_size);
+@@ -110,93 +167,139 @@ static int ithc_dma_data_buffer_get(struct ithc *ithc, struct ithc_dma_prd_buffe
+ return 0;
+ }
+
+-int ithc_dma_rx_init(struct ithc *ithc, u8 channel, const char *devname) {
++int ithc_dma_rx_init(struct ithc *ithc, u8 channel)
++{
+ struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
+ mutex_init(&rx->mutex);
++
++ // Allocate buffers.
+ u32 buf_size = DEVCFG_DMA_RX_SIZE(ithc->config.dma_buf_sizes);
+- unsigned num_pages = (buf_size + PAGE_SIZE - 1) / PAGE_SIZE;
+- pci_dbg(ithc->pci, "allocating rx buffers: num = %u, size = %u, pages = %u\n", NUM_RX_BUF, buf_size, num_pages);
++ unsigned int num_pages = (buf_size + PAGE_SIZE - 1) / PAGE_SIZE;
++ pci_dbg(ithc->pci, "allocating rx buffers: num = %u, size = %u, pages = %u\n",
++ NUM_RX_BUF, buf_size, num_pages);
+ CHECK_RET(ithc_dma_prd_alloc, ithc, &rx->prds, NUM_RX_BUF, num_pages, DMA_FROM_DEVICE);
+- for (unsigned i = 0; i < NUM_RX_BUF; i++)
++ for (unsigned int i = 0; i < NUM_RX_BUF; i++)
+ CHECK_RET(ithc_dma_data_alloc, ithc, &rx->prds, &rx->bufs[i]);
++
++ // Init registers.
+ writeb(DMA_RX_CONTROL2_RESET, &ithc->regs->dma_rx[channel].control2);
+ lo_hi_writeq(rx->prds.dma_addr, &ithc->regs->dma_rx[channel].addr);
+ writeb(NUM_RX_BUF - 1, &ithc->regs->dma_rx[channel].num_bufs);
+ writeb(num_pages - 1, &ithc->regs->dma_rx[channel].num_prds);
+ u8 head = readb(&ithc->regs->dma_rx[channel].head);
+- if (head) { pci_err(ithc->pci, "head is nonzero (%u)\n", head); return -EIO; }
+- for (unsigned i = 0; i < NUM_RX_BUF; i++)
++ if (head) {
++ pci_err(ithc->pci, "head is nonzero (%u)\n", head);
++ return -EIO;
++ }
++
++ // Init buffers.
++ for (unsigned int i = 0; i < NUM_RX_BUF; i++)
+ CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, &rx->bufs[i], i);
++
+ writeb(head ^ DMA_RX_WRAP_FLAG, &ithc->regs->dma_rx[channel].tail);
+ return 0;
+ }
+-void ithc_dma_rx_enable(struct ithc *ithc, u8 channel) {
+- bitsb_set(&ithc->regs->dma_rx[channel].control, DMA_RX_CONTROL_ENABLE | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_DATA);
+- CHECK(waitl, ithc, &ithc->regs->dma_rx[1].status, DMA_RX_STATUS_ENABLED, DMA_RX_STATUS_ENABLED);
++
++void ithc_dma_rx_enable(struct ithc *ithc, u8 channel)
++{
++ bitsb_set(&ithc->regs->dma_rx[channel].control,
++ DMA_RX_CONTROL_ENABLE | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_DATA);
++ CHECK(waitl, ithc, &ithc->regs->dma_rx[channel].status,
++ DMA_RX_STATUS_ENABLED, DMA_RX_STATUS_ENABLED);
+ }
+
+-int ithc_dma_tx_init(struct ithc *ithc) {
++int ithc_dma_tx_init(struct ithc *ithc)
++{
+ struct ithc_dma_tx *tx = &ithc->dma_tx;
+ mutex_init(&tx->mutex);
++
++ // Allocate buffers.
+ tx->max_size = DEVCFG_DMA_TX_SIZE(ithc->config.dma_buf_sizes);
+- unsigned num_pages = (tx->max_size + PAGE_SIZE - 1) / PAGE_SIZE;
+- pci_dbg(ithc->pci, "allocating tx buffers: size = %u, pages = %u\n", tx->max_size, num_pages);
++ unsigned int num_pages = (tx->max_size + PAGE_SIZE - 1) / PAGE_SIZE;
++ pci_dbg(ithc->pci, "allocating tx buffers: size = %u, pages = %u\n",
++ tx->max_size, num_pages);
+ CHECK_RET(ithc_dma_prd_alloc, ithc, &tx->prds, 1, num_pages, DMA_TO_DEVICE);
+ CHECK_RET(ithc_dma_data_alloc, ithc, &tx->prds, &tx->buf);
++
++ // Init registers.
+ lo_hi_writeq(tx->prds.dma_addr, &ithc->regs->dma_tx.addr);
+ writeb(num_pages - 1, &ithc->regs->dma_tx.num_prds);
++
++ // Init buffers.
+ CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
+ return 0;
+ }
+
+-static int ithc_dma_rx_process_buf(struct ithc *ithc, struct ithc_dma_data_buffer *data, u8 channel, u8 buf) {
++static int ithc_dma_rx_process_buf(struct ithc *ithc, struct ithc_dma_data_buffer *data,
++ u8 channel, u8 buf)
++{
+ if (buf >= NUM_RX_BUF) {
+ pci_err(ithc->pci, "invalid dma ringbuffer index\n");
+ return -EINVAL;
+ }
+- ithc_set_active(ithc);
+ u32 len = data->data_size;
+ struct ithc_dma_rx_header *hdr = data->addr;
+ u8 *hiddata = (void *)(hdr + 1);
+- if (len >= sizeof *hdr && hdr->code == DMA_RX_CODE_RESET) {
++ if (len >= sizeof(*hdr) && hdr->code == DMA_RX_CODE_RESET) {
++ // The THC sends a reset request when we need to reinitialize the device.
++ // This usually only happens if we send an invalid command or put the device
++ // in a bad state.
+ CHECK(ithc_reset, ithc);
+- } else if (len < sizeof *hdr || len != sizeof *hdr + hdr->data_size) {
++ } else if (len < sizeof(*hdr) || len != sizeof(*hdr) + hdr->data_size) {
+ if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
+- // When the CPU enters a low power state during DMA, we can get truncated messages.
+- // Typically this will be a single touch HID report that is only 1 byte, or a multitouch report that is 257 bytes.
++ // When the CPU enters a low power state during DMA, we can get truncated
++ // messages. For Surface devices, this will typically be a single touch
++ // report that is only 1 byte, or a multitouch report that is 257 bytes.
+ // See also ithc_set_active().
+ } else {
+- pci_err(ithc->pci, "invalid dma rx data! channel %u, buffer %u, size %u, code %u, data size %u\n", channel, buf, len, hdr->code, hdr->data_size);
+- print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0);
++ pci_err(ithc->pci, "invalid dma rx data! channel %u, buffer %u, size %u, code %u, data size %u\n",
++ channel, buf, len, hdr->code, hdr->data_size);
++ print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1,
++ hdr, min(len, 0x400u), 0);
+ }
+ } else if (hdr->code == DMA_RX_CODE_REPORT_DESCRIPTOR && hdr->data_size > 8) {
++ // Response to a 'get report descriptor' request.
++ // The actual descriptor is preceded by 8 nul bytes.
+ CHECK(hid_parse_report, ithc->hid, hiddata + 8, hdr->data_size - 8);
+ WRITE_ONCE(ithc->hid_parse_done, true);
+ wake_up(&ithc->wait_hid_parse);
+ } else if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
++ // Standard HID input report containing touch data.
+ CHECK(hid_input_report, ithc->hid, HID_INPUT_REPORT, hiddata, hdr->data_size, 1);
+ } else if (hdr->code == DMA_RX_CODE_FEATURE_REPORT) {
++ // Response to a 'get feature' request.
+ bool done = false;
+ mutex_lock(&ithc->hid_get_feature_mutex);
+ if (ithc->hid_get_feature_buf) {
+- if (hdr->data_size < ithc->hid_get_feature_size) ithc->hid_get_feature_size = hdr->data_size;
++ if (hdr->data_size < ithc->hid_get_feature_size)
++ ithc->hid_get_feature_size = hdr->data_size;
+ memcpy(ithc->hid_get_feature_buf, hiddata, ithc->hid_get_feature_size);
+ ithc->hid_get_feature_buf = NULL;
+ done = true;
+ }
+ mutex_unlock(&ithc->hid_get_feature_mutex);
+- if (done) wake_up(&ithc->wait_hid_get_feature);
+- else CHECK(hid_input_report, ithc->hid, HID_FEATURE_REPORT, hiddata, hdr->data_size, 1);
++ if (done) {
++ wake_up(&ithc->wait_hid_get_feature);
++ } else {
++ // Received data without a matching request, or the request already
++ // timed out. (XXX What's the correct thing to do here?)
++ CHECK(hid_input_report, ithc->hid, HID_FEATURE_REPORT,
++ hiddata, hdr->data_size, 1);
++ }
+ } else {
+- pci_dbg(ithc->pci, "unhandled dma rx data! channel %u, buffer %u, size %u, code %u\n", channel, buf, len, hdr->code);
+- print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0);
++ pci_dbg(ithc->pci, "unhandled dma rx data! channel %u, buffer %u, size %u, code %u\n",
++ channel, buf, len, hdr->code);
++ print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1,
++ hdr, min(len, 0x400u), 0);
+ }
+ return 0;
+ }
+
+-static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel) {
++static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel)
++{
++ // Process all filled RX buffers from the ringbuffer.
+ struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
+- unsigned n = rx->num_received;
++ unsigned int n = rx->num_received;
+ u8 head_wrap = readb(&ithc->regs->dma_rx[channel].head);
+ while (1) {
+ u8 tail = n % NUM_RX_BUF;
+@@ -204,7 +307,8 @@ static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel) {
+ writeb(tail_wrap, &ithc->regs->dma_rx[channel].tail);
+ // ringbuffer is full if tail_wrap == head_wrap
+ // ringbuffer is empty if tail_wrap == head_wrap ^ WRAP_FLAG
+- if (tail_wrap == (head_wrap ^ DMA_RX_WRAP_FLAG)) return 0;
++ if (tail_wrap == (head_wrap ^ DMA_RX_WRAP_FLAG))
++ return 0;
+
+ // take the buffer that the device just filled
+ struct ithc_dma_data_buffer *b = &rx->bufs[n % NUM_RX_BUF];
+@@ -218,7 +322,8 @@ static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel) {
+ CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, b, tail);
+ }
+ }
+-int ithc_dma_rx(struct ithc *ithc, u8 channel) {
++int ithc_dma_rx(struct ithc *ithc, u8 channel)
++{
+ struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
+ mutex_lock(&rx->mutex);
+ int ret = ithc_dma_rx_unlocked(ithc, channel);
+@@ -226,14 +331,21 @@ int ithc_dma_rx(struct ithc *ithc, u8 channel) {
+ return ret;
+ }
+
+-static int ithc_dma_tx_unlocked(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) {
++static int ithc_dma_tx_unlocked(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data)
++{
++ ithc_set_active(ithc, 100 * USEC_PER_MSEC);
++
++ // Send a single TX buffer to the THC.
+ pci_dbg(ithc->pci, "dma tx command %u, size %u\n", cmdcode, datasize);
+ struct ithc_dma_tx_header *hdr;
++ // Data must be padded to next 4-byte boundary.
+ u8 padding = datasize & 3 ? 4 - (datasize & 3) : 0;
+- unsigned fullsize = sizeof *hdr + datasize + padding;
+- if (fullsize > ithc->dma_tx.max_size || fullsize > PAGE_SIZE) return -EINVAL;
++ unsigned int fullsize = sizeof(*hdr) + datasize + padding;
++ if (fullsize > ithc->dma_tx.max_size || fullsize > PAGE_SIZE)
++ return -EINVAL;
+ CHECK_RET(ithc_dma_data_buffer_get, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
+
++ // Fill the TX buffer with header and data.
+ ithc->dma_tx.buf.data_size = fullsize;
+ hdr = ithc->dma_tx.buf.addr;
+ hdr->code = cmdcode;
+@@ -241,15 +353,18 @@ static int ithc_dma_tx_unlocked(struct ithc *ithc, u32 cmdcode, u32 datasize, vo
+ u8 *dest = (void *)(hdr + 1);
+ memcpy(dest, data, datasize);
+ dest += datasize;
+- for (u8 p = 0; p < padding; p++) *dest++ = 0;
++ for (u8 p = 0; p < padding; p++)
++ *dest++ = 0;
+ CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
+
++ // Let the THC process the buffer.
+ bitsb_set(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND);
+ CHECK_RET(waitb, ithc, &ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0);
+ writel(DMA_TX_STATUS_DONE, &ithc->regs->dma_tx.status);
+ return 0;
+ }
+-int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) {
++int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data)
++{
+ mutex_lock(&ithc->dma_tx.mutex);
+ int ret = ithc_dma_tx_unlocked(ithc, cmdcode, datasize, data);
+ mutex_unlock(&ithc->dma_tx.mutex);
+diff --git a/drivers/hid/ithc/ithc-dma.h b/drivers/hid/ithc/ithc-dma.h
+index d9f2c19a13f3..93652e4476bf 100644
+--- a/drivers/hid/ithc/ithc-dma.h
++++ b/drivers/hid/ithc/ithc-dma.h
+@@ -1,3 +1,5 @@
++/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
++
+ #define PRD_SIZE_MASK 0xffffff
+ #define PRD_FLAG_END 0x1000000
+ #define PRD_FLAG_SUCCESS 0x2000000
+@@ -59,7 +61,7 @@ struct ithc_dma_rx {
+ struct ithc_dma_data_buffer bufs[NUM_RX_BUF];
+ };
+
+-int ithc_dma_rx_init(struct ithc *ithc, u8 channel, const char *devname);
++int ithc_dma_rx_init(struct ithc *ithc, u8 channel);
+ void ithc_dma_rx_enable(struct ithc *ithc, u8 channel);
+ int ithc_dma_tx_init(struct ithc *ithc);
+ int ithc_dma_rx(struct ithc *ithc, u8 channel);
+diff --git a/drivers/hid/ithc/ithc-main.c b/drivers/hid/ithc/ithc-main.c
+index 09512b9cb4d3..87ed4aa70fda 100644
+--- a/drivers/hid/ithc/ithc-main.c
++++ b/drivers/hid/ithc/ithc-main.c
+@@ -1,3 +1,5 @@
++// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
++
+ #include "ithc.h"
+
+ MODULE_DESCRIPTION("Intel Touch Host Controller driver");
+@@ -42,6 +44,9 @@ static const struct pci_device_id ithc_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT2) },
++ // XXX So far the THC seems to be the only Intel PCI device with PCI_CLASS_INPUT_PEN,
++ // so instead of the device list we could just do:
++ // { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .class = PCI_CLASS_INPUT_PEN, .class_mask = ~0, },
+ {}
+ };
+ MODULE_DEVICE_TABLE(pci, ithc_pci_tbl);
+@@ -52,6 +57,7 @@ static bool ithc_use_polling = false;
+ module_param_named(poll, ithc_use_polling, bool, 0);
+ MODULE_PARM_DESC(poll, "Use polling instead of interrupts");
+
++// Since all known devices seem to use only channel 1, by default we disable channel 0.
+ static bool ithc_use_rx0 = false;
+ module_param_named(rx0, ithc_use_rx0, bool, 0);
+ MODULE_PARM_DESC(rx0, "Use DMA RX channel 0");
+@@ -60,37 +66,56 @@ static bool ithc_use_rx1 = true;
+ module_param_named(rx1, ithc_use_rx1, bool, 0);
+ MODULE_PARM_DESC(rx1, "Use DMA RX channel 1");
+
++// Values below 250 seem to work well on the SP7+. If this is set too high, you may observe cursor stuttering.
++static int ithc_dma_latency_us = 200;
++module_param_named(dma_latency_us, ithc_dma_latency_us, int, 0);
++MODULE_PARM_DESC(dma_latency_us, "Determines the CPU latency QoS value for DMA transfers (in microseconds), -1 to disable latency QoS");
++
++// Values above 1700 seem to work well on the SP7+. If this is set too low, you may observe cursor stuttering.
++static unsigned int ithc_dma_early_us = 2000;
++module_param_named(dma_early_us, ithc_dma_early_us, uint, 0);
++MODULE_PARM_DESC(dma_early_us, "Determines how early the CPU latency QoS value is applied before the next expected IRQ (in microseconds)");
++
+ static bool ithc_log_regs_enabled = false;
+ module_param_named(logregs, ithc_log_regs_enabled, bool, 0);
+ MODULE_PARM_DESC(logregs, "Log changes in register values (for debugging)");
+
+ // Sysfs attributes
+
+-static bool ithc_is_config_valid(struct ithc *ithc) {
++static bool ithc_is_config_valid(struct ithc *ithc)
++{
+ return ithc->config.device_id == DEVCFG_DEVICE_ID_TIC;
+ }
+
+-static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf) {
++static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
+ struct ithc *ithc = dev_get_drvdata(dev);
+- if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
++ if (!ithc || !ithc_is_config_valid(ithc))
++ return -ENODEV;
+ return sprintf(buf, "0x%04x", ithc->config.vendor_id);
+ }
+ static DEVICE_ATTR_RO(vendor);
+-static ssize_t product_show(struct device *dev, struct device_attribute *attr, char *buf) {
++static ssize_t product_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
+ struct ithc *ithc = dev_get_drvdata(dev);
+- if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
++ if (!ithc || !ithc_is_config_valid(ithc))
++ return -ENODEV;
+ return sprintf(buf, "0x%04x", ithc->config.product_id);
+ }
+ static DEVICE_ATTR_RO(product);
+-static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf) {
++static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
+ struct ithc *ithc = dev_get_drvdata(dev);
+- if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
++ if (!ithc || !ithc_is_config_valid(ithc))
++ return -ENODEV;
+ return sprintf(buf, "%u", ithc->config.revision);
+ }
+ static DEVICE_ATTR_RO(revision);
+-static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) {
++static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
+ struct ithc *ithc = dev_get_drvdata(dev);
+- if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
++ if (!ithc || !ithc_is_config_valid(ithc))
++ return -ENODEV;
+ u32 v = ithc->config.fw_version;
+ return sprintf(buf, "%i.%i.%i.%i", v >> 24, v >> 16 & 0xff, v >> 8 & 0xff, v & 0xff);
+ }
+@@ -117,45 +142,75 @@ static void ithc_hid_stop(struct hid_device *hdev) { }
+ static int ithc_hid_open(struct hid_device *hdev) { return 0; }
+ static void ithc_hid_close(struct hid_device *hdev) { }
+
+-static int ithc_hid_parse(struct hid_device *hdev) {
++static int ithc_hid_parse(struct hid_device *hdev)
++{
+ struct ithc *ithc = hdev->driver_data;
+ u64 val = 0;
+ WRITE_ONCE(ithc->hid_parse_done, false);
+- CHECK_RET(ithc_dma_tx, ithc, DMA_TX_CODE_GET_REPORT_DESCRIPTOR, sizeof val, &val);
+- if (!wait_event_timeout(ithc->wait_hid_parse, READ_ONCE(ithc->hid_parse_done), msecs_to_jiffies(1000))) return -ETIMEDOUT;
+- return 0;
++ for (int retries = 0; ; retries++) {
++ CHECK_RET(ithc_dma_tx, ithc, DMA_TX_CODE_GET_REPORT_DESCRIPTOR, sizeof(val), &val);
++ if (wait_event_timeout(ithc->wait_hid_parse, READ_ONCE(ithc->hid_parse_done),
++ msecs_to_jiffies(200)))
++ return 0;
++ if (retries > 5) {
++ pci_err(ithc->pci, "failed to read report descriptor\n");
++ return -ETIMEDOUT;
++ }
++ pci_warn(ithc->pci, "failed to read report descriptor, retrying\n");
++ }
+ }
+
+-static int ithc_hid_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf, size_t len, unsigned char rtype, int reqtype) {
++static int ithc_hid_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf,
++ size_t len, unsigned char rtype, int reqtype)
++{
+ struct ithc *ithc = hdev->driver_data;
+- if (!buf || !len) return -EINVAL;
++ if (!buf || !len)
++ return -EINVAL;
+ u32 code;
+- if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT) code = DMA_TX_CODE_OUTPUT_REPORT;
+- else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT) code = DMA_TX_CODE_SET_FEATURE;
+- else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT) code = DMA_TX_CODE_GET_FEATURE;
+- else {
+- pci_err(ithc->pci, "unhandled hid request %i %i for report id %i\n", rtype, reqtype, reportnum);
++ if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT) {
++ code = DMA_TX_CODE_OUTPUT_REPORT;
++ } else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT) {
++ code = DMA_TX_CODE_SET_FEATURE;
++ } else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT) {
++ code = DMA_TX_CODE_GET_FEATURE;
++ } else {
++ pci_err(ithc->pci, "unhandled hid request %i %i for report id %i\n",
++ rtype, reqtype, reportnum);
+ return -EINVAL;
+ }
+ buf[0] = reportnum;
++
+ if (reqtype == HID_REQ_GET_REPORT) {
++ // Prepare for response.
+ mutex_lock(&ithc->hid_get_feature_mutex);
+ ithc->hid_get_feature_buf = buf;
+ ithc->hid_get_feature_size = len;
+ mutex_unlock(&ithc->hid_get_feature_mutex);
++
++ // Transmit 'get feature' request.
+ int r = CHECK(ithc_dma_tx, ithc, code, 1, buf);
+ if (!r) {
+- r = wait_event_interruptible_timeout(ithc->wait_hid_get_feature, !ithc->hid_get_feature_buf, msecs_to_jiffies(1000));
+- if (!r) r = -ETIMEDOUT;
+- else if (r < 0) r = -EINTR;
+- else r = 0;
++ r = wait_event_interruptible_timeout(ithc->wait_hid_get_feature,
++ !ithc->hid_get_feature_buf, msecs_to_jiffies(1000));
++ if (!r)
++ r = -ETIMEDOUT;
++ else if (r < 0)
++ r = -EINTR;
++ else
++ r = 0;
+ }
++
++ // If everything went ok, the buffer has been filled with the response data.
++ // Return the response size.
+ mutex_lock(&ithc->hid_get_feature_mutex);
+ ithc->hid_get_feature_buf = NULL;
+- if (!r) r = ithc->hid_get_feature_size;
++ if (!r)
++ r = ithc->hid_get_feature_size;
+ mutex_unlock(&ithc->hid_get_feature_mutex);
+ return r;
+ }
++
++ // 'Set feature', or 'output report'. These don't have a response.
+ CHECK_RET(ithc_dma_tx, ithc, code, len, buf);
+ return 0;
+ }
+@@ -169,17 +224,22 @@ static struct hid_ll_driver ithc_ll_driver = {
+ .raw_request = ithc_hid_raw_request,
+ };
+
+-static void ithc_hid_devres_release(struct device *dev, void *res) {
++static void ithc_hid_devres_release(struct device *dev, void *res)
++{
+ struct hid_device **hidm = res;
+- if (*hidm) hid_destroy_device(*hidm);
++ if (*hidm)
++ hid_destroy_device(*hidm);
+ }
+
+-static int ithc_hid_init(struct ithc *ithc) {
+- struct hid_device **hidm = devres_alloc(ithc_hid_devres_release, sizeof *hidm, GFP_KERNEL);
+- if (!hidm) return -ENOMEM;
++static int ithc_hid_init(struct ithc *ithc)
++{
++ struct hid_device **hidm = devres_alloc(ithc_hid_devres_release, sizeof(*hidm), GFP_KERNEL);
++ if (!hidm)
++ return -ENOMEM;
+ devres_add(&ithc->pci->dev, hidm);
+ struct hid_device *hid = hid_allocate_device();
+- if (IS_ERR(hid)) return PTR_ERR(hid);
++ if (IS_ERR(hid))
++ return PTR_ERR(hid);
+ *hidm = hid;
+
+ strscpy(hid->name, DEVFULLNAME, sizeof(hid->name));
+@@ -198,27 +258,45 @@ static int ithc_hid_init(struct ithc *ithc) {
+
+ // Interrupts/polling
+
+-static void ithc_activity_timer_callback(struct timer_list *t) {
+- struct ithc *ithc = container_of(t, struct ithc, activity_timer);
+- cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
++static enum hrtimer_restart ithc_activity_start_timer_callback(struct hrtimer *t)
++{
++ struct ithc *ithc = container_of(t, struct ithc, activity_start_timer);
++ ithc_set_active(ithc, ithc_dma_early_us * 2 + USEC_PER_MSEC);
++ return HRTIMER_NORESTART;
+ }
+
+-void ithc_set_active(struct ithc *ithc) {
+- // When CPU usage is very low, the CPU can enter various low power states (C2-C10).
+- // This disrupts DMA, causing truncated DMA messages. ERROR_FLAG_DMA_UNKNOWN_12 will be set when this happens.
+- // The amount of truncated messages can become very high, resulting in user-visible effects (laggy/stuttering cursor).
+- // To avoid this, we use a CPU latency QoS request to prevent the CPU from entering low power states during touch interactions.
+- cpu_latency_qos_update_request(&ithc->activity_qos, 0);
+- mod_timer(&ithc->activity_timer, jiffies + msecs_to_jiffies(1000));
+-}
+-
+-static int ithc_set_device_enabled(struct ithc *ithc, bool enable) {
+- u32 x = ithc->config.touch_cfg = (ithc->config.touch_cfg & ~(u32)DEVCFG_TOUCH_MASK) | DEVCFG_TOUCH_UNKNOWN_2
+- | (enable ? DEVCFG_TOUCH_ENABLE | DEVCFG_TOUCH_UNKNOWN_3 | DEVCFG_TOUCH_UNKNOWN_4 : 0);
+- return ithc_spi_command(ithc, SPI_CMD_CODE_WRITE, offsetof(struct ithc_device_config, touch_cfg), sizeof x, &x);
++static enum hrtimer_restart ithc_activity_end_timer_callback(struct hrtimer *t)
++{
++ struct ithc *ithc = container_of(t, struct ithc, activity_end_timer);
++ cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
++ return HRTIMER_NORESTART;
+ }
+
+-static void ithc_disable_interrupts(struct ithc *ithc) {
++void ithc_set_active(struct ithc *ithc, unsigned int duration_us)
++{
++ if (ithc_dma_latency_us < 0)
++ return;
++ // When CPU usage is very low, the CPU can enter various low power states (C2-C10).
++ // This disrupts DMA, causing truncated DMA messages. ERROR_FLAG_DMA_RX_TIMEOUT will be
++ // set when this happens. The amount of truncated messages can become very high, resulting
++ // in user-visible effects (laggy/stuttering cursor). To avoid this, we use a CPU latency
++ // QoS request to prevent the CPU from entering low power states during touch interactions.
++ cpu_latency_qos_update_request(&ithc->activity_qos, ithc_dma_latency_us);
++ hrtimer_start_range_ns(&ithc->activity_end_timer,
++ ns_to_ktime(duration_us * NSEC_PER_USEC), duration_us * NSEC_PER_USEC, HRTIMER_MODE_REL);
++}
++
++static int ithc_set_device_enabled(struct ithc *ithc, bool enable)
++{
++ u32 x = ithc->config.touch_cfg =
++ (ithc->config.touch_cfg & ~(u32)DEVCFG_TOUCH_MASK) | DEVCFG_TOUCH_UNKNOWN_2 |
++ (enable ? DEVCFG_TOUCH_ENABLE | DEVCFG_TOUCH_UNKNOWN_3 | DEVCFG_TOUCH_UNKNOWN_4 : 0);
++ return ithc_spi_command(ithc, SPI_CMD_CODE_WRITE,
++ offsetof(struct ithc_device_config, touch_cfg), sizeof(x), &x);
++}
++
++static void ithc_disable_interrupts(struct ithc *ithc)
++{
+ writel(0, &ithc->regs->error_control);
+ bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_IRQ, 0);
+ bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0);
+@@ -226,43 +304,85 @@ static void ithc_disable_interrupts(struct ithc *ithc) {
+ bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_IRQ, 0);
+ }
+
+-static void ithc_clear_dma_rx_interrupts(struct ithc *ithc, unsigned channel) {
+- writel(DMA_RX_STATUS_ERROR | DMA_RX_STATUS_UNKNOWN_4 | DMA_RX_STATUS_HAVE_DATA, &ithc->regs->dma_rx[channel].status);
++static void ithc_clear_dma_rx_interrupts(struct ithc *ithc, unsigned int channel)
++{
++ writel(DMA_RX_STATUS_ERROR | DMA_RX_STATUS_UNKNOWN_4 | DMA_RX_STATUS_HAVE_DATA,
++ &ithc->regs->dma_rx[channel].status);
+ }
+
+-static void ithc_clear_interrupts(struct ithc *ithc) {
++static void ithc_clear_interrupts(struct ithc *ithc)
++{
+ writel(0xffffffff, &ithc->regs->error_flags);
+ writel(ERROR_STATUS_DMA | ERROR_STATUS_SPI, &ithc->regs->error_status);
+ writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
+ ithc_clear_dma_rx_interrupts(ithc, 0);
+ ithc_clear_dma_rx_interrupts(ithc, 1);
+- writel(DMA_TX_STATUS_DONE | DMA_TX_STATUS_ERROR | DMA_TX_STATUS_UNKNOWN_2, &ithc->regs->dma_tx.status);
++ writel(DMA_TX_STATUS_DONE | DMA_TX_STATUS_ERROR | DMA_TX_STATUS_UNKNOWN_2,
++ &ithc->regs->dma_tx.status);
+ }
+
+-static void ithc_process(struct ithc *ithc) {
++static void ithc_process(struct ithc *ithc)
++{
+ ithc_log_regs(ithc);
+
+- // read and clear error bits
++ bool rx0 = ithc_use_rx0 && (readl(&ithc->regs->dma_rx[0].status) & (DMA_RX_STATUS_ERROR | DMA_RX_STATUS_HAVE_DATA)) != 0;
++ bool rx1 = ithc_use_rx1 && (readl(&ithc->regs->dma_rx[1].status) & (DMA_RX_STATUS_ERROR | DMA_RX_STATUS_HAVE_DATA)) != 0;
++
++ // Track time between DMA rx transfers, so we can try to predict when we need to enable CPU latency QoS for the next transfer
++ ktime_t t = ktime_get();
++ ktime_t dt = ktime_sub(t, ithc->last_rx_time);
++ if (rx0 || rx1) {
++ ithc->last_rx_time = t;
++ if (dt > ms_to_ktime(100)) {
++ ithc->cur_rx_seq_count = 0;
++ ithc->cur_rx_seq_errors = 0;
++ }
++ ithc->cur_rx_seq_count++;
++ if (!ithc_use_polling && ithc_dma_latency_us >= 0) {
++ // Disable QoS, since the DMA transfer has completed (we re-enable it after a delay below)
++ cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
++ hrtimer_try_to_cancel(&ithc->activity_end_timer);
++ }
++ }
++
++ // Read and clear error bits
+ u32 err = readl(&ithc->regs->error_flags);
+ if (err) {
+- if (err & ~ERROR_FLAG_DMA_UNKNOWN_12) pci_err(ithc->pci, "error flags: 0x%08x\n", err);
+ writel(err, &ithc->regs->error_flags);
++ if (err & ~ERROR_FLAG_DMA_RX_TIMEOUT)
++ pci_err(ithc->pci, "error flags: 0x%08x\n", err);
++ if (err & ERROR_FLAG_DMA_RX_TIMEOUT) {
++ // Only log an error if we see a significant number of these errors.
++ ithc->cur_rx_seq_errors++;
++ if (ithc->cur_rx_seq_errors && ithc->cur_rx_seq_errors % 50 == 0 && ithc->cur_rx_seq_errors > ithc->cur_rx_seq_count / 10)
++ pci_err(ithc->pci, "High number of DMA RX timeouts/errors (%u/%u, dt=%lldus). Try adjusting dma_early_us and/or dma_latency_us.\n",
++ ithc->cur_rx_seq_errors, ithc->cur_rx_seq_count, ktime_to_us(dt));
++ }
+ }
+
+- // process DMA rx
++ // Process DMA rx
+ if (ithc_use_rx0) {
+ ithc_clear_dma_rx_interrupts(ithc, 0);
+- ithc_dma_rx(ithc, 0);
++ if (rx0)
++ ithc_dma_rx(ithc, 0);
+ }
+ if (ithc_use_rx1) {
+ ithc_clear_dma_rx_interrupts(ithc, 1);
+- ithc_dma_rx(ithc, 1);
++ if (rx1)
++ ithc_dma_rx(ithc, 1);
++ }
++
++ // Start timer to re-enable QoS for next rx, but only if we've seen an ERROR_FLAG_DMA_RX_TIMEOUT
++ if ((rx0 || rx1) && !ithc_use_polling && ithc_dma_latency_us >= 0 && ithc->cur_rx_seq_errors > 0) {
++ ktime_t expires = ktime_add(t, ktime_sub_us(dt, ithc_dma_early_us));
++ hrtimer_start_range_ns(&ithc->activity_start_timer, expires, 10 * NSEC_PER_USEC, HRTIMER_MODE_ABS);
+ }
+
+ ithc_log_regs(ithc);
+ }
+
+-static irqreturn_t ithc_interrupt_thread(int irq, void *arg) {
++static irqreturn_t ithc_interrupt_thread(int irq, void *arg)
++{
+ struct ithc *ithc = arg;
+ pci_dbg(ithc->pci, "IRQ! err=%08x/%08x/%08x, cmd=%02x/%08x, rx0=%02x/%08x, rx1=%02x/%08x, tx=%02x/%08x\n",
+ readl(&ithc->regs->error_control), readl(&ithc->regs->error_status), readl(&ithc->regs->error_flags),
+@@ -274,14 +394,21 @@ static irqreturn_t ithc_interrupt_thread(int irq, void *arg) {
+ return IRQ_HANDLED;
+ }
+
+-static int ithc_poll_thread(void *arg) {
++static int ithc_poll_thread(void *arg)
++{
+ struct ithc *ithc = arg;
+- unsigned sleep = 100;
++ unsigned int sleep = 100;
+ while (!kthread_should_stop()) {
+ u32 n = ithc->dma_rx[1].num_received;
+ ithc_process(ithc);
+- if (n != ithc->dma_rx[1].num_received) sleep = 20;
+- else sleep = min(200u, sleep + (sleep >> 4) + 1);
++ // Decrease polling interval to 20ms if we received data, otherwise slowly
++ // increase it up to 200ms.
++ if (n != ithc->dma_rx[1].num_received) {
++ ithc_set_active(ithc, 100 * USEC_PER_MSEC);
++ sleep = 20;
++ } else {
++ sleep = min(200u, sleep + (sleep >> 4) + 1);
++ }
+ msleep_interruptible(sleep);
+ }
+ return 0;
+@@ -289,7 +416,8 @@ static int ithc_poll_thread(void *arg) {
+
+ // Device initialization and shutdown
+
+-static void ithc_disable(struct ithc *ithc) {
++static void ithc_disable(struct ithc *ithc)
++{
+ bitsl_set(&ithc->regs->control_bits, CONTROL_QUIESCE);
+ CHECK(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, CONTROL_IS_QUIESCED);
+ bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
+@@ -301,81 +429,112 @@ static void ithc_disable(struct ithc *ithc) {
+ ithc_clear_interrupts(ithc);
+ }
+
+-static int ithc_init_device(struct ithc *ithc) {
++static int ithc_init_device(struct ithc *ithc)
++{
+ ithc_log_regs(ithc);
+ bool was_enabled = (readl(&ithc->regs->control_bits) & CONTROL_NRESET) != 0;
+ ithc_disable(ithc);
+ CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_READY, CONTROL_READY);
++
++ // Since we don't yet know which SPI config the device wants, use default speed and mode
++ // initially for reading config data.
+ ithc_set_spi_config(ithc, 10, 0);
+- bitsl_set(&ithc->regs->dma_rx[0].unknown_init_bits, 0x80000000); // seems to help with reading config
+
+- if (was_enabled) if (msleep_interruptible(100)) return -EINTR;
++ // Setting the following bit seems to make reading the config more reliable.
++ bitsl_set(&ithc->regs->dma_rx[0].unknown_init_bits, 0x80000000);
++
++ // If the device was previously enabled, wait a bit to make sure it's fully shut down.
++ if (was_enabled)
++ if (msleep_interruptible(100))
++ return -EINTR;
++
++ // Take the touch device out of reset.
+ bitsl(&ithc->regs->control_bits, CONTROL_QUIESCE, 0);
+ CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, 0);
+ for (int retries = 0; ; retries++) {
+ ithc_log_regs(ithc);
+ bitsl_set(&ithc->regs->control_bits, CONTROL_NRESET);
+- if (!waitl(ithc, &ithc->regs->state, 0xf, 2)) break;
++ if (!waitl(ithc, &ithc->regs->state, 0xf, 2))
++ break;
+ if (retries > 5) {
+- pci_err(ithc->pci, "too many retries, failed to reset device\n");
++ pci_err(ithc->pci, "failed to reset device, state = 0x%08x\n", readl(&ithc->regs->state));
+ return -ETIMEDOUT;
+ }
+- pci_err(ithc->pci, "invalid state, retrying reset\n");
++ pci_warn(ithc->pci, "invalid state, retrying reset\n");
+ bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
+- if (msleep_interruptible(1000)) return -EINTR;
++ if (msleep_interruptible(1000))
++ return -EINTR;
+ }
+ ithc_log_regs(ithc);
+
++ // Waiting for the following status bit makes reading config much more reliable,
++ // however the official driver does not seem to do this...
+ CHECK(waitl, ithc, &ithc->regs->dma_rx[0].status, DMA_RX_STATUS_UNKNOWN_4, DMA_RX_STATUS_UNKNOWN_4);
+
+- // read config
++ // Read configuration data.
+ for (int retries = 0; ; retries++) {
+ ithc_log_regs(ithc);
+- memset(&ithc->config, 0, sizeof ithc->config);
+- CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, 0, sizeof ithc->config, &ithc->config);
++ memset(&ithc->config, 0, sizeof(ithc->config));
++ CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, 0, sizeof(ithc->config), &ithc->config);
+ u32 *p = (void *)&ithc->config;
+ pci_info(ithc->pci, "config: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
+- if (ithc_is_config_valid(ithc)) break;
++ if (ithc_is_config_valid(ithc))
++ break;
+ if (retries > 10) {
+- pci_err(ithc->pci, "failed to read config, unknown device ID 0x%08x\n", ithc->config.device_id);
++ pci_err(ithc->pci, "failed to read config, unknown device ID 0x%08x\n",
++ ithc->config.device_id);
+ return -EIO;
+ }
+- pci_err(ithc->pci, "failed to read config, retrying\n");
+- if (msleep_interruptible(100)) return -EINTR;
++ pci_warn(ithc->pci, "failed to read config, retrying\n");
++ if (msleep_interruptible(100))
++ return -EINTR;
+ }
+ ithc_log_regs(ithc);
+
+- CHECK_RET(ithc_set_spi_config, ithc, DEVCFG_SPI_MAX_FREQ(ithc->config.spi_config), DEVCFG_SPI_MODE(ithc->config.spi_config));
++ // Apply SPI config and enable touch device.
++ CHECK_RET(ithc_set_spi_config, ithc,
++ DEVCFG_SPI_MAX_FREQ(ithc->config.spi_config),
++ DEVCFG_SPI_MODE(ithc->config.spi_config));
+ CHECK_RET(ithc_set_device_enabled, ithc, true);
+ ithc_log_regs(ithc);
+ return 0;
+ }
+
+-int ithc_reset(struct ithc *ithc) {
+- // FIXME This should probably do devres_release_group()+ithc_start(). But because this is called during DMA
+- // processing, that would have to be done asynchronously (schedule_work()?). And with extra locking?
++int ithc_reset(struct ithc *ithc)
++{
++ // FIXME This should probably do devres_release_group()+ithc_start().
++ // But because this is called during DMA processing, that would have to be done
++ // asynchronously (schedule_work()?). And with extra locking?
+ pci_err(ithc->pci, "reset\n");
+ CHECK(ithc_init_device, ithc);
+- if (ithc_use_rx0) ithc_dma_rx_enable(ithc, 0);
+- if (ithc_use_rx1) ithc_dma_rx_enable(ithc, 1);
++ if (ithc_use_rx0)
++ ithc_dma_rx_enable(ithc, 0);
++ if (ithc_use_rx1)
++ ithc_dma_rx_enable(ithc, 1);
+ ithc_log_regs(ithc);
+ pci_dbg(ithc->pci, "reset completed\n");
+ return 0;
+ }
+
+-static void ithc_stop(void *res) {
++static void ithc_stop(void *res)
++{
+ struct ithc *ithc = res;
+ pci_dbg(ithc->pci, "stopping\n");
+ ithc_log_regs(ithc);
+- if (ithc->poll_thread) CHECK(kthread_stop, ithc->poll_thread);
+- if (ithc->irq >= 0) disable_irq(ithc->irq);
++
++ if (ithc->poll_thread)
++ CHECK(kthread_stop, ithc->poll_thread);
++ if (ithc->irq >= 0)
++ disable_irq(ithc->irq);
+ CHECK(ithc_set_device_enabled, ithc, false);
+ ithc_disable(ithc);
+- del_timer_sync(&ithc->activity_timer);
++ hrtimer_cancel(&ithc->activity_start_timer);
++ hrtimer_cancel(&ithc->activity_end_timer);
+ cpu_latency_qos_remove_request(&ithc->activity_qos);
+- // clear dma config
+- for(unsigned i = 0; i < 2; i++) {
++
++ // Clear DMA config.
++ for (unsigned int i = 0; i < 2; i++) {
+ CHECK(waitl, ithc, &ithc->regs->dma_rx[i].status, DMA_RX_STATUS_ENABLED, 0);
+ lo_hi_writeq(0, &ithc->regs->dma_rx[i].addr);
+ writeb(0, &ithc->regs->dma_rx[i].num_bufs);
+@@ -383,35 +542,43 @@ static void ithc_stop(void *res) {
+ }
+ lo_hi_writeq(0, &ithc->regs->dma_tx.addr);
+ writeb(0, &ithc->regs->dma_tx.num_prds);
++
+ ithc_log_regs(ithc);
+ pci_dbg(ithc->pci, "stopped\n");
+ }
+
+-static void ithc_clear_drvdata(void *res) {
++static void ithc_clear_drvdata(void *res)
++{
+ struct pci_dev *pci = res;
+ pci_set_drvdata(pci, NULL);
+ }
+
+-static int ithc_start(struct pci_dev *pci) {
++static int ithc_start(struct pci_dev *pci)
++{
+ pci_dbg(pci, "starting\n");
+ if (pci_get_drvdata(pci)) {
+ pci_err(pci, "device already initialized\n");
+ return -EINVAL;
+ }
+- if (!devres_open_group(&pci->dev, ithc_start, GFP_KERNEL)) return -ENOMEM;
++ if (!devres_open_group(&pci->dev, ithc_start, GFP_KERNEL))
++ return -ENOMEM;
+
+- struct ithc *ithc = devm_kzalloc(&pci->dev, sizeof *ithc, GFP_KERNEL);
+- if (!ithc) return -ENOMEM;
++ // Allocate/init main driver struct.
++ struct ithc *ithc = devm_kzalloc(&pci->dev, sizeof(*ithc), GFP_KERNEL);
++ if (!ithc)
++ return -ENOMEM;
+ ithc->irq = -1;
+ ithc->pci = pci;
+- snprintf(ithc->phys, sizeof ithc->phys, "pci-%s/" DEVNAME, pci_name(pci));
++ snprintf(ithc->phys, sizeof(ithc->phys), "pci-%s/" DEVNAME, pci_name(pci));
+ init_waitqueue_head(&ithc->wait_hid_parse);
+ init_waitqueue_head(&ithc->wait_hid_get_feature);
+ mutex_init(&ithc->hid_get_feature_mutex);
+ pci_set_drvdata(pci, ithc);
+ CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_clear_drvdata, pci);
+- if (ithc_log_regs_enabled) ithc->prev_regs = devm_kzalloc(&pci->dev, sizeof *ithc->prev_regs, GFP_KERNEL);
++ if (ithc_log_regs_enabled)
++ ithc->prev_regs = devm_kzalloc(&pci->dev, sizeof(*ithc->prev_regs), GFP_KERNEL);
+
++ // PCI initialization.
+ CHECK_RET(pcim_enable_device, pci);
+ pci_set_master(pci);
+ CHECK_RET(pcim_iomap_regions, pci, BIT(0), DEVNAME " regs");
+@@ -419,29 +586,39 @@ static int ithc_start(struct pci_dev *pci) {
+ CHECK_RET(pci_set_power_state, pci, PCI_D0);
+ ithc->regs = pcim_iomap_table(pci)[0];
+
++ // Allocate IRQ.
+ if (!ithc_use_polling) {
+ CHECK_RET(pci_alloc_irq_vectors, pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ ithc->irq = CHECK(pci_irq_vector, pci, 0);
+- if (ithc->irq < 0) return ithc->irq;
++ if (ithc->irq < 0)
++ return ithc->irq;
+ }
+
++ // Initialize THC and touch device.
+ CHECK_RET(ithc_init_device, ithc);
+ CHECK(devm_device_add_groups, &pci->dev, ithc_attribute_groups);
+- if (ithc_use_rx0) CHECK_RET(ithc_dma_rx_init, ithc, 0, ithc_use_rx1 ? DEVNAME "0" : DEVNAME);
+- if (ithc_use_rx1) CHECK_RET(ithc_dma_rx_init, ithc, 1, ithc_use_rx0 ? DEVNAME "1" : DEVNAME);
++ if (ithc_use_rx0)
++ CHECK_RET(ithc_dma_rx_init, ithc, 0);
++ if (ithc_use_rx1)
++ CHECK_RET(ithc_dma_rx_init, ithc, 1);
+ CHECK_RET(ithc_dma_tx_init, ithc);
+
+- CHECK_RET(ithc_hid_init, ithc);
+-
+ cpu_latency_qos_add_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
+- timer_setup(&ithc->activity_timer, ithc_activity_timer_callback, 0);
++ hrtimer_init(&ithc->activity_start_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
++ ithc->activity_start_timer.function = ithc_activity_start_timer_callback;
++ hrtimer_init(&ithc->activity_end_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ ithc->activity_end_timer.function = ithc_activity_end_timer_callback;
+
+- // add ithc_stop callback AFTER setting up DMA buffers, so that polling/irqs/DMA are disabled BEFORE the buffers are freed
++ // Add ithc_stop() callback AFTER setting up DMA buffers, so that polling/irqs/DMA are
++ // disabled BEFORE the buffers are freed.
+ CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_stop, ithc);
+
++ CHECK_RET(ithc_hid_init, ithc);
++
++ // Start polling/IRQ.
+ if (ithc_use_polling) {
+ pci_info(pci, "using polling instead of irq\n");
+- // use a thread instead of simple timer because we want to be able to sleep
++ // Use a thread instead of a simple timer because we want to be able to sleep.
+ ithc->poll_thread = kthread_run(ithc_poll_thread, ithc, DEVNAME "poll");
+ if (IS_ERR(ithc->poll_thread)) {
+ int err = PTR_ERR(ithc->poll_thread);
+@@ -449,13 +626,17 @@ static int ithc_start(struct pci_dev *pci) {
+ return err;
+ }
+ } else {
+- CHECK_RET(devm_request_threaded_irq, &pci->dev, ithc->irq, NULL, ithc_interrupt_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, DEVNAME, ithc);
++ CHECK_RET(devm_request_threaded_irq, &pci->dev, ithc->irq, NULL,
++ ithc_interrupt_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, DEVNAME, ithc);
+ }
+
+- if (ithc_use_rx0) ithc_dma_rx_enable(ithc, 0);
+- if (ithc_use_rx1) ithc_dma_rx_enable(ithc, 1);
++ if (ithc_use_rx0)
++ ithc_dma_rx_enable(ithc, 0);
++ if (ithc_use_rx1)
++ ithc_dma_rx_enable(ithc, 1);
+
+- // hid_add_device can only be called after irq/polling is started and DMA is enabled, because it calls ithc_hid_parse which reads the report descriptor via DMA
++ // hid_add_device() can only be called after irq/polling is started and DMA is enabled,
++ // because it calls ithc_hid_parse() which reads the report descriptor via DMA.
+ CHECK_RET(hid_add_device, ithc->hid);
+
+ CHECK(ithc_debug_init, ithc);
+@@ -464,43 +645,54 @@ static int ithc_start(struct pci_dev *pci) {
+ return 0;
+ }
+
+-static int ithc_probe(struct pci_dev *pci, const struct pci_device_id *id) {
++static int ithc_probe(struct pci_dev *pci, const struct pci_device_id *id)
++{
+ pci_dbg(pci, "device probe\n");
+ return ithc_start(pci);
+ }
+
+-static void ithc_remove(struct pci_dev *pci) {
++static void ithc_remove(struct pci_dev *pci)
++{
+ pci_dbg(pci, "device remove\n");
+ // all cleanup is handled by devres
+ }
+
+-static int ithc_suspend(struct device *dev) {
++// For suspend/resume, we just deinitialize and reinitialize everything.
++// TODO It might be cleaner to keep the HID device around; however, we would then have to signal
++// to userspace that the touch device has lost state and userspace needs to e.g. resend 'set
++// feature' requests. Hidraw does not seem to have a facility to do that.
++static int ithc_suspend(struct device *dev)
++{
+ struct pci_dev *pci = to_pci_dev(dev);
+ pci_dbg(pci, "pm suspend\n");
+ devres_release_group(dev, ithc_start);
+ return 0;
+ }
+
+-static int ithc_resume(struct device *dev) {
++static int ithc_resume(struct device *dev)
++{
+ struct pci_dev *pci = to_pci_dev(dev);
+ pci_dbg(pci, "pm resume\n");
+ return ithc_start(pci);
+ }
+
+-static int ithc_freeze(struct device *dev) {
++static int ithc_freeze(struct device *dev)
++{
+ struct pci_dev *pci = to_pci_dev(dev);
+ pci_dbg(pci, "pm freeze\n");
+ devres_release_group(dev, ithc_start);
+ return 0;
+ }
+
+-static int ithc_thaw(struct device *dev) {
++static int ithc_thaw(struct device *dev)
++{
+ struct pci_dev *pci = to_pci_dev(dev);
+ pci_dbg(pci, "pm thaw\n");
+ return ithc_start(pci);
+ }
+
+-static int ithc_restore(struct device *dev) {
++static int ithc_restore(struct device *dev)
++{
+ struct pci_dev *pci = to_pci_dev(dev);
+ pci_dbg(pci, "pm restore\n");
+ return ithc_start(pci);
+@@ -521,11 +713,13 @@ static struct pci_driver ithc_driver = {
+ //.dev_groups = ithc_attribute_groups, // could use this (since 5.14), however the attributes won't have valid values until config has been read anyway
+ };
+
+-static int __init ithc_init(void) {
++static int __init ithc_init(void)
++{
+ return pci_register_driver(&ithc_driver);
+ }
+
+-static void __exit ithc_exit(void) {
++static void __exit ithc_exit(void)
++{
+ pci_unregister_driver(&ithc_driver);
+ }
+
+diff --git a/drivers/hid/ithc/ithc-regs.c b/drivers/hid/ithc/ithc-regs.c
+index 85d567b05761..e058721886e3 100644
+--- a/drivers/hid/ithc/ithc-regs.c
++++ b/drivers/hid/ithc/ithc-regs.c
+@@ -1,63 +1,95 @@
++// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
++
+ #include "ithc.h"
+
+ #define reg_num(r) (0x1fff & (u16)(__force u64)(r))
+
+-void bitsl(__iomem u32 *reg, u32 mask, u32 val) {
+- if (val & ~mask) pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", reg_num(reg), val, mask);
++void bitsl(__iomem u32 *reg, u32 mask, u32 val)
++{
++ if (val & ~mask)
++ pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n",
++ reg_num(reg), val, mask);
+ writel((readl(reg) & ~mask) | (val & mask), reg);
+ }
+
+-void bitsb(__iomem u8 *reg, u8 mask, u8 val) {
+- if (val & ~mask) pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", reg_num(reg), val, mask);
++void bitsb(__iomem u8 *reg, u8 mask, u8 val)
++{
++ if (val & ~mask)
++ pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n",
++ reg_num(reg), val, mask);
+ writeb((readb(reg) & ~mask) | (val & mask), reg);
+ }
+
+-int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val) {
+- pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", reg_num(reg), mask, val);
++int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val)
++{
++ pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%08x val 0x%08x\n",
++ reg_num(reg), mask, val);
+ u32 x;
+ if (readl_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) {
+- pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", reg_num(reg), mask, val);
++ pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%08x val 0x%08x\n",
++ reg_num(reg), mask, val);
+ return -ETIMEDOUT;
+ }
+ pci_dbg(ithc->pci, "done waiting\n");
+ return 0;
+ }
+
+-int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val) {
+- pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%02x val 0x%02x\n", reg_num(reg), mask, val);
++int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val)
++{
++ pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%02x val 0x%02x\n",
++ reg_num(reg), mask, val);
+ u8 x;
+ if (readb_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) {
+- pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%02x val 0x%02x\n", reg_num(reg), mask, val);
++ pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%02x val 0x%02x\n",
++ reg_num(reg), mask, val);
+ return -ETIMEDOUT;
+ }
+ pci_dbg(ithc->pci, "done waiting\n");
+ return 0;
+ }
+
+-int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode) {
++int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode)
++{
+ pci_dbg(ithc->pci, "setting SPI speed to %i, mode %i\n", speed, mode);
+- if (mode == 3) mode = 2;
++ if (mode == 3)
++ mode = 2;
+ bitsl(&ithc->regs->spi_config,
+ SPI_CONFIG_MODE(0xff) | SPI_CONFIG_SPEED(0xff) | SPI_CONFIG_UNKNOWN_18(0xff) | SPI_CONFIG_SPEED2(0xff),
+ SPI_CONFIG_MODE(mode) | SPI_CONFIG_SPEED(speed) | SPI_CONFIG_UNKNOWN_18(0) | SPI_CONFIG_SPEED2(speed));
+ return 0;
+ }
+
+-int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data) {
++int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data)
++{
+ pci_dbg(ithc->pci, "SPI command %u, size %u, offset %u\n", command, size, offset);
+- if (size > sizeof ithc->regs->spi_cmd.data) return -EINVAL;
++ if (size > sizeof(ithc->regs->spi_cmd.data))
++ return -EINVAL;
++
++ // Wait if the device is still busy.
+ CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0);
++ // Clear result flags.
+ writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
++
++ // Init SPI command data.
+ writeb(command, &ithc->regs->spi_cmd.code);
+ writew(size, &ithc->regs->spi_cmd.size);
+ writel(offset, &ithc->regs->spi_cmd.offset);
+ u32 *p = data, n = (size + 3) / 4;
+- for (u32 i = 0; i < n; i++) writel(p[i], &ithc->regs->spi_cmd.data[i]);
++ for (u32 i = 0; i < n; i++)
++ writel(p[i], &ithc->regs->spi_cmd.data[i]);
++
++ // Start transmission.
+ bitsb_set(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND);
+ CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0);
+- if ((readl(&ithc->regs->spi_cmd.status) & (SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR)) != SPI_CMD_STATUS_DONE) return -EIO;
+- if (readw(&ithc->regs->spi_cmd.size) != size) return -EMSGSIZE;
+- for (u32 i = 0; i < n; i++) p[i] = readl(&ithc->regs->spi_cmd.data[i]);
++
++ // Read response.
++ if ((readl(&ithc->regs->spi_cmd.status) & (SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR)) != SPI_CMD_STATUS_DONE)
++ return -EIO;
++ if (readw(&ithc->regs->spi_cmd.size) != size)
++ return -EMSGSIZE;
++ for (u32 i = 0; i < n; i++)
++ p[i] = readl(&ithc->regs->spi_cmd.data[i]);
++
+ writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
+ return 0;
+ }
+diff --git a/drivers/hid/ithc/ithc-regs.h b/drivers/hid/ithc/ithc-regs.h
+index 1a96092ed7ee..d4007d9e2bac 100644
+--- a/drivers/hid/ithc/ithc-regs.h
++++ b/drivers/hid/ithc/ithc-regs.h
+@@ -1,3 +1,5 @@
++/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
++
+ #define CONTROL_QUIESCE BIT(1)
+ #define CONTROL_IS_QUIESCED BIT(2)
+ #define CONTROL_NRESET BIT(3)
+@@ -24,7 +26,7 @@
+
+ #define ERROR_FLAG_DMA_UNKNOWN_9 BIT(9)
+ #define ERROR_FLAG_DMA_UNKNOWN_10 BIT(10)
+-#define ERROR_FLAG_DMA_UNKNOWN_12 BIT(12) // set when we receive a truncated DMA message
++#define ERROR_FLAG_DMA_RX_TIMEOUT BIT(12) // set when we receive a truncated DMA message
+ #define ERROR_FLAG_DMA_UNKNOWN_13 BIT(13)
+ #define ERROR_FLAG_SPI_BUS_TURNAROUND BIT(16)
+ #define ERROR_FLAG_SPI_RESPONSE_TIMEOUT BIT(17)
+@@ -67,6 +69,7 @@
+ #define DMA_RX_STATUS_HAVE_DATA BIT(5)
+ #define DMA_RX_STATUS_ENABLED BIT(8)
+
++// COUNTER_RESET can be written to counter registers to reset them to zero. However, in some cases this can mess up the THC.
+ #define COUNTER_RESET BIT(31)
+
+ struct ithc_registers {
+@@ -147,15 +150,15 @@ static_assert(sizeof(struct ithc_registers) == 0x1300);
+ #define DEVCFG_SPI_MAX_FREQ(x) (((x) >> 1) & 0xf) // high bit = use high speed mode?
+ #define DEVCFG_SPI_MODE(x) (((x) >> 6) & 3)
+ #define DEVCFG_SPI_UNKNOWN_8(x) (((x) >> 8) & 0x3f)
+-#define DEVCFG_SPI_NEEDS_HEARTBEAT BIT(20)
+-#define DEVCFG_SPI_HEARTBEAT_INTERVAL (((x) >> 21) & 7)
++#define DEVCFG_SPI_NEEDS_HEARTBEAT BIT(20) // TODO implement heartbeat
++#define DEVCFG_SPI_HEARTBEAT_INTERVAL(x) (((x) >> 21) & 7)
+ #define DEVCFG_SPI_UNKNOWN_25 BIT(25)
+ #define DEVCFG_SPI_UNKNOWN_26 BIT(26)
+ #define DEVCFG_SPI_UNKNOWN_27 BIT(27)
+-#define DEVCFG_SPI_DELAY (((x) >> 28) & 7)
+-#define DEVCFG_SPI_USE_EXT_READ_CFG BIT(31)
++#define DEVCFG_SPI_DELAY(x) (((x) >> 28) & 7) // TODO use this
++#define DEVCFG_SPI_USE_EXT_READ_CFG BIT(31) // TODO use this?
+
+-struct ithc_device_config {
++struct ithc_device_config { // (Example values are from an SP7+.)
+ u32 _unknown_00; // 00 = 0xe0000402 (0xe0000401 after DMA_RX_CODE_RESET)
+ u32 _unknown_04; // 04 = 0x00000000
+ u32 dma_buf_sizes; // 08 = 0x000a00ff
+@@ -166,9 +169,9 @@ struct ithc_device_config {
+ u16 vendor_id; // 1c = 0x045e = Microsoft Corp.
+ u16 product_id; // 1e = 0x0c1a
+ u32 revision; // 20 = 0x00000001
+- u32 fw_version; // 24 = 0x05008a8b = 5.0.138.139
++ u32 fw_version; // 24 = 0x05008a8b = 5.0.138.139 (this value looks more random on newer devices)
+ u32 _unknown_28; // 28 = 0x00000000
+- u32 fw_mode; // 2c = 0x00000000
++ u32 fw_mode; // 2c = 0x00000000 (for fw update?)
+ u32 _unknown_30; // 30 = 0x00000000
+ u32 _unknown_34; // 34 = 0x0404035e (u8,u8,u8,u8 = version?)
+ u32 _unknown_38; // 38 = 0x000001c0 (0x000001c1 after DMA_RX_CODE_RESET)
+diff --git a/drivers/hid/ithc/ithc.h b/drivers/hid/ithc/ithc.h
+index 6a9b0d480bc1..028e55a4ec53 100644
+--- a/drivers/hid/ithc/ithc.h
++++ b/drivers/hid/ithc/ithc.h
+@@ -1,3 +1,5 @@
++/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
++
+ #include <linux/module.h>
+ #include <linux/input.h>
+ #include <linux/hid.h>
+@@ -21,7 +23,7 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ #define CHECK(fn, ...) ({ int r = fn(__VA_ARGS__); if (r < 0) pci_err(ithc->pci, "%s: %s failed with %i\n", __func__, #fn, r); r; })
+-#define CHECK_RET(...) do { int r = CHECK(__VA_ARGS__); if (r < 0) return r; } while(0)
++#define CHECK_RET(...) do { int r = CHECK(__VA_ARGS__); if (r < 0) return r; } while (0)
+
+ #define NUM_RX_BUF 16
+
+@@ -35,8 +37,13 @@ struct ithc {
+ struct pci_dev *pci;
+ int irq;
+ struct task_struct *poll_thread;
++
+ struct pm_qos_request activity_qos;
+- struct timer_list activity_timer;
++ struct hrtimer activity_start_timer;
++ struct hrtimer activity_end_timer;
++ ktime_t last_rx_time;
++ unsigned int cur_rx_seq_count;
++ unsigned int cur_rx_seq_errors;
+
+ struct hid_device *hid;
+ bool hid_parse_done;
+@@ -54,7 +61,7 @@ struct ithc {
+ };
+
+ int ithc_reset(struct ithc *ithc);
+-void ithc_set_active(struct ithc *ithc);
++void ithc_set_active(struct ithc *ithc, unsigned int duration_us);
+ int ithc_debug_init(struct ithc *ithc);
+ void ithc_log_regs(struct ithc *ithc);
+
+--
+2.42.0
+
+From 1528e0fe127f03118f593fc1db51a4d4b5a20341 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Sun, 22 Oct 2023 14:57:11 +0200
+Subject: [PATCH] platform/surface: aggregator_registry: Add support for
+ Surface Laptop Go 3
+
+Add SAM client device nodes for the Surface Laptop Go 3. It seems to use
+the same SAM client devices as the Surface Laptop Go 1 and 2, so re-use
+their node group.
+
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
Patchset: surface-sam
---
- drivers/tty/serial/serial_core.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
+ drivers/platform/surface/surface_aggregator_registry.c | 3 +++
+ 1 file changed, 3 insertions(+)
-diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
-index 831d033611e6..e429ac42a12e 100644
---- a/drivers/tty/serial/serial_core.c
-+++ b/drivers/tty/serial/serial_core.c
-@@ -157,7 +157,7 @@ static void __uart_start(struct tty_struct *tty)
- * enabled, serial_port_runtime_resume() calls start_tx() again
- * after enabling the device.
- */
-- if (pm_runtime_active(&port_dev->dev))
-+ if (!pm_runtime_enabled(port->dev) || pm_runtime_active(port->dev))
- port->ops->start_tx(port);
- pm_runtime_mark_last_busy(&port_dev->dev);
- pm_runtime_put_autosuspend(&port_dev->dev);
+diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
+index 0fe5be539652..0d8c8395c588 100644
+--- a/drivers/platform/surface/surface_aggregator_registry.c
++++ b/drivers/platform/surface/surface_aggregator_registry.c
+@@ -367,6 +367,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
+ /* Surface Laptop Go 2 */
+ { "MSHW0290", (unsigned long)ssam_node_group_slg1 },
+
++ /* Surface Laptop Go 3 */
++ { "MSHW0440", (unsigned long)ssam_node_group_slg1 },
++
+ /* Surface Laptop Studio */
+ { "MSHW0123", (unsigned long)ssam_node_group_sls },
+
--
2.42.0
-From ef087e2d75b52e727db2e413fdbdc596e2babadb Mon Sep 17 00:00:00 2001
+From 2ba1a8bceb51c41accd3416aa85d86e58ae86d5d Mon Sep 17 00:00:00 2001
From: Maximilian Luz <luzmaximilian@gmail.com>
Date: Sat, 25 Jul 2020 17:19:53 +0200
Subject: [PATCH] i2c: acpi: Implement RawBytes read access
@@ -5449,7 +7019,7 @@ index d6037a328669..a290ebc77aea 100644
--
2.42.0
-From ec8fe494052629fc2a9f5234c674f42e42aee254 Mon Sep 17 00:00:00 2001
+From 646e55b10dd11dcabcb886a39c8d3b63ec16d867 Mon Sep 17 00:00:00 2001
From: Maximilian Luz <luzmaximilian@gmail.com>
Date: Sat, 13 Feb 2021 16:41:18 +0100
Subject: [PATCH] platform/surface: Add driver for Surface Book 1 dGPU switch
@@ -5672,7 +7242,7 @@ index 000000000000..8b816ed8f35c
--
2.42.0
-From b595029ce12d05babb051893a3b5ad8a0555a553 Mon Sep 17 00:00:00 2001
+From f7519ec63734ab71e46bf03b6a38573788dfa29d Mon Sep 17 00:00:00 2001
From: Sachi King <nakato@nakato.io>
Date: Tue, 5 Oct 2021 00:05:09 +1100
Subject: [PATCH] Input: soc_button_array - support AMD variant Surface devices
@@ -5749,7 +7319,7 @@ index e79f5497948b..2bddbe6e9ea4 100644
--
2.42.0
-From 701b3618008b9aad7c8f480e781a3fd412b169f0 Mon Sep 17 00:00:00 2001
+From 500ba0980d1e89041a9c83a3e937df0a2119f4af Mon Sep 17 00:00:00 2001
From: Sachi King <nakato@nakato.io>
Date: Tue, 5 Oct 2021 00:22:57 +1100
Subject: [PATCH] platform/surface: surfacepro3_button: don't load on amd
@@ -5821,7 +7391,7 @@ index 2755601f979c..4240c98ca226 100644
--
2.42.0
-From d38931d4fe25e2f1a02fcde88b0cbf0756304631 Mon Sep 17 00:00:00 2001
+From b478d859ace437e33ae34ff41d639566d877fff5 Mon Sep 17 00:00:00 2001
From: Maximilian Luz <luzmaximilian@gmail.com>
Date: Sat, 18 Feb 2023 01:02:49 +0100
Subject: [PATCH] USB: quirks: Add USB_QUIRK_DELAY_INIT for Surface Go 3
@@ -5862,7 +7432,7 @@ index 15e9bd180a1d..0d70461d01e1 100644
--
2.42.0
-From 75a8d59dc8c049e0fe3eaeb509552a37ba3dc6f0 Mon Sep 17 00:00:00 2001
+From edb9e1949c6164b318183b3796eab9bbd5e3297e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonas=20Dre=C3=9Fler?= <verdre@v0yd.nl>
Date: Thu, 5 Nov 2020 13:09:45 +0100
Subject: [PATCH] hid/multitouch: Turn off Type Cover keyboard backlight when
@@ -6095,7 +7665,7 @@ index 521b2ffb4244..c8f3d05c8866 100644
--
2.42.0
-From 6387e47aa58153a28176ffb65da4f3c40842b495 Mon Sep 17 00:00:00 2001
+From 687dbad7b7d5da3923289f28fb576231ffe79f99 Mon Sep 17 00:00:00 2001
From: PJungkamp <p.jungkamp@gmail.com>
Date: Fri, 25 Feb 2022 12:04:25 +0100
Subject: [PATCH] hid/multitouch: Add support for surface pro type cover tablet
@@ -6395,7 +7965,7 @@ index c8f3d05c8866..1c6e4d66e762 100644
--
2.42.0
-From 42a0ecfd028c26a4ea5a5f8108c36fe3ffe4f6a2 Mon Sep 17 00:00:00 2001
+From 9349467c1adee5875fe9b6664f280a2d1ff77fb6 Mon Sep 17 00:00:00 2001
From: Maximilian Luz <luzmaximilian@gmail.com>
Date: Sun, 19 Feb 2023 22:12:24 +0100
Subject: [PATCH] PCI: Add quirk to prevent calling shutdown method
@@ -6420,7 +7990,7 @@ Patchset: surface-shutdown
3 files changed, 40 insertions(+)
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
-index ae9baf801681..fdfaec2312a0 100644
+index 41ee3dd8cecb..0bc473c2c187 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -507,6 +507,9 @@ static void pci_device_shutdown(struct device *dev)
@@ -6492,7 +8062,7 @@ index 7ee498cd1f37..740049a82343 100644
--
2.42.0
-From 0e7f1e2a3a8d9bfbacaa6dfd2f902de83390b4c1 Mon Sep 17 00:00:00 2001
+From cc9f1aa3808d704ee8cbdb971ff2395d8db57b6d Mon Sep 17 00:00:00 2001
From: Maximilian Luz <luzmaximilian@gmail.com>
Date: Sun, 12 Mar 2023 01:41:57 +0100
Subject: [PATCH] platform/surface: gpe: Add support for Surface Pro 9
@@ -6543,7 +8113,7 @@ index c219b840d491..69c4352e8406 100644
--
2.42.0
-From 91a302af94f03251baa2e66abe81b0beb967a4ae Mon Sep 17 00:00:00 2001
+From 6d04b8ab365775a6b1ab7b7c560b5d50e78bb783 Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Sun, 10 Oct 2021 20:56:57 +0200
Subject: [PATCH] ACPI: delay enumeration of devices with a _DEP pointing to an
@@ -6619,7 +8189,7 @@ index 87e385542576..1183d09c13a6 100644
--
2.42.0
-From 2b033206c433221e3955fab9f7cc0ce07794a2ec Mon Sep 17 00:00:00 2001
+From df1aa869818d56d8153223c60d74fb307cf3ff81 Mon Sep 17 00:00:00 2001
From: zouxiaoh <xiaohong.zou@intel.com>
Date: Fri, 25 Jun 2021 08:52:59 +0800
Subject: [PATCH] iommu: intel-ipu: use IOMMU passthrough mode for Intel IPUs
@@ -6645,7 +8215,7 @@ Patchset: cameras
1 file changed, 30 insertions(+)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
-index fc4799415c3c..e10b4f62594d 100644
+index 6387f3a6eccf..d75107c47de0 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -37,6 +37,12 @@
@@ -6696,7 +8266,7 @@ index fc4799415c3c..e10b4f62594d 100644
if (!dmar_map_ipts)
iommu_identity_mapping |= IDENTMAP_IPTS;
-@@ -4781,6 +4795,18 @@ static void quirk_iommu_igfx(struct pci_dev *dev)
+@@ -4765,6 +4779,18 @@ static void quirk_iommu_igfx(struct pci_dev *dev)
dmar_map_gfx = 0;
}
@@ -6715,7 +8285,7 @@ index fc4799415c3c..e10b4f62594d 100644
static void quirk_iommu_ipts(struct pci_dev *dev)
{
if (!IS_IPTS(dev))
-@@ -4792,6 +4818,7 @@ static void quirk_iommu_ipts(struct pci_dev *dev)
+@@ -4776,6 +4802,7 @@ static void quirk_iommu_ipts(struct pci_dev *dev)
pci_info(dev, "Passthrough IOMMU for IPTS\n");
dmar_map_ipts = 0;
}
@@ -6723,7 +8293,7 @@ index fc4799415c3c..e10b4f62594d 100644
/* G4x/GM45 integrated gfx dmar support is totally busted. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
-@@ -4827,6 +4854,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
+@@ -4811,6 +4838,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
@@ -6736,7 +8306,7 @@ index fc4799415c3c..e10b4f62594d 100644
--
2.42.0
-From eec09b357ed7b25912c9bf682e21fa0f1344e912 Mon Sep 17 00:00:00 2001
+From 58f403707b4b56371254100412f325a30ef1707d Mon Sep 17 00:00:00 2001
From: Daniel Scally <djrscally@gmail.com>
Date: Sun, 10 Oct 2021 20:57:02 +0200
Subject: [PATCH] platform/x86: int3472: Enable I2c daisy chain
@@ -6773,7 +8343,7 @@ index 1e107fd49f82..e3e1696e7f0e 100644
--
2.42.0
-From 3ee1b7ce0457aba33817a415213caf951dab2cbf Mon Sep 17 00:00:00 2001
+From 60a2d404d65225351612a7d3d3d5a1d1a382ca82 Mon Sep 17 00:00:00 2001
From: Daniel Scally <djrscally@gmail.com>
Date: Wed, 4 May 2022 23:21:45 +0100
Subject: [PATCH] media: ipu3-cio2: Move functionality from .complete() to
@@ -6882,7 +8452,7 @@ index ca51776a961f..c027b2bfd851 100644
--
2.42.0
-From 43767f29d49a977f39fe9bcbd9d347b5b2549f3d Mon Sep 17 00:00:00 2001
+From e9b59c24855f146141b665efed05b7455aeb5fd4 Mon Sep 17 00:00:00 2001
From: Daniel Scally <djrscally@gmail.com>
Date: Thu, 2 Jun 2022 22:15:56 +0100
Subject: [PATCH] media: ipu3-cio2: Re-add .complete() to ipu3-cio2
@@ -6925,7 +8495,7 @@ index c027b2bfd851..031acee26553 100644
--
2.42.0
-From 5aa4977340e179ed4e2b7e1236ee8f0ed5af3d4b Mon Sep 17 00:00:00 2001
+From 0926490302f353661c1a454f27611f5c94d928f0 Mon Sep 17 00:00:00 2001
From: Daniel Scally <djrscally@gmail.com>
Date: Thu, 28 Oct 2021 21:55:16 +0100
Subject: [PATCH] media: i2c: Add driver for DW9719 VCM
@@ -7429,7 +8999,7 @@ index 000000000000..180b04d2a6b3
--
2.42.0
-From c5d772b1854f0ac07aa20ba39e7e0d513c14cd2a Mon Sep 17 00:00:00 2001
+From 3a35f5894c7e979357c64bbf198bd35dc3489a0a Mon Sep 17 00:00:00 2001
From: Maximilian Luz <luzmaximilian@gmail.com>
Date: Fri, 15 Jul 2022 23:48:00 +0200
Subject: [PATCH] drivers/media/i2c: Fix DW9719 dependencies
@@ -7458,7 +9028,7 @@ index 6959ee1a89fb..1d5082fe9ce3 100644
--
2.42.0
-From a91910f04221481012703436c3e821a54332beee Mon Sep 17 00:00:00 2001
+From acbd8c1b38ffee270862e8efcf68ae3580b1b82d Mon Sep 17 00:00:00 2001
From: Daniel Scally <dan.scally@ideasonboard.com>
Date: Thu, 2 Mar 2023 12:59:39 +0000
Subject: [PATCH] platform/x86: int3472: Remap reset GPIO for INT347E
@@ -7514,7 +9084,7 @@ index e33c2d75975c..c0c90ae66b70 100644
--
2.42.0
-From e33de4cf8fa02ac63ceb16f3f860ddfae8ea8726 Mon Sep 17 00:00:00 2001
+From b2ae8fc0aecde894bf6e8f52e515de645f1b4739 Mon Sep 17 00:00:00 2001
From: Daniel Scally <dan.scally@ideasonboard.com>
Date: Tue, 21 Mar 2023 13:45:26 +0000
Subject: [PATCH] media: i2c: Clarify that gain is Analogue gain in OV7251
@@ -7553,7 +9123,7 @@ index 675fb37a6fea..43b30db08c9e 100644
--
2.42.0
-From 4c3f0ec8d169cd6114990df67b3bb43dc351f951 Mon Sep 17 00:00:00 2001
+From 590fe333492348f48411fd9c2fc14d4302223266 Mon Sep 17 00:00:00 2001
From: Daniel Scally <dan.scally@ideasonboard.com>
Date: Wed, 22 Mar 2023 11:01:42 +0000
Subject: [PATCH] media: v4l2-core: Acquire privacy led in
@@ -7604,7 +9174,7 @@ index 4fa9225aa3d9..ed4c75253cbc 100644
--
2.42.0
-From 6d72225fd0eefb1e15f645d404dfa31873ce5a8b Mon Sep 17 00:00:00 2001
+From cf42b6660ce6a705292e0cd0df8e7d39dcae8f04 Mon Sep 17 00:00:00 2001
From: Kate Hsuan <hpa@redhat.com>
Date: Tue, 21 Mar 2023 23:37:16 +0800
Subject: [PATCH] platform: x86: int3472: Add MFD cell for tps68470 LED
@@ -7645,7 +9215,7 @@ index e3e1696e7f0e..423dc555093f 100644
--
2.42.0
-From 591153826d40ce4d57f2d5c4fb133e36ac187c8e Mon Sep 17 00:00:00 2001
+From 5ef4b0088658da64a7f39c37e33aad3469b3fb39 Mon Sep 17 00:00:00 2001
From: Kate Hsuan <hpa@redhat.com>
Date: Tue, 21 Mar 2023 23:37:17 +0800
Subject: [PATCH] include: mfd: tps68470: Add masks for LEDA and LEDB
@@ -7686,7 +9256,7 @@ index 7807fa329db0..2d2abb25b944 100644
--
2.42.0
-From 8c8c933fccb6d380bc2dc81625ca4419c826656d Mon Sep 17 00:00:00 2001
+From 3296fb49f2be7439c31fb45250c403a484107463 Mon Sep 17 00:00:00 2001
From: Kate Hsuan <hpa@redhat.com>
Date: Tue, 21 Mar 2023 23:37:18 +0800
Subject: [PATCH] leds: tps68470: Add LED control for tps68470
@@ -7937,7 +9507,7 @@ index 000000000000..35aeb5db89c8
--
2.42.0
-From f30fc6335df8d95752ecfe086b8a0b1c43039073 Mon Sep 17 00:00:00 2001
+From bc06956985ee377fba5e8a4483d8777373c3d54c Mon Sep 17 00:00:00 2001
From: Sachi King <nakato@nakato.io>
Date: Sat, 29 May 2021 17:47:38 +1000
Subject: [PATCH] ACPI: Add quirk for Surface Laptop 4 AMD missing irq 7
@@ -8004,7 +9574,7 @@ index 53369c57751e..1ec1a9015178 100644
--
2.42.0
-From 550bd4c801d8651b3ee57a7e412b10f0e3dc8ebb Mon Sep 17 00:00:00 2001
+From 221495b0d4fc09a39ddff1ea38a82f621e26f722 Mon Sep 17 00:00:00 2001
From: Maximilian Luz <luzmaximilian@gmail.com>
Date: Thu, 3 Jun 2021 14:04:26 +0200
Subject: [PATCH] ACPI: Add AMD 13" Surface Laptop 4 model to irq 7 override
@@ -8046,7 +9616,7 @@ index 1ec1a9015178..a7d40015e46a 100644
--
2.42.0
-From ecca82459be635f742336b861f26018c16aeb18f Mon Sep 17 00:00:00 2001
+From 709097b5e8cea4cb5296d01c6ba30c2761c1a36c Mon Sep 17 00:00:00 2001
From: "Bart Groeneveld | GPX Solutions B.V" <bart@gpxbv.nl>
Date: Mon, 5 Dec 2022 16:08:46 +0100
Subject: [PATCH] acpi: allow usage of acpi_tad on HW-reduced platforms
diff --git a/SOURCES/patch-6.5-redhat.patch b/SOURCES/patch-6.5-redhat.patch
index 9a9cdb7..619d79d 100644
--- a/SOURCES/patch-6.5-redhat.patch
+++ b/SOURCES/patch-6.5-redhat.patch
@@ -42,7 +42,7 @@
41 files changed, 554 insertions(+), 189 deletions(-)
diff --git a/Makefile b/Makefile
-index f9d5970f3441..e9655d0c31ca 100644
+index a687c9a0646c..82992814939e 100644
--- a/Makefile
+++ b/Makefile
@@ -22,6 +22,18 @@ $(if $(filter __%, $(MAKECMDGOALS)), \
@@ -1241,10 +1241,10 @@ index d6535cbb4e05..bcaad1fc5d8d 100644
if (err)
goto err_out_driver;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index 26a27ff50408..e1050b7dd63d 100644
+index f2c561ae4bfe..36805f1c5786 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
-@@ -5805,6 +5805,13 @@ static void hub_event(struct work_struct *work)
+@@ -5824,6 +5824,13 @@ static void hub_event(struct work_struct *work)
(u16) hub->change_bits[0],
(u16) hub->event_bits[0]);
diff --git a/SPECS/kernel.spec b/SPECS/kernel.spec
index d6cba3c..9a0610f 100644
--- a/SPECS/kernel.spec
+++ b/SPECS/kernel.spec
@@ -160,18 +160,18 @@ Summary: The Linux kernel
# the --with-release option overrides this setting.)
%define debugbuildsenabled 1
%define buildid .fsync
-%define specrpmversion 6.5.7
-%define specversion 6.5.7
+%define specrpmversion 6.5.8
+%define specversion 6.5.8
%define patchversion 6.5
%define pkgrelease 200
%define kversion 6
-%define tarfile_release 6.5.7
+%define tarfile_release 6.5.8
# This is needed to do merge window version magic
%define patchlevel 5
# This allows pkg_release to have configurable %%{?dist} tag
%define specrelease 201%{?buildid}%{?dist}
# This defines the kabi tarball version
-%define kabiversion 6.5.7
+%define kabiversion 6.5.8
# If this variable is set to 1, a bpf selftests build failure will cause a
# fatal kernel package build error
@@ -3779,8 +3779,11 @@ fi\
#
#
%changelog
-* Fri Oct 13 2023 Jan Drögehoff <sentrycraft123@gmail.com> - 6.5.7-201.fsync
-- kernel-fsync v6.5.7
+* Sat Oct 28 2023 Jan Drögehoff <sentrycraft123@gmail.com> - 6.5.8-200.1
+- kernel-fsync v6.5.8
+
+* Fri Oct 20 2023 Augusto Caringi <acaringi@redhat.com> [6.5.8-0]
+- Linux v6.5.8
* Tue Oct 10 2023 Augusto Caringi <acaringi@redhat.com> [6.5.7-0]
- common: aarch64: enable CONFIG_ARM64_ERRATUM_2966298 (Augusto Caringi)