1 From b9936aa8a3775c2027f655d91a206d0e6e1c7ec0 Mon Sep 17 00:00:00 2001
2 From: Daniel Golle <daniel@makrotopia.org>
3 Date: Tue, 11 Jul 2023 00:17:31 +0100
4 Subject: [PATCH 11/15] block: implement NVMEM provider
6 On embedded devices using an eMMC it is common that one or more partitions
7 on the eMMC are used to store MAC addresses and Wi-Fi calibration EEPROM
8 data. Allow referencing the partition in device tree for the kernel and
9 Wi-Fi drivers accessing it via the NVMEM layer.
11 Signed-off-by: Daniel Golle <daniel@makrotopia.org>
15 block/blk-nvmem.c | 186 ++++++++++++++++++++++++++++++++++++++++++++++
16 3 files changed, 196 insertions(+)
17 create mode 100644 block/blk-nvmem.c
21 @@ -208,6 +208,15 @@ config BLK_INLINE_ENCRYPTION_FALLBACK
22 by falling back to the kernel crypto API when inline
23 encryption hardware is not present.
26 + bool "Block device NVMEM provider"
30 +	  Allow block devices (or partitions) to act as NVMEM providers,
31 + typically used with eMMC to store MAC addresses or Wi-Fi
32 + calibration data on embedded devices.
34 source "block/partitions/Kconfig"
39 @@ -34,6 +34,7 @@ obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned
40 obj-$(CONFIG_BLK_WBT) += blk-wbt.o
41 obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
42 obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o
43 +obj-$(CONFIG_BLK_NVMEM) += blk-nvmem.o
44 obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o
45 obj-$(CONFIG_BLK_PM) += blk-pm.o
46 obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += blk-crypto.o blk-crypto-profile.o \
48 +++ b/block/blk-nvmem.c
50 +// SPDX-License-Identifier: GPL-2.0-or-later
52 + * block device NVMEM provider
54 + * Copyright (c) 2023 Daniel Golle <daniel@makrotopia.org>
56 + * Useful on devices using a partition on an eMMC for MAC addresses or
57 + * Wi-Fi calibration EEPROM data.
61 +#include <linux/nvmem-provider.h>
62 +#include <linux/of.h>
63 +#include <linux/pagemap.h>
64 +#include <linux/property.h>
66 +/* List of all NVMEM devices */
67 +static LIST_HEAD(nvmem_devices);
68 +static DEFINE_MUTEX(devices_mutex);
71 + struct nvmem_device *nvmem;
72 + struct block_device *bdev;
73 + struct list_head list;
76 +static int blk_nvmem_reg_read(void *priv, unsigned int from,
77 + void *val, size_t bytes)
79 + unsigned long offs = from & ~PAGE_MASK, to_read;
80 + pgoff_t f_index = from >> PAGE_SHIFT;
81 + struct address_space *mapping;
82 + struct blk_nvmem *bnv = priv;
83 + size_t bytes_left = bytes;
84 + struct folio *folio;
91 + if (!bnv->bdev->bd_disk)
94 + if (!bnv->bdev->bd_disk->fops)
97 + if (!bnv->bdev->bd_disk->fops->open)
100 + ret = bnv->bdev->bd_disk->fops->open(bnv->bdev->bd_disk, BLK_OPEN_READ);
104 + mapping = bnv->bdev->bd_inode->i_mapping;
106 + while (bytes_left) {
107 + folio = read_mapping_folio(mapping, f_index++, NULL);
108 + if (IS_ERR(folio)) {
109 + ret = PTR_ERR(folio);
110 + goto err_release_bdev;
112 + to_read = min_t(unsigned long, bytes_left, PAGE_SIZE - offs);
113 + p = folio_address(folio) + offset_in_folio(folio, offs);
114 + memcpy(val, p, to_read);
116 + bytes_left -= to_read;
122 + bnv->bdev->bd_disk->fops->release(bnv->bdev->bd_disk);
127 +static int blk_nvmem_register(struct device *dev)
129 + struct device_node *np = dev_of_node(dev);
130 + struct block_device *bdev = dev_to_bdev(dev);
131 + struct nvmem_config config = {};
132 + struct blk_nvmem *bnv;
134 + /* skip devices which do not have a device tree node */
138 + /* skip devices without an nvmem layout defined */
139 + if (!of_get_child_by_name(np, "nvmem-layout"))
143 + * skip devices which don't have GENHD_FL_NVMEM set
145 + * This flag is used for mtdblock and ubiblock devices because
146 +	 * both MTD and UBI already implement their own NVMEM provider.
147 + * To avoid registering multiple NVMEM providers for the same
148 + * device node, don't register the block NVMEM provider for them.
150 + if (!(bdev->bd_disk->flags & GENHD_FL_NVMEM))
154 +	 * skip block devices too large to be represented as NVMEM devices
155 + * which are using an 'int' as address
157 + if (bdev_nr_bytes(bdev) > INT_MAX)
160 + bnv = kzalloc(sizeof(struct blk_nvmem), GFP_KERNEL);
164 + config.id = NVMEM_DEVID_NONE;
165 + config.dev = &bdev->bd_device;
166 + config.name = dev_name(&bdev->bd_device);
167 + config.owner = THIS_MODULE;
169 + config.reg_read = blk_nvmem_reg_read;
170 + config.size = bdev_nr_bytes(bdev);
171 + config.word_size = 1;
173 + config.read_only = true;
174 + config.root_only = true;
175 + config.ignore_wp = true;
176 + config.of_node = to_of_node(dev->fwnode);
179 + bnv->nvmem = nvmem_register(&config);
180 + if (IS_ERR(bnv->nvmem)) {
181 + dev_err_probe(&bdev->bd_device, PTR_ERR(bnv->nvmem),
182 + "Failed to register NVMEM device\n");
185 + return PTR_ERR(bnv->nvmem);
188 + mutex_lock(&devices_mutex);
189 + list_add_tail(&bnv->list, &nvmem_devices);
190 + mutex_unlock(&devices_mutex);
195 +static void blk_nvmem_unregister(struct device *dev)
197 + struct block_device *bdev = dev_to_bdev(dev);
198 + struct blk_nvmem *bnv_c, *bnv = NULL;
200 + mutex_lock(&devices_mutex);
201 + list_for_each_entry(bnv_c, &nvmem_devices, list) {
202 + if (bnv_c->bdev == bdev) {
209 + mutex_unlock(&devices_mutex);
213 + list_del(&bnv->list);
214 + mutex_unlock(&devices_mutex);
215 + nvmem_unregister(bnv->nvmem);
219 +static struct class_interface blk_nvmem_bus_interface __refdata = {
220 + .class = &block_class,
221 + .add_dev = &blk_nvmem_register,
222 + .remove_dev = &blk_nvmem_unregister,
225 +static int __init blk_nvmem_init(void)
229 + ret = class_interface_register(&blk_nvmem_bus_interface);
235 +device_initcall(blk_nvmem_init);