1 /*
2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13 #include <linux/list_sort.h>
14 #include <linux/libnvdimm.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/ndctl.h>
18 #include <linux/sysfs.h>
19 #include <linux/delay.h>
20 #include <linux/list.h>
21 #include <linux/acpi.h>
22 #include <linux/sort.h>
23 #include <linux/io.h>
24 #include <linux/nd.h>
25 #include <asm/cacheflush.h>
26 #include <acpi/nfit.h>
27 #include "nfit.h"
29 /*
30 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
31 * irrelevant.
32 */
33 #include <linux/io-64-nonatomic-hi-lo.h>
35 static bool force_enable_dimms;
36 module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
37 MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");
39 static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
40 module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
41 MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");
43 /* after three payloads of overflow, it's dead jim */
44 static unsigned int scrub_overflow_abort = 3;
45 module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
46 MODULE_PARM_DESC(scrub_overflow_abort,
47 "Number of times we overflow ARS results before abort");
49 static bool disable_vendor_specific;
50 module_param(disable_vendor_specific, bool, S_IRUGO);
51 MODULE_PARM_DESC(disable_vendor_specific,
52 "Limit commands to the publicly specified set");
54 static unsigned long override_dsm_mask;
55 module_param(override_dsm_mask, ulong, S_IRUGO);
56 MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");
58 static int default_dsm_family = -1;
59 module_param(default_dsm_family, int, S_IRUGO);
60 MODULE_PARM_DESC(default_dsm_family,
61 "Try this DSM type first when identifying NVDIMM family");
63 LIST_HEAD(acpi_descs);
64 DEFINE_MUTEX(acpi_desc_lock);
66 static struct workqueue_struct *nfit_wq;
68 struct nfit_table_prev {
69 struct list_head spas;
70 struct list_head memdevs;
71 struct list_head dcrs;
72 struct list_head bdws;
73 struct list_head idts;
74 struct list_head flushes;
77 static guid_t nfit_uuid[NFIT_UUID_MAX];
79 const guid_t *to_nfit_uuid(enum nfit_uuids id)
81 return &nfit_uuid[id];
83 EXPORT_SYMBOL(to_nfit_uuid);
85 static struct acpi_nfit_desc *to_acpi_nfit_desc(
86 struct nvdimm_bus_descriptor *nd_desc)
88 return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
91 static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
93 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
96 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
97 * acpi_device.
99 if (!nd_desc->provider_name
100 || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
103 return to_acpi_device(acpi_desc->dev);
106 static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
108 struct nd_cmd_clear_error *clear_err;
109 struct nd_cmd_ars_status *ars_status;
114 if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
121 /* No supported scan types for this range */
122 flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
123 if ((status >> 16 & flags) == 0)
126 case ND_CMD_ARS_START:
127 /* ARS is in progress */
128 if ((status & 0xffff) == NFIT_ARS_START_BUSY)
135 case ND_CMD_ARS_STATUS:
140 /* Check extended status (Upper two bytes) */
141 if (status == NFIT_ARS_STATUS_DONE)
144 /* ARS is in progress */
145 if (status == NFIT_ARS_STATUS_BUSY)
148 /* No ARS performed for the current boot */
149 if (status == NFIT_ARS_STATUS_NONE)
153 * ARS interrupted, either we overflowed or some other
154 * agent wants the scan to stop. If we didn't overflow
155 * then just continue with the returned results.
157 if (status == NFIT_ARS_STATUS_INTR) {
158 if (ars_status->out_length >= 40 && (ars_status->flags
159 & NFIT_ARS_F_OVERFLOW))
168 case ND_CMD_CLEAR_ERROR:
172 if (!clear_err->cleared)
174 if (clear_err->length > clear_err->cleared)
175 return clear_err->cleared;
181 /* all other non-zero status results in an error */
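/*
 * Note on the status word handled above: the 32-bit firmware status follows
 * the NFIT DSM convention of a command status code in the low 16 bits and
 * command-specific extended status in the high 16 bits, which is why the
 * ARS_CAP checks mask with 0xffff and shift by 16 respectively.
 */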
187 #define ACPI_LABELS_LOCKED 3
189 static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
192 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
195 case ND_CMD_GET_CONFIG_SIZE:
197 * In the _LSI, _LSR, _LSW case the locked status is
198 * communicated via the read/write commands
200 if (nfit_mem->has_lsi)
203 if (status >> 16 & ND_CONFIG_LOCKED)
206 case ND_CMD_GET_CONFIG_DATA:
207 if (nfit_mem->has_lsr && status == ACPI_LABELS_LOCKED)
210 case ND_CMD_SET_CONFIG_DATA:
211 if (nfit_mem->has_lsw && status == ACPI_LABELS_LOCKED)
218 /* all other non-zero status results in an error */
224 static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
228 return xlat_bus_status(buf, cmd, status);
229 return xlat_nvdimm_status(nvdimm, buf, cmd, status);
232 /* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
233 static union acpi_object *pkg_to_buf(union acpi_object *pkg)
238 union acpi_object *buf = NULL;
240 if (pkg->type != ACPI_TYPE_PACKAGE) {
241 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
246 for (i = 0; i < pkg->package.count; i++) {
247 union acpi_object *obj = &pkg->package.elements[i];
249 if (obj->type == ACPI_TYPE_INTEGER)
251 else if (obj->type == ACPI_TYPE_BUFFER)
252 size += obj->buffer.length;
254 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
260 buf = ACPI_ALLOCATE(sizeof(*buf) + size);
265 buf->type = ACPI_TYPE_BUFFER;
266 buf->buffer.length = size;
267 buf->buffer.pointer = dst;
268 for (i = 0; i < pkg->package.count; i++) {
269 union acpi_object *obj = &pkg->package.elements[i];
271 if (obj->type == ACPI_TYPE_INTEGER) {
272 memcpy(dst, &obj->integer.value, 4);
274 } else if (obj->type == ACPI_TYPE_BUFFER) {
275 memcpy(dst, obj->buffer.pointer, obj->buffer.length);
276 dst += obj->buffer.length;
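/*
 * Layout sketch for the conversion above: a typical _LSR return package is
 * { Integer Status, Buffer Data }, which pkg_to_buf() flattens into a single
 * ACPI_TYPE_BUFFER of the form:
 *
 *	bytes 0..3	Status (integer elements are copied as 4 bytes)
 *	bytes 4..N	Data (buffer elements are appended verbatim)
 *
 * so the named-method output can be parsed by the same code paths that
 * handle _DSM buffer returns in acpi_nfit_ctl().
 */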
284 static union acpi_object *int_to_buf(union acpi_object *integer)
286 union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
292 if (integer->type != ACPI_TYPE_INTEGER) {
293 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
299 buf->type = ACPI_TYPE_BUFFER;
300 buf->buffer.length = 4;
301 buf->buffer.pointer = dst;
302 memcpy(dst, &integer->integer.value, 4);
308 static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
312 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
313 struct acpi_object_list input = {
315 .pointer = (union acpi_object []) {
317 .integer.type = ACPI_TYPE_INTEGER,
318 .integer.value = offset,
321 .integer.type = ACPI_TYPE_INTEGER,
322 .integer.value = len,
325 .buffer.type = ACPI_TYPE_BUFFER,
326 .buffer.pointer = data,
327 .buffer.length = len,
332 rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
333 if (ACPI_FAILURE(rc))
335 return int_to_buf(buf.pointer);
338 static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
342 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
343 struct acpi_object_list input = {
345 .pointer = (union acpi_object []) {
347 .integer.type = ACPI_TYPE_INTEGER,
348 .integer.value = offset,
351 .integer.type = ACPI_TYPE_INTEGER,
352 .integer.value = len,
357 rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
358 if (ACPI_FAILURE(rc))
360 return pkg_to_buf(buf.pointer);
363 static union acpi_object *acpi_label_info(acpi_handle handle)
366 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
368 rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
369 if (ACPI_FAILURE(rc))
371 return pkg_to_buf(buf.pointer);
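/*
 * Usage sketch (illustrative only, variable names are assumptions): the
 * helpers above each wrap a single _LSI/_LSR/_LSW evaluation, and the
 * object they return carries a 4-byte status followed by any additional
 * payload (see pkg_to_buf() above). Walking a whole label area would look
 * roughly like:
 *
 *	for (offset = 0; offset < config_size; offset += chunk) {
 *		union acpi_object *obj = acpi_label_read(handle, offset, chunk);
 *
 *		if (!obj)
 *			break;
 *		... check the leading status, copy the remaining bytes ...
 *		ACPI_FREE(obj);
 *	}
 *
 * In practice the chunking is driven by libnvdimm's config-data commands
 * rather than open-coded here.
 */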
374 static u8 nfit_dsm_revid(unsigned family, unsigned func)
376 static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
377 [NVDIMM_FAMILY_INTEL] = {
378 [NVDIMM_INTEL_GET_MODES] = 2,
379 [NVDIMM_INTEL_GET_FWINFO] = 2,
380 [NVDIMM_INTEL_START_FWUPDATE] = 2,
381 [NVDIMM_INTEL_SEND_FWUPDATE] = 2,
382 [NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
383 [NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
384 [NVDIMM_INTEL_SET_THRESHOLD] = 2,
385 [NVDIMM_INTEL_INJECT_ERROR] = 2,
390 if (family > NVDIMM_FAMILY_MAX)
394 id = revid_table[family][func];
396 return 1; /* default */
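/*
 * For example, nfit_dsm_revid(NVDIMM_FAMILY_INTEL, NVDIMM_INTEL_GET_MODES)
 * yields 2 per the table above, while any function not listed there, and any
 * family other than NVDIMM_FAMILY_INTEL, falls back to revision id 1.
 */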
400 int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
401 unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
403 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
404 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
405 union acpi_object in_obj, in_buf, *out_obj;
406 const struct nd_cmd_desc *desc = NULL;
407 struct device *dev = acpi_desc->dev;
408 struct nd_cmd_pkg *call_pkg = NULL;
409 const char *cmd_name, *dimm_name;
410 unsigned long cmd_mask, dsm_mask;
411 u32 offset, fw_status = 0;
418 if (cmd == ND_CMD_CALL) {
420 func = call_pkg->nd_command;
422 for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
423 if (call_pkg->nd_reserved2[i])
428 struct acpi_device *adev = nfit_mem->adev;
432 if (call_pkg && nfit_mem->family != call_pkg->nd_family)
435 dimm_name = nvdimm_name(nvdimm);
436 cmd_name = nvdimm_cmd_name(cmd);
437 cmd_mask = nvdimm_cmd_mask(nvdimm);
438 dsm_mask = nfit_mem->dsm_mask;
439 desc = nd_cmd_dimm_desc(cmd);
440 guid = to_nfit_uuid(nfit_mem->family);
441 handle = adev->handle;
443 struct acpi_device *adev = to_acpi_dev(acpi_desc);
445 cmd_name = nvdimm_bus_cmd_name(cmd);
446 cmd_mask = nd_desc->cmd_mask;
448 if (cmd == ND_CMD_CALL)
449 dsm_mask = nd_desc->bus_dsm_mask;
450 desc = nd_cmd_bus_desc(cmd);
451 guid = to_nfit_uuid(NFIT_DEV_BUS);
452 handle = adev->handle;
456 if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
459 if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
462 in_obj.type = ACPI_TYPE_PACKAGE;
463 in_obj.package.count = 1;
464 in_obj.package.elements = &in_buf;
465 in_buf.type = ACPI_TYPE_BUFFER;
466 in_buf.buffer.pointer = buf;
467 in_buf.buffer.length = 0;
469 /* libnvdimm has already validated the input envelope */
470 for (i = 0; i < desc->in_num; i++)
471 in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
475 /* skip over package wrapper */
476 in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
477 in_buf.buffer.length = call_pkg->nd_size_in;
480 dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
481 __func__, dimm_name, cmd, func, in_buf.buffer.length);
482 print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
483 in_buf.buffer.pointer,
484 min_t(u32, 256, in_buf.buffer.length), true);
486 /* call the BIOS, prefer the named methods over _DSM if available */
487 if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsi)
488 out_obj = acpi_label_info(handle);
489 else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) {
490 struct nd_cmd_get_config_data_hdr *p = buf;
492 out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
493 } else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
494 && nfit_mem->has_lsw) {
495 struct nd_cmd_set_config_hdr *p = buf;
497 out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
503 revid = nfit_dsm_revid(nfit_mem->family, func);
506 out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
510 dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
516 call_pkg->nd_fw_size = out_obj->buffer.length;
517 memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
518 out_obj->buffer.pointer,
519 min(call_pkg->nd_fw_size, call_pkg->nd_size_out));
523 * Need to support FW functions without a known output size in advance.
524 * The caller can determine the required size based upon nd_fw_size.
525 * If we returned an error here (as elsewhere), the caller would not
526 * be able to rely upon the returned data to make that calculation.
531 if (out_obj->package.type != ACPI_TYPE_BUFFER) {
532 dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
533 __func__, dimm_name, cmd_name, out_obj->type);
538 dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, dimm_name,
539 cmd_name, out_obj->buffer.length);
540 print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
541 out_obj->buffer.pointer,
542 min_t(u32, 128, out_obj->buffer.length), true);
544 for (i = 0, offset = 0; i < desc->out_num; i++) {
545 u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
546 (u32 *) out_obj->buffer.pointer,
547 out_obj->buffer.length - offset);
549 if (offset + out_size > out_obj->buffer.length) {
550 dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
551 __func__, dimm_name, cmd_name, i);
555 if (in_buf.buffer.length + offset + out_size > buf_len) {
556 dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
557 __func__, dimm_name, cmd_name, i);
561 memcpy(buf + in_buf.buffer.length + offset,
562 out_obj->buffer.pointer + offset, out_size);
567 * Set fw_status for all the commands with a known format to be
568 * later interpreted by xlat_status().
570 if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
571 && cmd <= ND_CMD_CLEAR_ERROR)
572 || (nvdimm && cmd >= ND_CMD_SMART
573 && cmd <= ND_CMD_VENDOR)))
574 fw_status = *(u32 *) out_obj->buffer.pointer;
576 if (offset + in_buf.buffer.length < buf_len) {
579 * status valid, return the number of bytes left
580 * unfilled in the output buffer
582 rc = buf_len - offset - in_buf.buffer.length;
584 *cmd_rc = xlat_status(nvdimm, buf, cmd,
587 dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
588 __func__, dimm_name, cmd_name, buf_len,
595 *cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
603 EXPORT_SYMBOL_GPL(acpi_nfit_ctl);
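/*
 * Call-path sketch (illustrative, not driver code): a vendor _DSM is passed
 * through from userspace by wrapping it in a struct nd_cmd_pkg and issuing
 * ND_CMD_CALL via the libnvdimm ioctl on the DIMM device, which ends up in
 * acpi_nfit_ctl() above:
 *
 *	struct nd_cmd_pkg *pkg = calloc(1, sizeof(*pkg) + in_size + out_size);
 *
 *	pkg->nd_family = NVDIMM_FAMILY_INTEL;	// must match nfit_mem->family
 *	pkg->nd_command = func;			// DSM function index
 *	pkg->nd_size_in = in_size;
 *	pkg->nd_size_out = out_size;
 *	memcpy(pkg->nd_payload, in, in_size);
 *	ioctl(nmem_fd, ND_IOCTL_CALL, pkg);	// fd on /dev/nmemX
 *
 * On return, nd_fw_size reports how many output bytes the platform actually
 * produced, as filled in by the out_obj handling above.
 */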
605 static const char *spa_type_name(u16 type)
607 static const char *to_name[] = {
608 [NFIT_SPA_VOLATILE] = "volatile",
609 [NFIT_SPA_PM] = "pmem",
610 [NFIT_SPA_DCR] = "dimm-control-region",
611 [NFIT_SPA_BDW] = "block-data-window",
612 [NFIT_SPA_VDISK] = "volatile-disk",
613 [NFIT_SPA_VCD] = "volatile-cd",
614 [NFIT_SPA_PDISK] = "persistent-disk",
615 [NFIT_SPA_PCD] = "persistent-cd",
619 if (type > NFIT_SPA_PCD)
622 return to_name[type];
625 int nfit_spa_type(struct acpi_nfit_system_address *spa)
629 for (i = 0; i < NFIT_UUID_MAX; i++)
630 if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
635 static bool add_spa(struct acpi_nfit_desc *acpi_desc,
636 struct nfit_table_prev *prev,
637 struct acpi_nfit_system_address *spa)
639 struct device *dev = acpi_desc->dev;
640 struct nfit_spa *nfit_spa;
642 if (spa->header.length != sizeof(*spa))
645 list_for_each_entry(nfit_spa, &prev->spas, list) {
646 if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
647 list_move_tail(&nfit_spa->list, &acpi_desc->spas);
652 nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
656 INIT_LIST_HEAD(&nfit_spa->list);
657 memcpy(nfit_spa->spa, spa, sizeof(*spa));
658 list_add_tail(&nfit_spa->list, &acpi_desc->spas);
659 dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
661 spa_type_name(nfit_spa_type(spa)));
665 static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
666 struct nfit_table_prev *prev,
667 struct acpi_nfit_memory_map *memdev)
669 struct device *dev = acpi_desc->dev;
670 struct nfit_memdev *nfit_memdev;
672 if (memdev->header.length != sizeof(*memdev))
675 list_for_each_entry(nfit_memdev, &prev->memdevs, list)
676 if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
677 list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
681 nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
685 INIT_LIST_HEAD(&nfit_memdev->list);
686 memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
687 list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
688 dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
689 __func__, memdev->device_handle, memdev->range_index,
690 memdev->region_index, memdev->flags);
694 int nfit_get_smbios_id(u32 device_handle, u16 *flags)
696 struct acpi_nfit_memory_map *memdev;
697 struct acpi_nfit_desc *acpi_desc;
698 struct nfit_mem *nfit_mem;
700 mutex_lock(&acpi_desc_lock);
701 list_for_each_entry(acpi_desc, &acpi_descs, list) {
702 mutex_lock(&acpi_desc->init_mutex);
703 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
704 memdev = __to_nfit_memdev(nfit_mem);
705 if (memdev->device_handle == device_handle) {
706 mutex_unlock(&acpi_desc->init_mutex);
707 mutex_unlock(&acpi_desc_lock);
708 *flags = memdev->flags;
709 return memdev->physical_id;
712 mutex_unlock(&acpi_desc->init_mutex);
714 mutex_unlock(&acpi_desc_lock);
718 EXPORT_SYMBOL_GPL(nfit_get_smbios_id);
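/*
 * Usage sketch (assumption about the caller): a memory-error consumer such
 * as an EDAC driver can translate the NFIT device handle from an error
 * record into the DIMM's SMBIOS handle:
 *
 *	u16 flags;
 *	int smbios_handle = nfit_get_smbios_id(device_handle, &flags);
 *
 *	if (smbios_handle >= 0 && !(flags & ACPI_NFIT_MEM_MAP_FAILED))
 *		... resolve the DIMM label via the SMBIOS type 17 entry ...
 */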
721 * An implementation may provide a truncated control region if no block windows
722 * are defined.
724 static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
726 if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
731 return offsetof(struct acpi_nfit_control_region, window_size);
734 static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
735 struct nfit_table_prev *prev,
736 struct acpi_nfit_control_region *dcr)
738 struct device *dev = acpi_desc->dev;
739 struct nfit_dcr *nfit_dcr;
741 if (!sizeof_dcr(dcr))
744 list_for_each_entry(nfit_dcr, &prev->dcrs, list)
745 if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
746 list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
750 nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
754 INIT_LIST_HEAD(&nfit_dcr->list);
755 memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
756 list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
757 dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
758 dcr->region_index, dcr->windows);
762 static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
763 struct nfit_table_prev *prev,
764 struct acpi_nfit_data_region *bdw)
766 struct device *dev = acpi_desc->dev;
767 struct nfit_bdw *nfit_bdw;
769 if (bdw->header.length != sizeof(*bdw))
771 list_for_each_entry(nfit_bdw, &prev->bdws, list)
772 if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
773 list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
777 nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
781 INIT_LIST_HEAD(&nfit_bdw->list);
782 memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
783 list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
784 dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
785 bdw->region_index, bdw->windows);
789 static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
791 if (idt->header.length < sizeof(*idt))
793 return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
796 static bool add_idt(struct acpi_nfit_desc *acpi_desc,
797 struct nfit_table_prev *prev,
798 struct acpi_nfit_interleave *idt)
800 struct device *dev = acpi_desc->dev;
801 struct nfit_idt *nfit_idt;
803 if (!sizeof_idt(idt))
806 list_for_each_entry(nfit_idt, &prev->idts, list) {
807 if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
810 if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
811 list_move_tail(&nfit_idt->list, &acpi_desc->idts);
816 nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
820 INIT_LIST_HEAD(&nfit_idt->list);
821 memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
822 list_add_tail(&nfit_idt->list, &acpi_desc->idts);
823 dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
824 idt->interleave_index, idt->line_count);
828 static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
830 if (flush->header.length < sizeof(*flush))
832 return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
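/*
 * Note: acpi_nfit_interleave and acpi_nfit_flush_address both declare one
 * trailing array element in ACPICA, so sizeof_idt() and sizeof_flush()
 * only add (count - 1) extra entries on top of sizeof(*...).
 */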
835 static bool add_flush(struct acpi_nfit_desc *acpi_desc,
836 struct nfit_table_prev *prev,
837 struct acpi_nfit_flush_address *flush)
839 struct device *dev = acpi_desc->dev;
840 struct nfit_flush *nfit_flush;
842 if (!sizeof_flush(flush))
845 list_for_each_entry(nfit_flush, &prev->flushes, list) {
846 if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
849 if (memcmp(nfit_flush->flush, flush,
850 sizeof_flush(flush)) == 0) {
851 list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
856 nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
857 + sizeof_flush(flush), GFP_KERNEL);
860 INIT_LIST_HEAD(&nfit_flush->list);
861 memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
862 list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
863 dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
864 flush->device_handle, flush->hint_count);
868 static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
869 struct acpi_nfit_capabilities *pcap)
871 struct device *dev = acpi_desc->dev;
874 mask = (1 << (pcap->highest_capability + 1)) - 1;
875 acpi_desc->platform_cap = pcap->capabilities & mask;
876 dev_dbg(dev, "%s: cap: %#x\n", __func__, acpi_desc->platform_cap);
880 static void *add_table(struct acpi_nfit_desc *acpi_desc,
881 struct nfit_table_prev *prev, void *table, const void *end)
883 struct device *dev = acpi_desc->dev;
884 struct acpi_nfit_header *hdr;
885 void *err = ERR_PTR(-ENOMEM);
892 dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
898 case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
899 if (!add_spa(acpi_desc, prev, table))
902 case ACPI_NFIT_TYPE_MEMORY_MAP:
903 if (!add_memdev(acpi_desc, prev, table))
906 case ACPI_NFIT_TYPE_CONTROL_REGION:
907 if (!add_dcr(acpi_desc, prev, table))
910 case ACPI_NFIT_TYPE_DATA_REGION:
911 if (!add_bdw(acpi_desc, prev, table))
914 case ACPI_NFIT_TYPE_INTERLEAVE:
915 if (!add_idt(acpi_desc, prev, table))
918 case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
919 if (!add_flush(acpi_desc, prev, table))
922 case ACPI_NFIT_TYPE_SMBIOS:
923 dev_dbg(dev, "%s: smbios\n", __func__);
925 case ACPI_NFIT_TYPE_CAPABILITIES:
926 if (!add_platform_cap(acpi_desc, table))
930 dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
934 return table + hdr->length;
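/*
 * Parsing sketch: the NFIT is walked by repeatedly feeding add_table() a
 * cursor that it advances by hdr->length, roughly:
 *
 *	while (!IS_ERR_OR_NULL(data))
 *		data = add_table(acpi_desc, &prev, data, end);
 *
 * (the caller lives in acpi_nfit_init(), outside this excerpt). Each
 * sub-table either revalidates an entry carried over in 'prev' from a
 * previous enumeration (list_move_tail) or allocates a new tracking object.
 */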
937 static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
938 struct nfit_mem *nfit_mem)
940 u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
941 u16 dcr = nfit_mem->dcr->region_index;
942 struct nfit_spa *nfit_spa;
944 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
945 u16 range_index = nfit_spa->spa->range_index;
946 int type = nfit_spa_type(nfit_spa->spa);
947 struct nfit_memdev *nfit_memdev;
949 if (type != NFIT_SPA_BDW)
952 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
953 if (nfit_memdev->memdev->range_index != range_index)
955 if (nfit_memdev->memdev->device_handle != device_handle)
957 if (nfit_memdev->memdev->region_index != dcr)
960 nfit_mem->spa_bdw = nfit_spa->spa;
965 dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
966 nfit_mem->spa_dcr->range_index);
967 nfit_mem->bdw = NULL;
970 static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
971 struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
973 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
974 struct nfit_memdev *nfit_memdev;
975 struct nfit_bdw *nfit_bdw;
976 struct nfit_idt *nfit_idt;
977 u16 idt_idx, range_index;
979 list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
980 if (nfit_bdw->bdw->region_index != dcr)
982 nfit_mem->bdw = nfit_bdw->bdw;
989 nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
991 if (!nfit_mem->spa_bdw)
994 range_index = nfit_mem->spa_bdw->range_index;
995 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
996 if (nfit_memdev->memdev->range_index != range_index ||
997 nfit_memdev->memdev->region_index != dcr)
999 nfit_mem->memdev_bdw = nfit_memdev->memdev;
1000 idt_idx = nfit_memdev->memdev->interleave_index;
1001 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
1002 if (nfit_idt->idt->interleave_index != idt_idx)
1004 nfit_mem->idt_bdw = nfit_idt->idt;
1011 static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
1012 struct acpi_nfit_system_address *spa)
1014 struct nfit_mem *nfit_mem, *found;
1015 struct nfit_memdev *nfit_memdev;
1016 int type = spa ? nfit_spa_type(spa) : 0;
1028 * This loop runs in two modes: when a dimm is mapped, the loop
1029 * adds memdev associations to an existing dimm or creates a new
1030 * dimm. In the unmapped-dimm case, the loop sweeps for memdev
1031 * instances with an invalid / zero range_index and adds those
1032 * dimms without spa associations.
1034 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1035 struct nfit_flush *nfit_flush;
1036 struct nfit_dcr *nfit_dcr;
1040 if (spa && nfit_memdev->memdev->range_index != spa->range_index)
1042 if (!spa && nfit_memdev->memdev->range_index)
1045 dcr = nfit_memdev->memdev->region_index;
1046 device_handle = nfit_memdev->memdev->device_handle;
1047 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
1048 if (__to_nfit_memdev(nfit_mem)->device_handle
1057 nfit_mem = devm_kzalloc(acpi_desc->dev,
1058 sizeof(*nfit_mem), GFP_KERNEL);
1061 INIT_LIST_HEAD(&nfit_mem->list);
1062 nfit_mem->acpi_desc = acpi_desc;
1063 list_add(&nfit_mem->list, &acpi_desc->dimms);
1066 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
1067 if (nfit_dcr->dcr->region_index != dcr)
1070 * Record the control region for the dimm. For
1071 * the ACPI 6.1 case, where there are separate
1072 * control regions for the pmem vs blk
1073 * interfaces, be sure to record the extended
1074 * blk details.
1076 if (!nfit_mem->dcr)
1077 nfit_mem->dcr = nfit_dcr->dcr;
1078 else if (nfit_mem->dcr->windows == 0
1079 && nfit_dcr->dcr->windows)
1080 nfit_mem->dcr = nfit_dcr->dcr;
1084 list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
1085 struct acpi_nfit_flush_address *flush;
1088 if (nfit_flush->flush->device_handle != device_handle)
1090 nfit_mem->nfit_flush = nfit_flush;
1091 flush = nfit_flush->flush;
1092 nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev,
1094 * sizeof(struct resource), GFP_KERNEL);
1095 if (!nfit_mem->flush_wpq)
1097 for (i = 0; i < flush->hint_count; i++) {
1098 struct resource *res = &nfit_mem->flush_wpq[i];
1100 res->start = flush->hint_address[i];
1101 res->end = res->start + 8 - 1;
1106 if (dcr && !nfit_mem->dcr) {
1107 dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
1108 spa->range_index, dcr);
1112 if (type == NFIT_SPA_DCR) {
1113 struct nfit_idt *nfit_idt;
1116 /* multiple dimms may share a SPA when interleaved */
1117 nfit_mem->spa_dcr = spa;
1118 nfit_mem->memdev_dcr = nfit_memdev->memdev;
1119 idt_idx = nfit_memdev->memdev->interleave_index;
1120 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
1121 if (nfit_idt->idt->interleave_index != idt_idx)
1123 nfit_mem->idt_dcr = nfit_idt->idt;
1126 nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
1127 } else if (type == NFIT_SPA_PM) {
1129 * A single dimm may belong to multiple SPA-PM
1130 * ranges; record at least one in addition to
1131 * any SPA-DCR range.
1133 nfit_mem->memdev_pmem = nfit_memdev->memdev;
1135 nfit_mem->memdev_dcr = nfit_memdev->memdev;
1141 static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
1143 struct nfit_mem *a = container_of(_a, typeof(*a), list);
1144 struct nfit_mem *b = container_of(_b, typeof(*b), list);
1145 u32 handleA, handleB;
1147 handleA = __to_nfit_memdev(a)->device_handle;
1148 handleB = __to_nfit_memdev(b)->device_handle;
1149 if (handleA < handleB)
1151 else if (handleA > handleB)
1156 static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
1158 struct nfit_spa *nfit_spa;
1163 * For each SPA-DCR or SPA-PMEM address range find its
1164 * corresponding MEMDEV(s). From each MEMDEV find the
1165 * corresponding DCR. Then, if we're operating on a SPA-DCR,
1166 * try to find a SPA-BDW and a corresponding BDW that references
1167 * the DCR. Throw it all into an nfit_mem object. Note that
1168 * BDWs are optional.
1170 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
1171 rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
1177 * If a DIMM has failed to be mapped into SPA there will be no
1178 * SPA entries above. Find and register all the unmapped DIMMs
1179 * for reporting and recovery purposes.
1181 rc = __nfit_mem_init(acpi_desc, NULL);
1185 list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);
1190 static ssize_t bus_dsm_mask_show(struct device *dev,
1191 struct device_attribute *attr, char *buf)
1193 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
1194 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1196 return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask);
1198 static struct device_attribute dev_attr_bus_dsm_mask =
1199 __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);
1201 static ssize_t revision_show(struct device *dev,
1202 struct device_attribute *attr, char *buf)
1204 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
1205 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1206 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1208 return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
1210 static DEVICE_ATTR_RO(revision);
1212 static ssize_t hw_error_scrub_show(struct device *dev,
1213 struct device_attribute *attr, char *buf)
1215 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
1216 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1217 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1219 return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
1223 * The 'hw_error_scrub' attribute can have the following values written to it:
1224 * '0': Switch to the default mode where an exception will only insert
1225 * the address of the memory error into the poison and badblocks lists.
1226 * '1': Enable a full scrub to happen if an exception for a memory error is
1227 * received.
1229 static ssize_t hw_error_scrub_store(struct device *dev,
1230 struct device_attribute *attr, const char *buf, size_t size)
1232 struct nvdimm_bus_descriptor *nd_desc;
1236 rc = kstrtol(buf, 0, &val);
1241 nd_desc = dev_get_drvdata(dev);
1243 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1246 case HW_ERROR_SCRUB_ON:
1247 acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
1249 case HW_ERROR_SCRUB_OFF:
1250 acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
1262 static DEVICE_ATTR_RW(hw_error_scrub);
1265 * This shows the number of full Address Range Scrubs that have been
1266 * completed since driver load time. Userspace can wait on this using
1267 * select/poll etc. A '+' at the end indicates an ARS is in progress
1269 static ssize_t scrub_show(struct device *dev,
1270 struct device_attribute *attr, char *buf)
1272 struct nvdimm_bus_descriptor *nd_desc;
1273 ssize_t rc = -ENXIO;
1276 nd_desc = dev_get_drvdata(dev);
1278 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1280 rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
1281 (work_busy(&acpi_desc->work)) ? "+\n" : "\n");
1287 static ssize_t scrub_store(struct device *dev,
1288 struct device_attribute *attr, const char *buf, size_t size)
1290 struct nvdimm_bus_descriptor *nd_desc;
1294 rc = kstrtol(buf, 0, &val);
1301 nd_desc = dev_get_drvdata(dev);
1303 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1305 rc = acpi_nfit_ars_rescan(acpi_desc, 0);
1312 static DEVICE_ATTR_RW(scrub);
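/*
 * Userspace sketch (the path is an assumption derived from the attribute
 * group name): 'scrub' and 'hw_error_scrub' surface under the bus device,
 * e.g. /sys/bus/nd/devices/ndbus0/nfit/scrub. Reading 'scrub' returns the
 * completed ARS count, with a trailing '+' while a scrub is in flight, and
 * writing any integer queues a new rescan via acpi_nfit_ars_rescan().
 */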
1314 static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
1316 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1317 const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
1318 | 1 << ND_CMD_ARS_STATUS;
1320 return (nd_desc->cmd_mask & mask) == mask;
1323 static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
1325 struct device *dev = container_of(kobj, struct device, kobj);
1326 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
1328 if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
1333 static struct attribute *acpi_nfit_attributes[] = {
1334 &dev_attr_revision.attr,
1335 &dev_attr_scrub.attr,
1336 &dev_attr_hw_error_scrub.attr,
1337 &dev_attr_bus_dsm_mask.attr,
1341 static const struct attribute_group acpi_nfit_attribute_group = {
1343 .attrs = acpi_nfit_attributes,
1344 .is_visible = nfit_visible,
1347 static const struct attribute_group *acpi_nfit_attribute_groups[] = {
1348 &nvdimm_bus_attribute_group,
1349 &acpi_nfit_attribute_group,
1353 static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
1355 struct nvdimm *nvdimm = to_nvdimm(dev);
1356 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1358 return __to_nfit_memdev(nfit_mem);
1361 static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
1363 struct nvdimm *nvdimm = to_nvdimm(dev);
1364 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1366 return nfit_mem->dcr;
1369 static ssize_t handle_show(struct device *dev,
1370 struct device_attribute *attr, char *buf)
1372 struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);
1374 return sprintf(buf, "%#x\n", memdev->device_handle);
1376 static DEVICE_ATTR_RO(handle);
1378 static ssize_t phys_id_show(struct device *dev,
1379 struct device_attribute *attr, char *buf)
1381 struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);
1383 return sprintf(buf, "%#x\n", memdev->physical_id);
1385 static DEVICE_ATTR_RO(phys_id);
1387 static ssize_t vendor_show(struct device *dev,
1388 struct device_attribute *attr, char *buf)
1390 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1392 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
1394 static DEVICE_ATTR_RO(vendor);
1396 static ssize_t rev_id_show(struct device *dev,
1397 struct device_attribute *attr, char *buf)
1399 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1401 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
1403 static DEVICE_ATTR_RO(rev_id);
1405 static ssize_t device_show(struct device *dev,
1406 struct device_attribute *attr, char *buf)
1408 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1410 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
1412 static DEVICE_ATTR_RO(device);
1414 static ssize_t subsystem_vendor_show(struct device *dev,
1415 struct device_attribute *attr, char *buf)
1417 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1419 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
1421 static DEVICE_ATTR_RO(subsystem_vendor);
1423 static ssize_t subsystem_rev_id_show(struct device *dev,
1424 struct device_attribute *attr, char *buf)
1426 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1428 return sprintf(buf, "0x%04x\n",
1429 be16_to_cpu(dcr->subsystem_revision_id));
1431 static DEVICE_ATTR_RO(subsystem_rev_id);
1433 static ssize_t subsystem_device_show(struct device *dev,
1434 struct device_attribute *attr, char *buf)
1436 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1438 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
1440 static DEVICE_ATTR_RO(subsystem_device);
1442 static int num_nvdimm_formats(struct nvdimm *nvdimm)
1444 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1447 if (nfit_mem->memdev_pmem)
1449 if (nfit_mem->memdev_bdw)
1454 static ssize_t format_show(struct device *dev,
1455 struct device_attribute *attr, char *buf)
1457 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1459 return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
1461 static DEVICE_ATTR_RO(format);
1463 static ssize_t format1_show(struct device *dev,
1464 struct device_attribute *attr, char *buf)
1467 ssize_t rc = -ENXIO;
1468 struct nfit_mem *nfit_mem;
1469 struct nfit_memdev *nfit_memdev;
1470 struct acpi_nfit_desc *acpi_desc;
1471 struct nvdimm *nvdimm = to_nvdimm(dev);
1472 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1474 nfit_mem = nvdimm_provider_data(nvdimm);
1475 acpi_desc = nfit_mem->acpi_desc;
1476 handle = to_nfit_memdev(dev)->device_handle;
1478 /* assumes DIMMs have at most 2 published interface codes */
1479 mutex_lock(&acpi_desc->init_mutex);
1480 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1481 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
1482 struct nfit_dcr *nfit_dcr;
1484 if (memdev->device_handle != handle)
1487 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
1488 if (nfit_dcr->dcr->region_index != memdev->region_index)
1490 if (nfit_dcr->dcr->code == dcr->code)
1492 rc = sprintf(buf, "0x%04x\n",
1493 le16_to_cpu(nfit_dcr->dcr->code));
1499 mutex_unlock(&acpi_desc->init_mutex);
1502 static DEVICE_ATTR_RO(format1);
1504 static ssize_t formats_show(struct device *dev,
1505 struct device_attribute *attr, char *buf)
1507 struct nvdimm *nvdimm = to_nvdimm(dev);
1509 return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
1511 static DEVICE_ATTR_RO(formats);
1513 static ssize_t serial_show(struct device *dev,
1514 struct device_attribute *attr, char *buf)
1516 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1518 return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
1520 static DEVICE_ATTR_RO(serial);
1522 static ssize_t family_show(struct device *dev,
1523 struct device_attribute *attr, char *buf)
1525 struct nvdimm *nvdimm = to_nvdimm(dev);
1526 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1528 if (nfit_mem->family < 0)
1530 return sprintf(buf, "%d\n", nfit_mem->family);
1532 static DEVICE_ATTR_RO(family);
1534 static ssize_t dsm_mask_show(struct device *dev,
1535 struct device_attribute *attr, char *buf)
1537 struct nvdimm *nvdimm = to_nvdimm(dev);
1538 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1540 if (nfit_mem->family < 0)
1542 return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
1544 static DEVICE_ATTR_RO(dsm_mask);
1546 static ssize_t flags_show(struct device *dev,
1547 struct device_attribute *attr, char *buf)
1549 u16 flags = to_nfit_memdev(dev)->flags;
1551 return sprintf(buf, "%s%s%s%s%s%s%s\n",
1552 flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
1553 flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
1554 flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
1555 flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
1556 flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
1557 flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
1558 flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
1560 static DEVICE_ATTR_RO(flags);
1562 static ssize_t id_show(struct device *dev,
1563 struct device_attribute *attr, char *buf)
1565 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1567 if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
1568 return sprintf(buf, "%04x-%02x-%04x-%08x\n",
1569 be16_to_cpu(dcr->vendor_id),
1570 dcr->manufacturing_location,
1571 be16_to_cpu(dcr->manufacturing_date),
1572 be32_to_cpu(dcr->serial_number));
1574 return sprintf(buf, "%04x-%08x\n",
1575 be16_to_cpu(dcr->vendor_id),
1576 be32_to_cpu(dcr->serial_number));
1578 static DEVICE_ATTR_RO(id);
1580 static struct attribute *acpi_nfit_dimm_attributes[] = {
1581 &dev_attr_handle.attr,
1582 &dev_attr_phys_id.attr,
1583 &dev_attr_vendor.attr,
1584 &dev_attr_device.attr,
1585 &dev_attr_rev_id.attr,
1586 &dev_attr_subsystem_vendor.attr,
1587 &dev_attr_subsystem_device.attr,
1588 &dev_attr_subsystem_rev_id.attr,
1589 &dev_attr_format.attr,
1590 &dev_attr_formats.attr,
1591 &dev_attr_format1.attr,
1592 &dev_attr_serial.attr,
1593 &dev_attr_flags.attr,
1595 &dev_attr_family.attr,
1596 &dev_attr_dsm_mask.attr,
1600 static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
1601 struct attribute *a, int n)
1603 struct device *dev = container_of(kobj, struct device, kobj);
1604 struct nvdimm *nvdimm = to_nvdimm(dev);
1606 if (!to_nfit_dcr(dev)) {
1607 /* Without a dcr only the memdev attributes can be surfaced */
1608 if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
1609 || a == &dev_attr_flags.attr
1610 || a == &dev_attr_family.attr
1611 || a == &dev_attr_dsm_mask.attr)
1616 if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
1621 static const struct attribute_group acpi_nfit_dimm_attribute_group = {
1623 .attrs = acpi_nfit_dimm_attributes,
1624 .is_visible = acpi_nfit_dimm_attr_visible,
1627 static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
1628 &nvdimm_attribute_group,
1629 &nd_device_attribute_group,
1630 &acpi_nfit_dimm_attribute_group,
1634 static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
1637 struct nfit_mem *nfit_mem;
1639 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
1640 if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
1641 return nfit_mem->nvdimm;
1646 void __acpi_nvdimm_notify(struct device *dev, u32 event)
1648 struct nfit_mem *nfit_mem;
1649 struct acpi_nfit_desc *acpi_desc;
1651 dev_dbg(dev->parent, "%s: %s: event: %d\n", dev_name(dev), __func__,
1654 if (event != NFIT_NOTIFY_DIMM_HEALTH) {
1655 dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
1660 acpi_desc = dev_get_drvdata(dev->parent);
1665 * If we successfully retrieved acpi_desc, then we know nfit_mem data
1666 * is okay.
1668 nfit_mem = dev_get_drvdata(dev);
1669 if (nfit_mem && nfit_mem->flags_attr)
1670 sysfs_notify_dirent(nfit_mem->flags_attr);
1672 EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);
1674 static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
1676 struct acpi_device *adev = data;
1677 struct device *dev = &adev->dev;
1679 device_lock(dev->parent);
1680 __acpi_nvdimm_notify(dev, event);
1681 device_unlock(dev->parent);
1684 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1685 struct nfit_mem *nfit_mem, u32 device_handle)
1687 struct acpi_device *adev, *adev_dimm;
1688 struct device *dev = acpi_desc->dev;
1689 union acpi_object *obj;
1690 unsigned long dsm_mask;
1695 /* nfit test assumes 1:1 relationship between commands and dsms */
1696 nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
1697 nfit_mem->family = NVDIMM_FAMILY_INTEL;
1698 adev = to_acpi_dev(acpi_desc);
1702 adev_dimm = acpi_find_child_device(adev, device_handle, false);
1703 nfit_mem->adev = adev_dimm;
1705 dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
1707 return force_enable_dimms ? 0 : -ENODEV;
1710 if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
1711 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
1712 dev_err(dev, "%s: notification registration failed\n",
1713 dev_name(&adev_dimm->dev));
1717 * Record nfit_mem for the notification path to track back to
1718 * the nfit sysfs attributes for this dimm device object.
1720 dev_set_drvdata(&adev_dimm->dev, nfit_mem);
1723 * Until standardization materializes we need to consider 4
1724 * different command sets. Note that checking for function 0 (bit 0)
1725 * tells us if any commands are reachable through this GUID.
1727 for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
1728 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
1729 if (family < 0 || i == default_dsm_family)
1732 /* limit the supported commands to those that are publicly documented */
1733 nfit_mem->family = family;
1734 if (override_dsm_mask && !disable_vendor_specific)
1735 dsm_mask = override_dsm_mask;
1736 else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
1737 dsm_mask = NVDIMM_INTEL_CMDMASK;
1738 if (disable_vendor_specific)
1739 dsm_mask &= ~(1 << ND_CMD_VENDOR);
1740 } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
1741 dsm_mask = 0x1c3c76;
1742 } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
1744 if (disable_vendor_specific)
1745 dsm_mask &= ~(1 << 8);
1746 } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
1747 dsm_mask = 0xffffffff;
1749 dev_dbg(dev, "unknown dimm command family\n");
1750 nfit_mem->family = -1;
1751 /* DSMs are optional, continue loading the driver... */
1755 guid = to_nfit_uuid(nfit_mem->family);
1756 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
1757 if (acpi_check_dsm(adev_dimm->handle, guid,
1758 nfit_dsm_revid(nfit_mem->family, i),
1760 set_bit(i, &nfit_mem->dsm_mask);
1762 obj = acpi_label_info(adev_dimm->handle);
1765 nfit_mem->has_lsi = 1;
1766 dev_dbg(dev, "%s: has _LSI\n", dev_name(&adev_dimm->dev));
1769 obj = acpi_label_read(adev_dimm->handle, 0, 0);
1772 nfit_mem->has_lsr = 1;
1773 dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
1776 obj = acpi_label_write(adev_dimm->handle, 0, 0, NULL);
1779 nfit_mem->has_lsw = 1;
1780 dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
1786 static void shutdown_dimm_notify(void *data)
1788 struct acpi_nfit_desc *acpi_desc = data;
1789 struct nfit_mem *nfit_mem;
1791 mutex_lock(&acpi_desc->init_mutex);
1793 * Clear out the nfit_mem->flags_attr and shut down dimm event
1794 * notifications.
1796 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1797 struct acpi_device *adev_dimm = nfit_mem->adev;
1799 if (nfit_mem->flags_attr) {
1800 sysfs_put(nfit_mem->flags_attr);
1801 nfit_mem->flags_attr = NULL;
1804 acpi_remove_notify_handler(adev_dimm->handle,
1805 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
1806 dev_set_drvdata(&adev_dimm->dev, NULL);
1809 mutex_unlock(&acpi_desc->init_mutex);
1812 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
1814 struct nfit_mem *nfit_mem;
1815 int dimm_count = 0, rc;
1816 struct nvdimm *nvdimm;
1818 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1819 struct acpi_nfit_flush_address *flush;
1820 unsigned long flags = 0, cmd_mask;
1821 struct nfit_memdev *nfit_memdev;
1825 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
1826 nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
1832 if (nfit_mem->bdw && nfit_mem->memdev_pmem)
1833 set_bit(NDD_ALIASING, &flags);
1835 /* collate flags across all memdevs for this dimm */
1836 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1837 struct acpi_nfit_memory_map *dimm_memdev;
1839 dimm_memdev = __to_nfit_memdev(nfit_mem);
1840 if (dimm_memdev->device_handle
1841 != nfit_memdev->memdev->device_handle)
1843 dimm_memdev->flags |= nfit_memdev->memdev->flags;
1846 mem_flags = __to_nfit_memdev(nfit_mem)->flags;
1847 if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
1848 set_bit(NDD_UNARMED, &flags);
1850 rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
1855 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
1856 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
1857 * userspace interface.
1859 cmd_mask = 1UL << ND_CMD_CALL;
1860 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
1862 * These commands have a 1:1 correspondence
1863 * between DSM payload and libnvdimm ioctl
1864 * payload format.
1866 cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
1869 if (nfit_mem->has_lsi)
1870 set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
1871 if (nfit_mem->has_lsr)
1872 set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
1873 if (nfit_mem->has_lsw)
1874 set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
1876 flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
1878 nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
1879 acpi_nfit_dimm_attribute_groups,
1880 flags, cmd_mask, flush ? flush->hint_count : 0,
1881 nfit_mem->flush_wpq);
1885 nfit_mem->nvdimm = nvdimm;
1888 if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
1891 dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
1892 nvdimm_name(nvdimm),
1893 mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
1894 mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
1895 mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
1896 mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
1897 mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");
1901 rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
1906 * Now that dimms are successfully registered, and async registration
1907 * is flushed, attempt to enable event notification.
1909 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1910 struct kernfs_node *nfit_kernfs;
1912 nvdimm = nfit_mem->nvdimm;
1916 nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
1918 nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
1920 sysfs_put(nfit_kernfs);
1921 if (!nfit_mem->flags_attr)
1922 dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
1923 nvdimm_name(nvdimm));
1926 return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
1931 * These constants are private because there are no kernel consumers of
1932 * these commands.
1934 enum nfit_aux_cmds {
1935 NFIT_CMD_TRANSLATE_SPA = 5,
1936 NFIT_CMD_ARS_INJECT_SET = 7,
1937 NFIT_CMD_ARS_INJECT_CLEAR = 8,
1938 NFIT_CMD_ARS_INJECT_GET = 9,
1941 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
1943 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1944 const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
1945 struct acpi_device *adev;
1946 unsigned long dsm_mask;
1949 nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
1950 nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en;
1951 adev = to_acpi_dev(acpi_desc);
1955 for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
1956 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
1957 set_bit(i, &nd_desc->cmd_mask);
1958 set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
1961 (1 << ND_CMD_ARS_CAP) |
1962 (1 << ND_CMD_ARS_START) |
1963 (1 << ND_CMD_ARS_STATUS) |
1964 (1 << ND_CMD_CLEAR_ERROR) |
1965 (1 << NFIT_CMD_TRANSLATE_SPA) |
1966 (1 << NFIT_CMD_ARS_INJECT_SET) |
1967 (1 << NFIT_CMD_ARS_INJECT_CLEAR) |
1968 (1 << NFIT_CMD_ARS_INJECT_GET);
1969 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
1970 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
1971 set_bit(i, &nd_desc->bus_dsm_mask);
1974 static ssize_t range_index_show(struct device *dev,
1975 struct device_attribute *attr, char *buf)
1977 struct nd_region *nd_region = to_nd_region(dev);
1978 struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
1980 return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
1982 static DEVICE_ATTR_RO(range_index);
1984 static ssize_t ecc_unit_size_show(struct device *dev,
1985 struct device_attribute *attr, char *buf)
1987 struct nd_region *nd_region = to_nd_region(dev);
1988 struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
1990 return sprintf(buf, "%d\n", nfit_spa->clear_err_unit);
1992 static DEVICE_ATTR_RO(ecc_unit_size);
1994 static struct attribute *acpi_nfit_region_attributes[] = {
1995 &dev_attr_range_index.attr,
1996 &dev_attr_ecc_unit_size.attr,
2000 static const struct attribute_group acpi_nfit_region_attribute_group = {
2002 .attrs = acpi_nfit_region_attributes,
2005 static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
2006 &nd_region_attribute_group,
2007 &nd_mapping_attribute_group,
2008 &nd_device_attribute_group,
2009 &nd_numa_attribute_group,
2010 &acpi_nfit_region_attribute_group,
2014 /* enough info to uniquely specify an interleave set */
2015 struct nfit_set_info {
2016 struct nfit_set_info_map {
2023 struct nfit_set_info2 {
2024 struct nfit_set_info_map2 {
2028 u16 manufacturing_date;
2029 u8 manufacturing_location;
2034 static size_t sizeof_nfit_set_info(int num_mappings)
2036 return sizeof(struct nfit_set_info)
2037 + num_mappings * sizeof(struct nfit_set_info_map);
2040 static size_t sizeof_nfit_set_info2(int num_mappings)
2042 return sizeof(struct nfit_set_info2)
2043 + num_mappings * sizeof(struct nfit_set_info_map2);
2046 static int cmp_map_compat(const void *m0, const void *m1)
2048 const struct nfit_set_info_map *map0 = m0;
2049 const struct nfit_set_info_map *map1 = m1;
2051 return memcmp(&map0->region_offset, &map1->region_offset,
2055 static int cmp_map(const void *m0, const void *m1)
2057 const struct nfit_set_info_map *map0 = m0;
2058 const struct nfit_set_info_map *map1 = m1;
2060 if (map0->region_offset < map1->region_offset)
2062 else if (map0->region_offset > map1->region_offset)
2067 static int cmp_map2(const void *m0, const void *m1)
2069 const struct nfit_set_info_map2 *map0 = m0;
2070 const struct nfit_set_info_map2 *map1 = m1;
2072 if (map0->region_offset < map1->region_offset)
2074 else if (map0->region_offset > map1->region_offset)
2079 /* Retrieve the nth entry referencing this spa */
2080 static struct acpi_nfit_memory_map *memdev_from_spa(
2081 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
2083 struct nfit_memdev *nfit_memdev;
2085 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
2086 if (nfit_memdev->memdev->range_index == range_index)
2088 return nfit_memdev->memdev;
2092 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
2093 struct nd_region_desc *ndr_desc,
2094 struct acpi_nfit_system_address *spa)
2096 struct device *dev = acpi_desc->dev;
2097 struct nd_interleave_set *nd_set;
2098 u16 nr = ndr_desc->num_mappings;
2099 struct nfit_set_info2 *info2;
2100 struct nfit_set_info *info;
2103 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
2106 ndr_desc->nd_set = nd_set;
2107 guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);
2109 info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
2113 info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
2117 for (i = 0; i < nr; i++) {
2118 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
2119 struct nfit_set_info_map *map = &info->mapping[i];
2120 struct nfit_set_info_map2 *map2 = &info2->mapping[i];
2121 struct nvdimm *nvdimm = mapping->nvdimm;
2122 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
2123 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
2124 spa->range_index, i);
2125 struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
2127 if (!memdev || !nfit_mem->dcr) {
2128 dev_err(dev, "%s: failed to find DCR\n", __func__);
2132 map->region_offset = memdev->region_offset;
2133 map->serial_number = dcr->serial_number;
2135 map2->region_offset = memdev->region_offset;
2136 map2->serial_number = dcr->serial_number;
2137 map2->vendor_id = dcr->vendor_id;
2138 map2->manufacturing_date = dcr->manufacturing_date;
2139 map2->manufacturing_location = dcr->manufacturing_location;
2142 /* v1.1 namespaces */
2143 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
2145 nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
2147 /* v1.2 namespaces */
2148 sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
2150 nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);
2152 /* support v1.1 namespaces created with the wrong sort order */
2153 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
2154 cmp_map_compat, NULL);
2155 nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
2157 /* record the result of the sort for the mapping position */
2158 for (i = 0; i < nr; i++) {
2159 struct nfit_set_info_map2 *map2 = &info2->mapping[i];
2162 for (j = 0; j < nr; j++) {
2163 struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
2164 struct nvdimm *nvdimm = mapping->nvdimm;
2165 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
2166 struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
2168 if (map2->serial_number == dcr->serial_number &&
2169 map2->vendor_id == dcr->vendor_id &&
2170 map2->manufacturing_date == dcr->manufacturing_date &&
2171 map2->manufacturing_location
2172 == dcr->manufacturing_location) {
2173 mapping->position = i;
2179 ndr_desc->nd_set = nd_set;
2180 devm_kfree(dev, info);
2181 devm_kfree(dev, info2);
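/*
 * Cookie sketch: each interleave-set cookie computed above is a fletcher64
 * checksum over per-mapping records sorted by region_offset, so the value
 * is stable even if the DIMMs enumerate in a different order on a later
 * boot. cookie1 covers the v1.1 label fields (offset + serial number),
 * cookie2 adds the vendor/manufacturing fields for v1.2 labels, and
 * altcookie reproduces the historical memcmp()-based sort so that old v1.1
 * namespaces created with that ordering still validate.
 */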
2186 static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
2188 struct acpi_nfit_interleave *idt = mmio->idt;
2189 u32 sub_line_offset, line_index, line_offset;
2190 u64 line_no, table_skip_count, table_offset;
2192 line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
2193 table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
2194 line_offset = idt->line_offset[line_index]
2196 table_offset = table_skip_count * mmio->table_size;
2198 return mmio->base_offset + line_offset + table_offset + sub_line_offset;
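/*
 * Worked example for the math above (illustrative numbers only): with
 * line_size = 256 and num_lines = 2, an incoming offset of 520 decodes as:
 *
 *	line_no          = 520 / 256 = 2,  sub_line_offset = 8
 *	table_skip_count = 2 / 2 = 1,      line_index = 0
 *	line_offset      = idt->line_offset[0] * line_size
 *	table_offset     = 1 * mmio->table_size
 *
 * i.e. the access lands 8 bytes into the line named by the first interleave
 * table entry, in the second repetition of the table, all relative to
 * mmio->base_offset.
 */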
2201 static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
2203 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
2204 u64 offset = nfit_blk->stat_offset + mmio->size * bw;
2205 const u32 STATUS_MASK = 0x80000037;
2207 if (mmio->num_lines)
2208 offset = to_interleave_offset(offset, mmio);
2210 return readl(mmio->addr.base + offset) & STATUS_MASK;
2213 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
2214 resource_size_t dpa, unsigned int len, unsigned int write)
2217 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
2220 BCW_OFFSET_MASK = (1ULL << 48)-1,
2222 BCW_LEN_MASK = (1ULL << 8) - 1,
2226 cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
2227 len = len >> L1_CACHE_SHIFT;
2228 cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
2229 cmd |= ((u64) write) << BCW_CMD_SHIFT;
2231 offset = nfit_blk->cmd_offset + mmio->size * bw;
2232 if (mmio->num_lines)
2233 offset = to_interleave_offset(offset, mmio);
2235 writeq(cmd, mmio->addr.base + offset);
2236 nvdimm_flush(nfit_blk->nd_region);
2238 if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
2239 readq(mmio->addr.base + offset);
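/*
 * Command layout sketch: the 64-bit block-window command assembled above
 * packs the target DPA in cache-line units into the low 48 bits
 * (BCW_OFFSET_MASK), the transfer length in cache lines into the next
 * 8 bits (BCW_LEN_MASK at BCW_LEN_SHIFT), and the read(0)/write(1)
 * direction at BCW_CMD_SHIFT; the shift constants themselves sit in the
 * enum partially elided from this excerpt. The trailing readq() read-back
 * is issued for DIMMs that report the "latch" flag, ensuring the command
 * write has taken effect before the aperture is touched.
 */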
2242 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
2243 resource_size_t dpa, void *iobuf, size_t len, int rw,
2246 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
2247 unsigned int copied = 0;
2251 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
2252 + lane * mmio->size;
2253 write_blk_ctl(nfit_blk, lane, dpa, len, rw);
2258 if (mmio->num_lines) {
2261 offset = to_interleave_offset(base_offset + copied,
2263 div_u64_rem(offset, mmio->line_size, &line_offset);
2264 c = min_t(size_t, len, mmio->line_size - line_offset);
2266 offset = base_offset + nfit_blk->bdw_offset;
2271 memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
2273 if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
2274 arch_invalidate_pmem((void __force *)
2275 mmio->addr.aperture + offset, c);
2277 memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
2285 nvdimm_flush(nfit_blk->nd_region);
2287 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
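/* any error reported in the window status register fails the whole transfer with -EIO */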
2291 static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
2292 resource_size_t dpa, void *iobuf, u64 len, int rw)
2294 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
2295 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
2296 struct nd_region *nd_region = nfit_blk->nd_region;
2297 unsigned int lane, copied = 0;
2300 lane = nd_region_acquire_lane(nd_region);
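/*
 * Carve the request into aperture (mmio->size) sized chunks; the lane is
 * held for the whole request so concurrent callers do not interleave
 * accesses to the same block window.
 */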
2302 u64 c = min(len, mmio->size);
2304 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
2305 iobuf + copied, c, rw, lane);
2312 nd_region_release_lane(nd_region, lane);
2317 static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
2318 struct acpi_nfit_interleave *idt, u16 interleave_ways)
2321 mmio->num_lines = idt->line_count;
2322 mmio->line_size = idt->line_size;
2323 if (interleave_ways == 0)
2325 mmio->table_size = mmio->num_lines * interleave_ways
2332 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
2333 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
2335 struct nd_cmd_dimm_flags flags;
2338 memset(&flags, 0, sizeof(flags));
2339 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
2340 sizeof(flags), NULL);
2342 if (rc >= 0 && flags.status == 0)
2343 nfit_blk->dimm_flags = flags.flags;
2344 else if (rc == -ENOTTY) {
2345 /* fall back to a conservative default */
2346 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
2354 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
2357 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
2358 struct nd_blk_region *ndbr = to_nd_blk_region(dev);
2359 struct nfit_blk_mmio *mmio;
2360 struct nfit_blk *nfit_blk;
2361 struct nfit_mem *nfit_mem;
2362 struct nvdimm *nvdimm;
2365 nvdimm = nd_blk_region_to_dimm(ndbr);
2366 nfit_mem = nvdimm_provider_data(nvdimm);
2367 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
2368 dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
2369 nfit_mem ? "" : " nfit_mem",
2370 (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
2371 (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
2375 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
2378 nd_blk_region_set_provider_data(ndbr, nfit_blk);
2379 nfit_blk->nd_region = to_nd_region(dev);
2381 /* map block aperture memory */
2382 nfit_blk->bdw_offset = nfit_mem->bdw->offset;
2383 mmio = &nfit_blk->mmio[BDW];
2384 mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
2385 nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
2386 if (!mmio->addr.base) {
2387 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
2388 nvdimm_name(nvdimm));
2391 mmio->size = nfit_mem->bdw->size;
2392 mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
2393 mmio->idt = nfit_mem->idt_bdw;
2394 mmio->spa = nfit_mem->spa_bdw;
2395 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
2396 nfit_mem->memdev_bdw->interleave_ways);
2398 dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
2399 __func__, nvdimm_name(nvdimm));
2403 /* map block control memory */
2404 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
2405 nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
2406 mmio = &nfit_blk->mmio[DCR];
2407 mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
2408 nfit_mem->spa_dcr->length);
2409 if (!mmio->addr.base) {
2410 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
2411 nvdimm_name(nvdimm));
2414 mmio->size = nfit_mem->dcr->window_size;
2415 mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
2416 mmio->idt = nfit_mem->idt_dcr;
2417 mmio->spa = nfit_mem->spa_dcr;
2418 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
2419 nfit_mem->memdev_dcr->interleave_ways);
2421 dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
2422 __func__, nvdimm_name(nvdimm));
2426 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
2428 dev_dbg(dev, "%s: %s failed get DIMM flags\n",
2429 __func__, nvdimm_name(nvdimm));
2433 if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
2434 dev_warn(dev, "unable to guarantee persistence of writes\n");
2436 if (mmio->line_size == 0)
2439 if ((u32) nfit_blk->cmd_offset % mmio->line_size
2440 + 8 > mmio->line_size) {
2441 dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
2443 } else if ((u32) nfit_blk->stat_offset % mmio->line_size
2444 + 8 > mmio->line_size) {
2445 dev_dbg(dev, "stat_offset crosses interleave boundary\n");
2452 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
2453 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
2455 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2456 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2459 cmd->address = spa->address;
2460 cmd->length = spa->length;
2461 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
2462 sizeof(*cmd), &cmd_rc);
2468 static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
2472 struct nd_cmd_ars_start ars_start;
2473 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2474 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2476 memset(&ars_start, 0, sizeof(ars_start));
2477 ars_start.address = spa->address;
2478 ars_start.length = spa->length;
2479 ars_start.flags = acpi_desc->ars_start_flags;
2480 if (nfit_spa_type(spa) == NFIT_SPA_PM)
2481 ars_start.type = ND_ARS_PERSISTENT;
2482 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
2483 ars_start.type = ND_ARS_VOLATILE;
2487 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2488 sizeof(ars_start), &cmd_rc);
2495 static int ars_continue(struct acpi_nfit_desc *acpi_desc)
2498 struct nd_cmd_ars_start ars_start;
2499 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2500 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2502 memset(&ars_start, 0, sizeof(ars_start));
2503 ars_start.address = ars_status->restart_address;
2504 ars_start.length = ars_status->restart_length;
2505 ars_start.type = ars_status->type;
2506 ars_start.flags = acpi_desc->ars_start_flags;
2507 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2508 sizeof(ars_start), &cmd_rc);
2514 static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
2516 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2517 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2520 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
2521 acpi_desc->ars_status_size, &cmd_rc);
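/*
 * Summary of the ARS helpers above (a restatement, not new spec text):
 * ars_get_cap() sizes the status buffer and reports the supported scrub
 * types, ars_start() kicks off a scrub of one SPA range, ars_get_status()
 * fetches results, and ars_continue() resumes from the reported restart
 * address when the output buffer overflows.
 */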
2527 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc,
2528 struct nd_cmd_ars_status *ars_status)
2530 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
2535 * First record starts at a 44 byte offset from the start of the payload.
2538 if (ars_status->out_length < 44)
2540 for (i = 0; i < ars_status->num_records; i++) {
2541 /* only process full records */
2542 if (ars_status->out_length
2543 < 44 + sizeof(struct nd_ars_record) * (i + 1))
2545 rc = nvdimm_bus_add_badrange(nvdimm_bus,
2546 ars_status->records[i].err_address,
2547 ars_status->records[i].length);
2551 if (i < ars_status->num_records)
2552 dev_warn(acpi_desc->dev, "detected truncated ars results\n");
2557 static void acpi_nfit_remove_resource(void *data)
2559 struct resource *res = data;
2561 remove_resource(res);
2564 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
2565 struct nd_region_desc *ndr_desc)
2567 struct resource *res, *nd_res = ndr_desc->res;
2570 /* No operation if the region is already registered as PMEM */
2571 is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
2572 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
2573 if (is_pmem == REGION_INTERSECTS)
2576 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
2580 res->name = "Persistent Memory";
2581 res->start = nd_res->start;
2582 res->end = nd_res->end;
2583 res->flags = IORESOURCE_MEM;
2584 res->desc = IORES_DESC_PERSISTENT_MEMORY;
2586 ret = insert_resource(&iomem_resource, res);
2590 ret = devm_add_action_or_reset(acpi_desc->dev,
2591 acpi_nfit_remove_resource,
2599 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
2600 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
2601 struct acpi_nfit_memory_map *memdev,
2602 struct nfit_spa *nfit_spa)
2604 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
2605 memdev->device_handle);
2606 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2607 struct nd_blk_region_desc *ndbr_desc;
2608 struct nfit_mem *nfit_mem;
2609 int blk_valid = 0, rc;
2612 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
2613 spa->range_index, memdev->device_handle);
2617 mapping->nvdimm = nvdimm;
2618 switch (nfit_spa_type(spa)) {
2620 case NFIT_SPA_VOLATILE:
2621 mapping->start = memdev->address;
2622 mapping->size = memdev->region_size;
2625 nfit_mem = nvdimm_provider_data(nvdimm);
2626 if (!nfit_mem || !nfit_mem->bdw) {
2627 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
2628 spa->range_index, nvdimm_name(nvdimm));
2630 mapping->size = nfit_mem->bdw->capacity;
2631 mapping->start = nfit_mem->bdw->start_address;
2632 ndr_desc->num_lanes = nfit_mem->bdw->windows;
2636 ndr_desc->mapping = mapping;
2637 ndr_desc->num_mappings = blk_valid;
2638 ndbr_desc = to_blk_region_desc(ndr_desc);
2639 ndbr_desc->enable = acpi_nfit_blk_region_enable;
2640 ndbr_desc->do_io = acpi_desc->blk_do_io;
2641 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
2644 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
2646 if (!nfit_spa->nd_region)
2654 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
2656 return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2657 nfit_spa_type(spa) == NFIT_SPA_VCD ||
2658 nfit_spa_type(spa) == NFIT_SPA_PDISK ||
2659 nfit_spa_type(spa) == NFIT_SPA_PCD);
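/*
 * Virtual device ranges (vdisk / vcd / pdisk / pcd) are exempt from the
 * range_index == 0 sanity check and are registered as pmem regions in
 * acpi_nfit_register_region() below.
 */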
2662 static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
2664 return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2665 nfit_spa_type(spa) == NFIT_SPA_VCD ||
2666 nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
2669 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
2670 struct nfit_spa *nfit_spa)
2672 static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
2673 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2674 struct nd_blk_region_desc ndbr_desc;
2675 struct nd_region_desc *ndr_desc;
2676 struct nfit_memdev *nfit_memdev;
2677 struct nvdimm_bus *nvdimm_bus;
2678 struct resource res;
2681 if (nfit_spa->nd_region)
2684 if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
2685 dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
2690 memset(&res, 0, sizeof(res));
2691 memset(&mappings, 0, sizeof(mappings));
2692 memset(&ndbr_desc, 0, sizeof(ndbr_desc));
2693 res.start = spa->address;
2694 res.end = res.start + spa->length - 1;
2695 ndr_desc = &ndbr_desc.ndr_desc;
2696 ndr_desc->res = &res;
2697 ndr_desc->provider_data = nfit_spa;
2698 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
2699 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
2700 ndr_desc->numa_node = acpi_map_pxm_to_online_node(
2701 spa->proximity_domain);
2703 ndr_desc->numa_node = NUMA_NO_NODE;
2706 * Persistence domain bits are hierarchical: if
2707 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
2708 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
2710 if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
2711 set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
2712 else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
2713 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);
2715 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
2716 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
2717 struct nd_mapping_desc *mapping;
2719 if (memdev->range_index != spa->range_index)
2721 if (count >= ND_MAX_MAPPINGS) {
2722 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
2723 spa->range_index, ND_MAX_MAPPINGS);
2726 mapping = &mappings[count++];
2727 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
2733 ndr_desc->mapping = mappings;
2734 ndr_desc->num_mappings = count;
2735 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
2739 nvdimm_bus = acpi_desc->nvdimm_bus;
2740 if (nfit_spa_type(spa) == NFIT_SPA_PM) {
2741 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
2743 dev_warn(acpi_desc->dev,
2744 "failed to insert pmem resource to iomem: %d\n",
2749 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2751 if (!nfit_spa->nd_region)
2753 } else if (nfit_spa_is_volatile(spa)) {
2754 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
2756 if (!nfit_spa->nd_region)
2758 } else if (nfit_spa_is_virtual(spa)) {
2759 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2761 if (!nfit_spa->nd_region)
2767 dev_err(acpi_desc->dev, "failed to register spa range %d\n",
2768 nfit_spa->spa->range_index);
2772 static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
2775 struct device *dev = acpi_desc->dev;
2776 struct nd_cmd_ars_status *ars_status;
2778 if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
2779 memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
2783 if (acpi_desc->ars_status)
2784 devm_kfree(dev, acpi_desc->ars_status);
2785 acpi_desc->ars_status = NULL;
2786 ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
2789 acpi_desc->ars_status = ars_status;
2790 acpi_desc->ars_status_size = max_ars;
2794 static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
2795 struct nfit_spa *nfit_spa)
2797 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2800 if (!nfit_spa->max_ars) {
2801 struct nd_cmd_ars_cap ars_cap;
2803 memset(&ars_cap, 0, sizeof(ars_cap));
2804 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
2807 nfit_spa->max_ars = ars_cap.max_ars_out;
2808 nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
2809 /* check that the supported scrub types match the spa type */
2810 if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
2811 ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
2813 else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
2814 ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
2818 if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
2821 rc = ars_get_status(acpi_desc);
2822 if (rc < 0 && rc != -ENOSPC)
2825 if (ars_status_process_records(acpi_desc, acpi_desc->ars_status))
2831 static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
2832 struct nfit_spa *nfit_spa)
2834 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2835 unsigned int overflow_retry = scrub_overflow_abort;
2836 u64 init_ars_start = 0, init_ars_len = 0;
2837 struct device *dev = acpi_desc->dev;
2838 unsigned int tmo = scrub_timeout;
2841 if (!nfit_spa->ars_required || !nfit_spa->nd_region)
2844 rc = ars_start(acpi_desc, nfit_spa);
2846 * If we timed out the initial scan, we'll still be busy here,
2847 * and will wait another timeout before giving up permanently.
2849 if (rc < 0 && rc != -EBUSY)
2853 u64 ars_start, ars_len;
2855 if (acpi_desc->cancel)
2857 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
2860 if (rc == -EBUSY && !tmo) {
2861 dev_warn(dev, "range %d ars timeout, aborting\n",
2868 * Note, entries may be appended to the list
2869 * while the lock is dropped, but the workqueue
2870 * being active prevents entries being deleted / rearranged.
2873 mutex_unlock(&acpi_desc->init_mutex);
2876 mutex_lock(&acpi_desc->init_mutex);
2880 /* we got some results, but there are more pending... */
2881 if (rc == -ENOSPC && overflow_retry--) {
2882 if (!init_ars_len) {
2883 init_ars_len = acpi_desc->ars_status->length;
2884 init_ars_start = acpi_desc->ars_status->address;
2886 rc = ars_continue(acpi_desc);
2890 dev_warn(dev, "range %d ars continuation failed\n",
2896 ars_start = init_ars_start;
2897 ars_len = init_ars_len;
2899 ars_start = acpi_desc->ars_status->address;
2900 ars_len = acpi_desc->ars_status->length;
2902 dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
2903 spa->range_index, ars_start, ars_len);
2904 /* notify the region about new poison entries */
2905 nvdimm_region_notify(nfit_spa->nd_region,
2906 NVDIMM_REVALIDATE_POISON);
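/*
 * The notification prompts the region to revalidate its badblocks list
 * against the newly added badrange entries.
 */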
2911 static void acpi_nfit_scrub(struct work_struct *work)
2914 u64 init_scrub_length = 0;
2915 struct nfit_spa *nfit_spa;
2916 u64 init_scrub_address = 0;
2917 bool init_ars_done = false;
2918 struct acpi_nfit_desc *acpi_desc;
2919 unsigned int tmo = scrub_timeout;
2920 unsigned int overflow_retry = scrub_overflow_abort;
2922 acpi_desc = container_of(work, typeof(*acpi_desc), work);
2923 dev = acpi_desc->dev;
2926 * We scrub in 2 phases. The first phase waits for any platform
2927 * firmware-initiated scrubs to complete and then searches for the
2928 * affected spa regions to mark them scanned. In the second phase we
2929 * initiate a directed scrub for every range that was not scrubbed in
2930 * phase 1. If we're called for a 'rescan', we harmlessly pass through
2931 * the first phase, but really only care about running phase 2, where
2932 * regions can be notified of new poison.
2935 /* process platform firmware initiated scrubs */
2937 mutex_lock(&acpi_desc->init_mutex);
2938 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2939 struct nd_cmd_ars_status *ars_status;
2940 struct acpi_nfit_system_address *spa;
2941 u64 ars_start, ars_len;
2944 if (acpi_desc->cancel)
2947 if (nfit_spa->nd_region)
2950 if (init_ars_done) {
2952 * No need to re-query; we're now just
2953 * reconciling all the ranges covered by the initial scrub.
2958 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
2960 if (rc == -ENOTTY) {
2961 /* no ars capability, just register spa and move on */
2962 acpi_nfit_register_region(acpi_desc, nfit_spa);
2966 if (rc == -EBUSY && !tmo) {
2967 /* fallthrough to directed scrub in phase 2 */
2968 dev_warn(dev, "timeout awaiting ars results, continuing...\n");
2970 } else if (rc == -EBUSY) {
2971 mutex_unlock(&acpi_desc->init_mutex);
2977 /* we got some results, but there are more pending... */
2978 if (rc == -ENOSPC && overflow_retry--) {
2979 ars_status = acpi_desc->ars_status;
2981 * Record the original scrub range, so that we
2982 * can recall all the ranges impacted by the initial scrub.
2985 if (!init_scrub_length) {
2986 init_scrub_length = ars_status->length;
2987 init_scrub_address = ars_status->address;
2989 rc = ars_continue(acpi_desc);
2991 mutex_unlock(&acpi_desc->init_mutex);
2998 * Initial scrub failed, we'll give it one more try.
3004 /* We got some final results, record completed ranges */
3005 ars_status = acpi_desc->ars_status;
3006 if (init_scrub_length) {
3007 ars_start = init_scrub_address;
3008 ars_len = ars_start + init_scrub_length;
3010 ars_start = ars_status->address;
3011 ars_len = ars_status->length;
3013 spa = nfit_spa->spa;
3015 if (!init_ars_done) {
3016 init_ars_done = true;
3017 dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
3018 ars_start, ars_len);
3020 if (ars_start <= spa->address && ars_start + ars_len
3021 >= spa->address + spa->length)
3022 acpi_nfit_register_region(acpi_desc, nfit_spa);
3026 * For all the ranges not covered by an initial scrub we still
3027 * want to see if there are errors, but it's ok to discover them asynchronously.
3030 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3032 * Flag all the ranges that still need scrubbing, but
3033 * register them now to make data available.
3035 if (!nfit_spa->nd_region) {
3036 nfit_spa->ars_required = 1;
3037 acpi_nfit_register_region(acpi_desc, nfit_spa);
3040 acpi_desc->init_complete = 1;
3042 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
3043 acpi_nfit_async_scrub(acpi_desc, nfit_spa);
3044 acpi_desc->scrub_count++;
3045 acpi_desc->ars_start_flags = 0;
3046 if (acpi_desc->scrub_count_state)
3047 sysfs_notify_dirent(acpi_desc->scrub_count_state);
3048 mutex_unlock(&acpi_desc->init_mutex);
3051 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
3053 struct nfit_spa *nfit_spa;
3056 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
3057 if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
3058 /* BLK regions don't need to wait for ars results */
3059 rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
3064 acpi_desc->ars_start_flags = 0;
3065 if (!acpi_desc->cancel)
3066 queue_work(nfit_wq, &acpi_desc->work);
3070 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
3071 struct nfit_table_prev *prev)
3073 struct device *dev = acpi_desc->dev;
3075 if (!list_empty(&prev->spas) ||
3076 !list_empty(&prev->memdevs) ||
3077 !list_empty(&prev->dcrs) ||
3078 !list_empty(&prev->bdws) ||
3079 !list_empty(&prev->idts) ||
3080 !list_empty(&prev->flushes)) {
3081 dev_err(dev, "new nfit deletes entries (unsupported)\n");
3087 static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
3089 struct device *dev = acpi_desc->dev;
3090 struct kernfs_node *nfit;
3091 struct device *bus_dev;
3093 if (!ars_supported(acpi_desc->nvdimm_bus))
3096 bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
3097 nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
3099 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
3102 acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
3104 if (!acpi_desc->scrub_count_state) {
3105 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
3112 static void acpi_nfit_unregister(void *data)
3114 struct acpi_nfit_desc *acpi_desc = data;
3116 nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
3119 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
3121 struct device *dev = acpi_desc->dev;
3122 struct nfit_table_prev prev;
3126 if (!acpi_desc->nvdimm_bus) {
3127 acpi_nfit_init_dsms(acpi_desc);
3129 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
3130 &acpi_desc->nd_desc);
3131 if (!acpi_desc->nvdimm_bus)
3134 rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
3139 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
3143 /* register this acpi_desc for mce notifications */
3144 mutex_lock(&acpi_desc_lock);
3145 list_add_tail(&acpi_desc->list, &acpi_descs);
3146 mutex_unlock(&acpi_desc_lock);
3149 mutex_lock(&acpi_desc->init_mutex);
3151 INIT_LIST_HEAD(&prev.spas);
3152 INIT_LIST_HEAD(&prev.memdevs);
3153 INIT_LIST_HEAD(&prev.dcrs);
3154 INIT_LIST_HEAD(&prev.bdws);
3155 INIT_LIST_HEAD(&prev.idts);
3156 INIT_LIST_HEAD(&prev.flushes);
3158 list_cut_position(&prev.spas, &acpi_desc->spas,
3159 acpi_desc->spas.prev);
3160 list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
3161 acpi_desc->memdevs.prev);
3162 list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
3163 acpi_desc->dcrs.prev);
3164 list_cut_position(&prev.bdws, &acpi_desc->bdws,
3165 acpi_desc->bdws.prev);
3166 list_cut_position(&prev.idts, &acpi_desc->idts,
3167 acpi_desc->idts.prev);
3168 list_cut_position(&prev.flushes, &acpi_desc->flushes,
3169 acpi_desc->flushes.prev);
3172 while (!IS_ERR_OR_NULL(data))
3173 data = add_table(acpi_desc, &prev, data, end);
3176 dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
3182 rc = acpi_nfit_check_deletions(acpi_desc, &prev);
3186 rc = nfit_mem_init(acpi_desc);
3190 rc = acpi_nfit_register_dimms(acpi_desc);
3194 rc = acpi_nfit_register_regions(acpi_desc);
3197 mutex_unlock(&acpi_desc->init_mutex);
3200 EXPORT_SYMBOL_GPL(acpi_nfit_init);
3202 struct acpi_nfit_flush_work {
3203 struct work_struct work;
3204 struct completion cmp;
3207 static void flush_probe(struct work_struct *work)
3209 struct acpi_nfit_flush_work *flush;
3211 flush = container_of(work, typeof(*flush), work);
3212 complete(&flush->cmp);
3215 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
3217 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
3218 struct device *dev = acpi_desc->dev;
3219 struct acpi_nfit_flush_work flush;
3222 /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
3226 /* bounce the init_mutex to make init_complete valid */
3227 mutex_lock(&acpi_desc->init_mutex);
3228 if (acpi_desc->cancel || acpi_desc->init_complete) {
3229 mutex_unlock(&acpi_desc->init_mutex);
3234 * Scrub work could take tens of seconds; userspace may give up, so we
3235 * need to be interruptible while waiting.
3237 INIT_WORK_ONSTACK(&flush.work, flush_probe);
3238 init_completion(&flush.cmp);
3239 queue_work(nfit_wq, &flush.work);
3240 mutex_unlock(&acpi_desc->init_mutex);
3242 rc = wait_for_completion_interruptible(&flush.cmp);
3243 cancel_work_sync(&flush.work);
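/*
 * The work item lives on this stack frame, so make sure it is idle before
 * returning even when the wait above was interrupted.
 */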
3247 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3248 struct nvdimm *nvdimm, unsigned int cmd)
3250 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
3254 if (cmd != ND_CMD_ARS_START)
3258 * The kernel and userspace may race to initiate a scrub, but
3259 * the scrub thread is prepared to lose that initial race. It
3260 * just needs guarantees that any ars it initiates are not
3261 * interrupted by any intervening start requests from userspace.
3263 if (work_busy(&acpi_desc->work))
3269 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, u8 flags)
3271 struct device *dev = acpi_desc->dev;
3272 struct nfit_spa *nfit_spa;
3274 if (work_busy(&acpi_desc->work))
3277 mutex_lock(&acpi_desc->init_mutex);
3278 if (acpi_desc->cancel) {
3279 mutex_unlock(&acpi_desc->init_mutex);
3283 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3284 struct acpi_nfit_system_address *spa = nfit_spa->spa;
3286 if (nfit_spa_type(spa) != NFIT_SPA_PM)
3289 nfit_spa->ars_required = 1;
3291 acpi_desc->ars_start_flags = flags;
3292 queue_work(nfit_wq, &acpi_desc->work);
3293 dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
3294 mutex_unlock(&acpi_desc->init_mutex);
3299 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
3301 struct nvdimm_bus_descriptor *nd_desc;
3303 dev_set_drvdata(dev, acpi_desc);
3304 acpi_desc->dev = dev;
3305 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
3306 nd_desc = &acpi_desc->nd_desc;
3307 nd_desc->provider_name = "ACPI.NFIT";
3308 nd_desc->module = THIS_MODULE;
3309 nd_desc->ndctl = acpi_nfit_ctl;
3310 nd_desc->flush_probe = acpi_nfit_flush_probe;
3311 nd_desc->clear_to_send = acpi_nfit_clear_to_send;
3312 nd_desc->attr_groups = acpi_nfit_attribute_groups;
3314 INIT_LIST_HEAD(&acpi_desc->spas);
3315 INIT_LIST_HEAD(&acpi_desc->dcrs);
3316 INIT_LIST_HEAD(&acpi_desc->bdws);
3317 INIT_LIST_HEAD(&acpi_desc->idts);
3318 INIT_LIST_HEAD(&acpi_desc->flushes);
3319 INIT_LIST_HEAD(&acpi_desc->memdevs);
3320 INIT_LIST_HEAD(&acpi_desc->dimms);
3321 INIT_LIST_HEAD(&acpi_desc->list);
3322 mutex_init(&acpi_desc->init_mutex);
3323 INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
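/*
 * Typical usage (an illustrative sketch, not code from this file): a probe
 * path allocates an acpi_nfit_desc, calls acpi_nfit_desc_init(acpi_desc, dev)
 * to wire up the bus descriptor and lists, then passes the raw NFIT payload
 * to acpi_nfit_init(acpi_desc, nfit, sz) to register the nvdimm bus, DIMMs
 * and regions.
 */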