author     Linus Torvalds  2021-11-09 10:11:53 -0800
committer  Linus Torvalds  2021-11-09 10:11:53 -0800
commit     59a2ceeef6d6bb8f68550fdbd84246b74a99f06b (patch)
tree       d8302a240dfe56defb8d56df555bb046a5a7bb5c /drivers
parent     d2f38a3c6507b2520101f9a3807ed98f1bdc545a (diff)
parent     0e9beb8a96f21a6df1579cb3a679e150e3269d80 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 "87 patches.

  Subsystems affected by this patch series: mm (pagecache and hugetlb),
  procfs, misc, MAINTAINERS, lib, checkpatch, binfmt, kallsyms, ramfs,
  init, codafs, nilfs2, hfs, crash_dump, signals, seq_file, fork,
  sysvfs, kcov, gdb, resource, selftests, and ipc"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (87 commits)
  ipc/ipc_sysctl.c: remove fallback for !CONFIG_PROC_SYSCTL
  ipc: check checkpoint_restore_ns_capable() to modify C/R proc files
  selftests/kselftest/runner/run_one(): allow running non-executable files
  virtio-mem: disallow mapping virtio-mem memory via /dev/mem
  kernel/resource: disallow access to exclusive system RAM regions
  kernel/resource: clean up and optimize iomem_is_exclusive()
  scripts/gdb: handle split debug for vmlinux
  kcov: replace local_irq_save() with a local_lock_t
  kcov: avoid enable+disable interrupts if !in_task()
  kcov: allocate per-CPU memory on the relevant node
  Documentation/kcov: define `ip' in the example
  Documentation/kcov: include types.h in the example
  sysv: use BUILD_BUG_ON instead of runtime check
  kernel/fork.c: unshare(): use swap() to make code cleaner
  seq_file: fix passing wrong private data
  seq_file: move seq_escape() to a header
  signal: remove duplicate include in signal.h
  crash_dump: remove duplicate include in crash_dump.h
  crash_dump: fix boolreturn.cocci warning
  hfs/hfsplus: use WARN_ON for sanity check
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c                   5
-rw-r--r--  drivers/gpu/drm/drm_mm.c                                5
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c                         5
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c                20
-rw-r--r--  drivers/media/dvb-frontends/cxd2880/cxd2880_common.h    1
-rw-r--r--  drivers/virtio/Kconfig                                  1
-rw-r--r--  drivers/virtio/virtio_mem.c                           301
7 files changed, 226 insertions, 112 deletions
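
The four DRM files below all make the same mechanical substitution: the stack_depot_snprint() helper added earlier in this series replaces the open-coded stack_depot_fetch() plus stack_trace_snprint() pair. A minimal sketch of the pattern, with the handle, buffer, and size as placeholders for the per-driver variables:

#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/* Illustrative only: print a stack previously saved with stack_depot_save(). */
static void print_saved_stack(depot_stack_handle_t handle, char *buf, size_t size)
{
	/* Old pattern: fetch the stored entries, then format them separately. */
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(handle, &entries);
	stack_trace_snprint(buf, size, entries, nr_entries, 0);

	/* New pattern: a single helper fetches and formats in one step. */
	stack_depot_snprint(handle, buf, size, 0);
}
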
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 571da0c2f39f..f3d79eda94bb 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1668,13 +1668,10 @@ __dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
for (i = 0; i < history->len; i++) {
const struct drm_dp_mst_topology_ref_entry *entry =
&history->entries[i];
- ulong *entries;
- uint nr_entries;
u64 ts_nsec = entry->ts_nsec;
u32 rem_nsec = do_div(ts_nsec, 1000000000);
- nr_entries = stack_depot_fetch(entry->backtrace, &entries);
- stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);
+ stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4);
drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s",
entry->count,
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 93d48a6f04ab..7d1c578388d3 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -118,8 +118,6 @@ static noinline void save_stack(struct drm_mm_node *node)
static void show_leaks(struct drm_mm *mm)
{
struct drm_mm_node *node;
- unsigned long *entries;
- unsigned int nr_entries;
char *buf;
buf = kmalloc(BUFSZ, GFP_KERNEL);
@@ -133,8 +131,7 @@ static void show_leaks(struct drm_mm *mm)
continue;
}
- nr_entries = stack_depot_fetch(node->stack, &entries);
- stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
+ stack_depot_snprint(node->stack, buf, BUFSZ, 0);
DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
node->start, node->size, buf);
}
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 90546fa58fc1..bef795e265a6 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -56,8 +56,6 @@ void i915_vma_free(struct i915_vma *vma)
static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
- unsigned long *entries;
- unsigned int nr_entries;
char buf[512];
if (!vma->node.stack) {
@@ -66,8 +64,7 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
return;
}
- nr_entries = stack_depot_fetch(vma->node.stack, &entries);
- stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
+ stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
vma->node.start, vma->node.size, reason, buf);
}
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index eaf7688f517d..0d85f3c5c526 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -65,16 +65,6 @@ static noinline depot_stack_handle_t __save_depot_stack(void)
return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}
-static void __print_depot_stack(depot_stack_handle_t stack,
- char *buf, int sz, int indent)
-{
- unsigned long *entries;
- unsigned int nr_entries;
-
- nr_entries = stack_depot_fetch(stack, &entries);
- stack_trace_snprint(buf, sz, entries, nr_entries, indent);
-}
-
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
spin_lock_init(&rpm->debug.lock);
@@ -146,12 +136,12 @@ static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
if (!buf)
return;
- __print_depot_stack(stack, buf, PAGE_SIZE, 2);
+ stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
stack = READ_ONCE(rpm->debug.last_release);
if (stack) {
- __print_depot_stack(stack, buf, PAGE_SIZE, 2);
+ stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
}
@@ -183,12 +173,12 @@ __print_intel_runtime_pm_wakeref(struct drm_printer *p,
return;
if (dbg->last_acquire) {
- __print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
+ stack_depot_snprint(dbg->last_acquire, buf, PAGE_SIZE, 2);
drm_printf(p, "Wakeref last acquired:\n%s", buf);
}
if (dbg->last_release) {
- __print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
+ stack_depot_snprint(dbg->last_release, buf, PAGE_SIZE, 2);
drm_printf(p, "Wakeref last released:\n%s", buf);
}
@@ -203,7 +193,7 @@ __print_intel_runtime_pm_wakeref(struct drm_printer *p,
rep = 1;
while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
rep++, i++;
- __print_depot_stack(stack, buf, PAGE_SIZE, 2);
+ stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
}
diff --git a/drivers/media/dvb-frontends/cxd2880/cxd2880_common.h b/drivers/media/dvb-frontends/cxd2880/cxd2880_common.h
index b05bce71ab35..9dc15a5a9683 100644
--- a/drivers/media/dvb-frontends/cxd2880/cxd2880_common.h
+++ b/drivers/media/dvb-frontends/cxd2880/cxd2880_common.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/delay.h>
+#include <linux/bits.h>
#include <linux/string.h>
int cxd2880_convert2s_complement(u32 value, u32 bitlen);
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 10607be76a88..34f80b7a8a64 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -111,6 +111,7 @@ config VIRTIO_MEM
depends on MEMORY_HOTPLUG
depends on MEMORY_HOTREMOVE
depends on CONTIG_ALLOC
+ depends on EXCLUSIVE_SYSTEM_RAM
help
This driver provides access to virtio-mem paravirtualized memory
devices, allowing to hotplug and hotunplug memory.
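
The new EXCLUSIVE_SYSTEM_RAM dependency works together with the IORESOURCE_EXCLUSIVE flag that virtio_mem_create_resource() starts passing below: system RAM reserved with that flag is kept away from user-space mappers such as /dev/mem (see the "virtio-mem: disallow mapping virtio-mem memory via /dev/mem" and "kernel/resource" patches in this merge). A minimal sketch of such a reservation, with the parameters standing in for the driver's addr, region_size, and resource name:

#include <linux/ioport.h>

/* Illustrative only: reserve system RAM so /dev/mem cannot hand it out. */
static struct resource *reserve_exclusive_ram(resource_size_t start,
					      resource_size_t size,
					      const char *name)
{
	return __request_mem_region(start, size, name,
				    IORESOURCE_SYSTEM_RAM |
				    IORESOURCE_EXCLUSIVE);
}
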
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index bef8ad6bf466..0da0af251c73 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -223,6 +223,9 @@ struct virtio_mem {
* When this lock is held the pointers can't change, ONLINE and
* OFFLINE blocks can't change the state and no subblocks will get
* plugged/unplugged.
+ *
+ * In kdump mode, used to serialize requests, last_block_addr and
+ * last_block_plugged.
*/
struct mutex hotplug_mutex;
bool hotplug_active;
@@ -230,6 +233,9 @@ struct virtio_mem {
/* An error occurred we cannot handle - stop processing requests. */
bool broken;
+ /* Cached value of is_kdump_kernel() when the device was probed. */
+ bool in_kdump;
+
/* The driver is being removed. */
spinlock_t removal_lock;
bool removing;
@@ -243,6 +249,13 @@ struct virtio_mem {
/* Memory notifier (online/offline events). */
struct notifier_block memory_notifier;
+#ifdef CONFIG_PROC_VMCORE
+ /* vmcore callback for /proc/vmcore handling in kdump mode */
+ struct vmcore_cb vmcore_cb;
+ uint64_t last_block_addr;
+ bool last_block_plugged;
+#endif /* CONFIG_PROC_VMCORE */
+
/* Next device in the list of virtio-mem devices. */
struct list_head next;
};
@@ -260,6 +273,8 @@ static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
unsigned long nr_pages);
static void virtio_mem_retry(struct virtio_mem *vm);
+static int virtio_mem_create_resource(struct virtio_mem *vm);
+static void virtio_mem_delete_resource(struct virtio_mem *vm);
/*
* Register a virtio-mem device so it will be considered for the online_page
@@ -2291,6 +2306,12 @@ static void virtio_mem_run_wq(struct work_struct *work)
uint64_t diff;
int rc;
+ if (unlikely(vm->in_kdump)) {
+ dev_warn_once(&vm->vdev->dev,
+ "unexpected workqueue run in kdump kernel\n");
+ return;
+ }
+
hrtimer_cancel(&vm->retry_timer);
if (vm->broken)
@@ -2392,41 +2413,11 @@ static int virtio_mem_init_vq(struct virtio_mem *vm)
return 0;
}
-static int virtio_mem_init(struct virtio_mem *vm)
+static int virtio_mem_init_hotplug(struct virtio_mem *vm)
{
const struct range pluggable_range = mhp_get_pluggable_range(true);
- uint64_t sb_size, addr;
- uint16_t node_id;
-
- if (!vm->vdev->config->get) {
- dev_err(&vm->vdev->dev, "config access disabled\n");
- return -EINVAL;
- }
-
- /*
- * We don't want to (un)plug or reuse any memory when in kdump. The
- * memory is still accessible (but not mapped).
- */
- if (is_kdump_kernel()) {
- dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n");
- return -EBUSY;
- }
-
- /* Fetch all properties that can't change. */
- virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
- &vm->plugged_size);
- virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
- &vm->device_block_size);
- virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
- &node_id);
- vm->nid = virtio_mem_translate_node_id(vm, node_id);
- virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
- virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
- &vm->region_size);
-
- /* Determine the nid for the device based on the lowest address. */
- if (vm->nid == NUMA_NO_NODE)
- vm->nid = memory_add_physaddr_to_nid(vm->addr);
+ uint64_t unit_pages, sb_size, addr;
+ int rc;
/* bad device setup - warn only */
if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
@@ -2496,10 +2487,6 @@ static int virtio_mem_init(struct virtio_mem *vm)
vm->offline_threshold);
}
- dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
- dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
- dev_info(&vm->vdev->dev, "device block size: 0x%llx",
- (unsigned long long)vm->device_block_size);
dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
memory_block_size_bytes());
if (vm->in_sbm)
@@ -2508,10 +2495,170 @@ static int virtio_mem_init(struct virtio_mem *vm)
else
dev_info(&vm->vdev->dev, "big block size: 0x%llx",
(unsigned long long)vm->bbm.bb_size);
+
+ /* create the parent resource for all memory */
+ rc = virtio_mem_create_resource(vm);
+ if (rc)
+ return rc;
+
+ /* use a single dynamic memory group to cover the whole memory device */
+ if (vm->in_sbm)
+ unit_pages = PHYS_PFN(memory_block_size_bytes());
+ else
+ unit_pages = PHYS_PFN(vm->bbm.bb_size);
+ rc = memory_group_register_dynamic(vm->nid, unit_pages);
+ if (rc < 0)
+ goto out_del_resource;
+ vm->mgid = rc;
+
+ /*
+ * If we still have memory plugged, we have to unplug all memory first.
+ * Registering our parent resource makes sure that this memory isn't
+ * actually in use (e.g., trying to reload the driver).
+ */
+ if (vm->plugged_size) {
+ vm->unplug_all_required = true;
+ dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
+ }
+
+ /* register callbacks */
+ vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
+ rc = register_memory_notifier(&vm->memory_notifier);
+ if (rc)
+ goto out_unreg_group;
+ rc = register_virtio_mem_device(vm);
+ if (rc)
+ goto out_unreg_mem;
+
+ return 0;
+out_unreg_mem:
+ unregister_memory_notifier(&vm->memory_notifier);
+out_unreg_group:
+ memory_group_unregister(vm->mgid);
+out_del_resource:
+ virtio_mem_delete_resource(vm);
+ return rc;
+}
+
+#ifdef CONFIG_PROC_VMCORE
+static int virtio_mem_send_state_request(struct virtio_mem *vm, uint64_t addr,
+ uint64_t size)
+{
+ const uint64_t nb_vm_blocks = size / vm->device_block_size;
+ const struct virtio_mem_req req = {
+ .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_STATE),
+ .u.state.addr = cpu_to_virtio64(vm->vdev, addr),
+ .u.state.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
+ };
+ int rc = -ENOMEM;
+
+ dev_dbg(&vm->vdev->dev, "requesting state: 0x%llx - 0x%llx\n", addr,
+ addr + size - 1);
+
+ switch (virtio_mem_send_request(vm, &req)) {
+ case VIRTIO_MEM_RESP_ACK:
+ return virtio16_to_cpu(vm->vdev, vm->resp.u.state.state);
+ case VIRTIO_MEM_RESP_ERROR:
+ rc = -EINVAL;
+ break;
+ default:
+ break;
+ }
+
+ dev_dbg(&vm->vdev->dev, "requesting state failed: %d\n", rc);
+ return rc;
+}
+
+static bool virtio_mem_vmcore_pfn_is_ram(struct vmcore_cb *cb,
+ unsigned long pfn)
+{
+ struct virtio_mem *vm = container_of(cb, struct virtio_mem,
+ vmcore_cb);
+ uint64_t addr = PFN_PHYS(pfn);
+ bool is_ram;
+ int rc;
+
+ if (!virtio_mem_contains_range(vm, addr, PAGE_SIZE))
+ return true;
+ if (!vm->plugged_size)
+ return false;
+
+ /*
+ * We have to serialize device requests and access to the information
+ * about the block queried last.
+ */
+ mutex_lock(&vm->hotplug_mutex);
+
+ addr = ALIGN_DOWN(addr, vm->device_block_size);
+ if (addr != vm->last_block_addr) {
+ rc = virtio_mem_send_state_request(vm, addr,
+ vm->device_block_size);
+ /* On any kind of error, we're going to signal !ram. */
+ if (rc == VIRTIO_MEM_STATE_PLUGGED)
+ vm->last_block_plugged = true;
+ else
+ vm->last_block_plugged = false;
+ vm->last_block_addr = addr;
+ }
+
+ is_ram = vm->last_block_plugged;
+ mutex_unlock(&vm->hotplug_mutex);
+ return is_ram;
+}
+#endif /* CONFIG_PROC_VMCORE */
+
+static int virtio_mem_init_kdump(struct virtio_mem *vm)
+{
+#ifdef CONFIG_PROC_VMCORE
+ dev_info(&vm->vdev->dev, "memory hot(un)plug disabled in kdump kernel\n");
+ vm->vmcore_cb.pfn_is_ram = virtio_mem_vmcore_pfn_is_ram;
+ register_vmcore_cb(&vm->vmcore_cb);
+ return 0;
+#else /* CONFIG_PROC_VMCORE */
+ dev_warn(&vm->vdev->dev, "disabled in kdump kernel without vmcore\n");
+ return -EBUSY;
+#endif /* CONFIG_PROC_VMCORE */
+}
+
+static int virtio_mem_init(struct virtio_mem *vm)
+{
+ uint16_t node_id;
+
+ if (!vm->vdev->config->get) {
+ dev_err(&vm->vdev->dev, "config access disabled\n");
+ return -EINVAL;
+ }
+
+ /* Fetch all properties that can't change. */
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
+ &vm->plugged_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
+ &vm->device_block_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
+ &node_id);
+ vm->nid = virtio_mem_translate_node_id(vm, node_id);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
+ &vm->region_size);
+
+ /* Determine the nid for the device based on the lowest address. */
+ if (vm->nid == NUMA_NO_NODE)
+ vm->nid = memory_add_physaddr_to_nid(vm->addr);
+
+ dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
+ dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
+ dev_info(&vm->vdev->dev, "device block size: 0x%llx",
+ (unsigned long long)vm->device_block_size);
if (vm->nid != NUMA_NO_NODE && IS_ENABLED(CONFIG_NUMA))
dev_info(&vm->vdev->dev, "nid: %d", vm->nid);
- return 0;
+ /*
+ * We don't want to (un)plug or reuse any memory when in kdump. The
+ * memory is still accessible (but not exposed to Linux).
+ */
+ if (vm->in_kdump)
+ return virtio_mem_init_kdump(vm);
+ return virtio_mem_init_hotplug(vm);
}
static int virtio_mem_create_resource(struct virtio_mem *vm)
@@ -2525,8 +2672,10 @@ static int virtio_mem_create_resource(struct virtio_mem *vm)
if (!name)
return -ENOMEM;
+ /* Disallow mapping device memory via /dev/mem completely. */
vm->parent_resource = __request_mem_region(vm->addr, vm->region_size,
- name, IORESOURCE_SYSTEM_RAM);
+ name, IORESOURCE_SYSTEM_RAM |
+ IORESOURCE_EXCLUSIVE);
if (!vm->parent_resource) {
kfree(name);
dev_warn(&vm->vdev->dev, "could not reserve device region\n");
@@ -2571,7 +2720,6 @@ static bool virtio_mem_has_memory_added(struct virtio_mem *vm)
static int virtio_mem_probe(struct virtio_device *vdev)
{
struct virtio_mem *vm;
- uint64_t unit_pages;
int rc;
BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
@@ -2590,6 +2738,7 @@ static int virtio_mem_probe(struct virtio_device *vdev)
hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vm->retry_timer.function = virtio_mem_timer_expired;
vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
+ vm->in_kdump = is_kdump_kernel();
/* register the virtqueue */
rc = virtio_mem_init_vq(vm);
@@ -2601,53 +2750,15 @@ static int virtio_mem_probe(struct virtio_device *vdev)
if (rc)
goto out_del_vq;
- /* create the parent resource for all memory */
- rc = virtio_mem_create_resource(vm);
- if (rc)
- goto out_del_vq;
-
- /* use a single dynamic memory group to cover the whole memory device */
- if (vm->in_sbm)
- unit_pages = PHYS_PFN(memory_block_size_bytes());
- else
- unit_pages = PHYS_PFN(vm->bbm.bb_size);
- rc = memory_group_register_dynamic(vm->nid, unit_pages);
- if (rc < 0)
- goto out_del_resource;
- vm->mgid = rc;
-
- /*
- * If we still have memory plugged, we have to unplug all memory first.
- * Registering our parent resource makes sure that this memory isn't
- * actually in use (e.g., trying to reload the driver).
- */
- if (vm->plugged_size) {
- vm->unplug_all_required = true;
- dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
- }
-
- /* register callbacks */
- vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
- rc = register_memory_notifier(&vm->memory_notifier);
- if (rc)
- goto out_unreg_group;
- rc = register_virtio_mem_device(vm);
- if (rc)
- goto out_unreg_mem;
-
virtio_device_ready(vdev);
/* trigger a config update to start processing the requested_size */
- atomic_set(&vm->config_changed, 1);
- queue_work(system_freezable_wq, &vm->wq);
+ if (!vm->in_kdump) {
+ atomic_set(&vm->config_changed, 1);
+ queue_work(system_freezable_wq, &vm->wq);
+ }
return 0;
-out_unreg_mem:
- unregister_memory_notifier(&vm->memory_notifier);
-out_unreg_group:
- memory_group_unregister(vm->mgid);
-out_del_resource:
- virtio_mem_delete_resource(vm);
out_del_vq:
vdev->config->del_vqs(vdev);
out_free_vm:
@@ -2657,9 +2768,8 @@ out_free_vm:
return rc;
}
-static void virtio_mem_remove(struct virtio_device *vdev)
+static void virtio_mem_deinit_hotplug(struct virtio_mem *vm)
{
- struct virtio_mem *vm = vdev->priv;
unsigned long mb_id;
int rc;
@@ -2706,7 +2816,8 @@ static void virtio_mem_remove(struct virtio_device *vdev)
* away. Warn at least.
*/
if (virtio_mem_has_memory_added(vm)) {
- dev_warn(&vdev->dev, "device still has system memory added\n");
+ dev_warn(&vm->vdev->dev,
+ "device still has system memory added\n");
} else {
virtio_mem_delete_resource(vm);
kfree_const(vm->resource_name);
@@ -2720,6 +2831,23 @@ static void virtio_mem_remove(struct virtio_device *vdev)
} else {
vfree(vm->bbm.bb_states);
}
+}
+
+static void virtio_mem_deinit_kdump(struct virtio_mem *vm)
+{
+#ifdef CONFIG_PROC_VMCORE
+ unregister_vmcore_cb(&vm->vmcore_cb);
+#endif /* CONFIG_PROC_VMCORE */
+}
+
+static void virtio_mem_remove(struct virtio_device *vdev)
+{
+ struct virtio_mem *vm = vdev->priv;
+
+ if (vm->in_kdump)
+ virtio_mem_deinit_kdump(vm);
+ else
+ virtio_mem_deinit_hotplug(vm);
/* reset the device and cleanup the queues */
vdev->config->reset(vdev);
@@ -2733,6 +2861,9 @@ static void virtio_mem_config_changed(struct virtio_device *vdev)
{
struct virtio_mem *vm = vdev->priv;
+ if (unlikely(vm->in_kdump))
+ return;
+
atomic_set(&vm->config_changed, 1);
virtio_mem_retry(vm);
}
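
The kdump half of the virtio_mem.c changes plugs into the vmcore_cb infrastructure introduced earlier in this same series: virtio_mem_init_kdump() registers a pfn_is_ram() callback, and the /proc/vmcore reader asks every registered callback before reading a page of the old kernel's memory. Below is a simplified sketch of that consumer side; it is not the literal fs/proc/vmcore.c code (which adds locking), and the "next" list member of struct vmcore_cb is assumed from the registration pattern shown above:

#include <linux/crash_dump.h>
#include <linux/list.h>

/*
 * Simplified sketch: a pfn is treated as RAM only if no registered
 * callback (e.g. virtio-mem's) vetoes it. Locking omitted for brevity.
 */
static bool sketch_pfn_is_ram(struct list_head *cb_list, unsigned long pfn)
{
	struct vmcore_cb *cb;

	list_for_each_entry(cb, cb_list, next) {
		if (cb->pfn_is_ram && !cb->pfn_is_ram(cb, pfn))
			return false;	/* e.g. an unplugged virtio-mem block */
	}
	return true;	/* nobody objected: read the page as before */
}
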