path: root/drivers/nvdimm/nd-core.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __ND_CORE_H__
#define __ND_CORE_H__
#include <linux/libnvdimm.h>
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/nd.h>
#include "nd.h"

extern struct list_head nvdimm_bus_list;
extern struct mutex nvdimm_bus_list_mutex;
extern int nvdimm_major;
extern struct workqueue_struct *nvdimm_wq;

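/*
 * struct nvdimm_bus - anchor device for a collection of DIMMs and regions
 *
 * @wait, @probe_active, and @ioctl_active let bus teardown drain
 * in-flight probes and ioctls; @reconfig_mutex serializes bus-level
 * configuration changes; @badrange tracks spans of the bus's address
 * space with known media errors.
 */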
struct nvdimm_bus {
	struct nvdimm_bus_descriptor *nd_desc;
	wait_queue_head_t wait;
	struct list_head list;
	struct device dev;
	int id, probe_active;
	atomic_t ioctl_active;
	struct list_head mapping_list;
	struct mutex reconfig_mutex;
	struct badrange badrange;
};

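/*
 * struct nvdimm - a single memory device registered on an nvdimm_bus
 *
 * @cmd_mask advertises which management commands the provider
 * implements, @flush_wpq describes write-pending-queue flush hint
 * resources, and @sec holds the optional security state driven by
 * @sec.ops.
 */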
struct nvdimm {
	unsigned long flags;
	void *provider_data;
	unsigned long cmd_mask;
	struct device dev;
	atomic_t busy;
	int id, num_flush;
	struct resource *flush_wpq;
	const char *dimm_id;
	struct {
		const struct nvdimm_security_ops *ops;
		unsigned long flags;
		unsigned long ext_flags;
		unsigned int overwrite_tmo;
		struct kernfs_node *overwrite_state;
	} sec;
	struct delayed_work dwork;
	const struct nvdimm_fw_ops *fw_ops;
};

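/*
 * nvdimm_security_flags - fetch and sanity-check a DIMM's security state
 *
 * Returns 0 when the DIMM has no security ops, otherwise the
 * provider-reported flag word, warning once if more than one of the
 * mutually exclusive state bits is set.
 */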
static inline unsigned long nvdimm_security_flags(
		struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype)
{
	u64 flags;
	const u64 state_flags = 1UL << NVDIMM_SECURITY_DISABLED
		| 1UL << NVDIMM_SECURITY_LOCKED
		| 1UL << NVDIMM_SECURITY_UNLOCKED
		| 1UL << NVDIMM_SECURITY_OVERWRITE;

	if (!nvdimm->sec.ops)
		return 0;

	flags = nvdimm->sec.ops->get_flags(nvdimm, ptype);
	/* disabled, locked, unlocked, and overwrite are mutually exclusive */
	dev_WARN_ONCE(&nvdimm->dev, hweight64(flags & state_flags) > 1,
			"reported invalid security state: %#llx\n",
			(unsigned long long) flags);
	return flags;
}
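
/*
 * Illustrative caller pattern for nvdimm_security_flags() (a sketch,
 * not a definition in this header):
 *
 *	flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
 *	if (test_bit(NVDIMM_SECURITY_LOCKED, &flags))
 *		return -EBUSY;
 *
 * The -EBUSY policy above is an example, not mandated by libnvdimm.
 */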
int nvdimm_security_freeze(struct nvdimm *nvdimm);
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len);
void nvdimm_security_overwrite_query(struct work_struct *work);
#else
static inline ssize_t nvdimm_security_store(struct device *dev,
		const char *buf, size_t len)
{
	return -EOPNOTSUPP;
}
static inline void nvdimm_security_overwrite_query(struct work_struct *work)
{
}
#endif

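/*
 * Note: is_nd_region() and is_memory() are currently equivalent, since
 * pmem and volatile are the only remaining region types.
 */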
bool is_nvdimm(struct device *dev);
bool is_nd_pmem(struct device *dev);
bool is_nd_volatile(struct device *dev);
static inline bool is_nd_region(struct device *dev)
{
	return is_nd_pmem(dev) || is_nd_volatile(dev);
}
static inline bool is_memory(struct device *dev)
{
	return is_nd_pmem(dev) || is_nd_volatile(dev);
}
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev);
int __init nvdimm_bus_init(void);
void nvdimm_bus_exit(void);
void nvdimm_devs_exit(void);
struct nd_region;
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev);
void nd_region_create_ns_seed(struct nd_region *nd_region);
void nd_region_create_btt_seed(struct nd_region *nd_region);
void nd_region_create_pfn_seed(struct nd_region *nd_region);
void nd_region_create_dax_seed(struct nd_region *nd_region);
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
void nd_synchronize(void);
void nd_device_register(struct device *dev);
struct nd_label_id;
char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid,
		      u32 flags);
bool nd_is_uuid_unique(struct device *dev, uuid_t *uuid);
struct nd_region;
struct nvdimm_drvdata;
struct nd_mapping;
void nd_mapping_free_labels(struct nd_mapping *nd_mapping);

int __reserve_free_pmem(struct device *dev, void *data);
void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		       struct nd_mapping *nd_mapping);

resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
					   struct nd_mapping *nd_mapping);
resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region);
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
				      struct nd_mapping *nd_mapping);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
		resource_size_t size);
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id);
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd);
void get_ndd(struct nvdimm_drvdata *ndd);
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
void nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns);
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns);
ssize_t nd_namespace_store(struct device *dev,
		struct nd_namespace_common **_ndns, const char *buf,
		size_t len);
struct nd_pfn *to_nd_pfn_safe(struct device *dev);
bool is_nvdimm_bus(struct device *dev);

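/*
 * devm_nsio_enable()/devm_nsio_disable() set up and tear down the
 * memory-mapped I/O window for a raw namespace; when CONFIG_ND_CLAIM
 * is not built in, the stubs below make enable fail with -ENXIO.
 */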
#if IS_ENABLED(CONFIG_ND_CLAIM)
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
		resource_size_t size);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
#else
static inline int devm_nsio_enable(struct device *dev,
		struct nd_namespace_io *nsio, resource_size_t size)
{
	return -ENXIO;
}

static inline void devm_nsio_disable(struct device *dev,
		struct nd_namespace_io *nsio)
{
}
#endif

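/*
 * nd_device_lock()/nd_device_unlock() wrap device_lock()/device_unlock().
 * Under CONFIG_PROVE_NVDIMM_LOCKING they also take the device's
 * lockdep_mutex with a subsystem-specific nesting level so lockdep can
 * validate lock ordering across bus, region, dimm, namespace, and claim
 * devices. Typical call pattern (illustrative sketch):
 *
 *	nd_device_lock(dev);
 *	...inspect or reconfigure the device...
 *	nd_device_unlock(dev);
 */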
#ifdef CONFIG_PROVE_NVDIMM_LOCKING
extern struct class *nd_class;

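/*
 * Lock classes, outermost first: bus, then ndctl, then region/dimm,
 * then namespace, then claim (btt/pfn/dax). LOCK_DIMM shares
 * LOCK_REGION's level because dimm and region locks are peers that do
 * not nest within one another.
 */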
enum {
	LOCK_BUS,
	LOCK_NDCTL,
	LOCK_REGION,
	LOCK_DIMM = LOCK_REGION,
	LOCK_NAMESPACE,
	LOCK_CLAIM,
};

static inline void debug_nvdimm_lock(struct device *dev)
{
	if (is_nd_region(dev))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_REGION);
	else if (is_nvdimm(dev))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_DIMM);
	else if (is_nd_btt(dev) || is_nd_pfn(dev) || is_nd_dax(dev))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_CLAIM);
	else if (dev->parent && (is_nd_region(dev->parent)))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_NAMESPACE);
	else if (is_nvdimm_bus(dev))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_BUS);
	else if (dev->class && dev->class == nd_class)
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_NDCTL);
	else
		dev_WARN(dev, "unknown lock level\n");
}

static inline void debug_nvdimm_unlock(struct device *dev)
{
	mutex_unlock(&dev->lockdep_mutex);
}

static inline void nd_device_lock(struct device *dev)
{
	device_lock(dev);
	debug_nvdimm_lock(dev);
}

static inline void nd_device_unlock(struct device *dev)
{
	debug_nvdimm_unlock(dev);
	device_unlock(dev);
}
#else
static inline void nd_device_lock(struct device *dev)
{
	device_lock(dev);
}

static inline void nd_device_unlock(struct device *dev)
{
	device_unlock(dev);
}

static inline void debug_nvdimm_lock(struct device *dev)
{
}

static inline void debug_nvdimm_unlock(struct device *dev)
{
}
#endif
#endif /* __ND_CORE_H__ */