/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* RDMA Network Block Driver
*
* Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
* Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
* Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
*/
#ifndef RNBD_CLT_H
#define RNBD_CLT_H
#include <linux/wait.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/blk-mq.h>
#include <linux/refcount.h>
#include <rtrs.h>
#include "rnbd-proto.h"
#include "rnbd-log.h"
/* time in seconds between reconnect tries, default to 30 s */
#define RECONNECT_DELAY 30
/*
 * Number of times to reconnect on error before giving up, 0 for disabled,
 * -1 for forever
 */
#define MAX_RECONNECTS -1
/* Life-cycle states of a client-side mapped device. */
enum rnbd_clt_dev_state {
	DEV_STATE_INIT,
	DEV_STATE_MAPPED,
	/* presumably: device still mapped but session lost — confirm in rnbd-clt.c */
	DEV_STATE_MAPPED_DISCONNECTED,
	DEV_STATE_UNMAPPED,
};
/*
 * Synchronous-completion helper embedded in struct rnbd_iu: a waiter
 * sleeps on @wait and reads the result from @errno.
 * NOTE(review): presumably 0 on success / negative errno on failure —
 * confirm the convention in rnbd-clt.c.
 */
struct rnbd_iu_comp {
	wait_queue_head_t wait;		/* waitqueue for the synchronous waiter */
	int errno;			/* operation result delivered to the waiter */
};
/*
 * Number of scatterlist entries kept inline in each IU (the first_sgl[]
 * flexible array of struct rnbd_iu).  On architectures without
 * scatterlist chaining (CONFIG_ARCH_NO_SG_CHAIN) a larger list cannot
 * be chained onto the inline entries, so none are embedded there.
 */
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define RNBD_INLINE_SG_CNT 0
#else
#define RNBD_INLINE_SG_CNT 2
#endif
/* Byte size of the inline scatterlist area trailing a struct rnbd_iu */
#define RNBD_RDMA_SGL_SIZE (sizeof(struct scatterlist) * RNBD_INLINE_SG_CNT)
/*
 * I/O unit: per-operation context for a block request or a control
 * message exchanged with the server over RTRS.
 */
struct rnbd_iu {
	union {
		struct request *rq;		/* for block io */
		void *buf;			/* for user messages */
	};
	struct rtrs_permit	*permit;	/* RTRS permit for this transfer */
	union {
		/* use to send msg associated with a dev */
		struct rnbd_clt_dev *dev;
		/* use to send msg associated with a sess */
		struct rnbd_clt_session *sess;
	};
	struct sg_table		sgt;		/* scatter/gather table for the payload */
	struct work_struct	work;
	int			errno;		/* operation result — presumably 0/-errno */
	struct rnbd_iu_comp	comp;		/* completion for synchronous callers */
	atomic_t		refcount;
	/*
	 * Inline scatterlist storage (RNBD_INLINE_SG_CNT entries;
	 * presumably allocated via RNBD_RDMA_SGL_SIZE — confirm).
	 * Flexible array: must be the last member.
	 */
	struct scatterlist	first_sgl[];
};
/* Per-CPU list of queues waiting to be requeued. */
struct rnbd_cpu_qlist {
	struct list_head	requeue_list;	/* presumably links struct rnbd_queue entries — confirm */
	spinlock_t		requeue_lock;	/* protects requeue_list */
	unsigned int		cpu;		/* CPU this qlist belongs to */
};
/*
 * One client session to an rnbd server.  Shared by every device mapped
 * through it (tracked on devs_list) and reference-counted via refcount.
 */
struct rnbd_clt_session {
	struct list_head	list;		/* entry on a session list — TODO confirm owner */
	struct rtrs_clt_sess	*rtrs;		/* underlying RTRS transport session */
	wait_queue_head_t	rtrs_waitq;	/* presumably paired with rtrs_ready — confirm */
	bool			rtrs_ready;
	struct rnbd_cpu_qlist	__percpu
				*cpu_queues;	/* per-CPU requeue lists */
	DECLARE_BITMAP(cpu_queues_bm, NR_CPUS);	/* presumably marks CPUs with pending requeues */
	int	__percpu	*cpu_rr;	/* per-cpu var for CPU round-robin */
	atomic_t		busy;
	size_t			queue_depth;	/* negotiated queue depth — confirm source */
	u32			max_io_size;
	u32			max_segments;
	struct blk_mq_tag_set	tag_set;	/* blk-mq tag set shared by session devices */
	u32			nr_poll_queues;
	struct mutex		lock;		/* protects state and devs_list */
	struct list_head	devs_list;	/* list of struct rnbd_clt_dev */
	refcount_t		refcount;
	char			sessname[NAME_MAX];	/* session name */
	u8			ver;		/* protocol version */
};
/*
 * Submission queue: per-hardware-queue context tying a device to a
 * blk-mq hardware context.
 */
struct rnbd_queue {
	struct list_head	requeue_list;	/* presumably an entry on a rnbd_cpu_qlist — confirm */
	unsigned long		in_list;	/* presumably a flag: queued for requeue — confirm */
	struct rnbd_clt_dev	*dev;		/* owning device */
	struct blk_mq_hw_ctx	*hctx;		/* associated blk-mq hardware context */
};
/*
 * One client-side mapped device: binds a remote device on a session to
 * a local gendisk / request queue.
 */
struct rnbd_clt_dev {
	struct rnbd_clt_session	*sess;		/* session this device is mapped over */
	struct request_queue	*queue;
	struct rnbd_queue	*hw_queues;	/* presumably one per blk-mq hw queue — confirm */
	u32			device_id;	/* presumably the server-assigned id — see rnbd-proto.h */
	/* local Idr index - used to track minor number allocations. */
	u32			clt_device_id;
	struct mutex		lock;
	enum rnbd_clt_dev_state	dev_state;
	char			*pathname;	/* presumably the remote device path given at map time */
	enum rnbd_access_mode	access_mode;
	u32			nr_poll_queues;
	u64			size;		/* device size in bytes */
	struct list_head	list;		/* entry on sess->devs_list */
	struct gendisk		*gd;
	struct kobject		kobj;		/* sysfs representation of the device */
	char			*blk_symlink_name;	/* name of the sysfs block symlink */
	refcount_t		refcount;
	struct work_struct	unmap_on_rmmod_work;	/* unmap work run on module removal */
};
/* rnbd-clt.c */
/*
 * Map a remote device and create the local block device for it.
 * @sessname: name of the session to use
 * @paths: array of @path_cnt transport paths; @port_nr: server port
 * @pathname: name/path of the remote device to map
 * @access_mode: access mode (see rnbd-proto.h)
 * @nr_poll_queues: number of polling hardware queues
 * Return: presumably the new device or an ERR_PTR() — confirm in rnbd-clt.c.
 */
struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
					 struct rtrs_addr *paths,
					 size_t path_cnt, u16 port_nr,
					 const char *pathname,
					 enum rnbd_access_mode access_mode,
					 u32 nr_poll_queues);
/*
 * Unmap @dev.  @force presumably overrides open users; @sysfs_self is
 * the sysfs attribute initiating the unmap — confirm in rnbd-clt-sysfs.c.
 */
int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
			  const struct attribute *sysfs_self);
/* Re-map a previously mapped device — presumably after disconnect; confirm. */
int rnbd_clt_remap_device(struct rnbd_clt_dev *dev);
/* Resize the local disk to @newsize — units not visible here; confirm. */
int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize);
/* rnbd-clt-sysfs.c */
/* Create the module-wide client sysfs entries; presumably 0 or -errno. */
int rnbd_clt_create_sysfs_files(void);
/* Tear down the module-wide client sysfs entries. */
void rnbd_clt_destroy_sysfs_files(void);
/* Remove the sysfs block symlink (blk_symlink_name) for @dev. */
void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev);
#endif /* RNBD_CLT_H */