path: root/drivers/hv
author		Linus Torvalds	2020-12-16 11:49:46 -0800
committer	Linus Torvalds	2020-12-16 11:49:46 -0800
commit		571b12dd1ad41f371448b693c0bd2e64968c7af4 (patch)
tree		6d8450241759ed4f8f965eaf24a56facdb1759b1 /drivers/hv
parent		e994cc240a3b75744c33ca9b8d74f71f0fcd8852 (diff)
parent		d1df458cbfdb0c3384c03c7fbcb1689bc02a746c (diff)
Merge tag 'hyperv-next-signed-20201214' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux
Pull Hyper-V updates from Wei Liu:

 - harden VMBus (Andres Beltran)

 - clean up VMBus driver (Matheus Castello)

 - fix hv_balloon reporting (Vitaly Kuznetsov)

 - fix a potential OOB issue (Andrea Parri)

 - remove an obsolete TODO item (Stefan Eschenbacher)

* tag 'hyperv-next-signed-20201214' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  hv_balloon: do adjust_managed_page_count() when ballooning/un-ballooning
  hv_balloon: simplify math in alloc_balloon_pages()
  drivers/hv: remove obsolete TODO and fix misleading typo in comment
  drivers: hv: vmbus: Fix checkpatch SPLIT_STRING
  hv_netvsc: Validate number of allocated sub-channels
  drivers: hv: vmbus: Fix call msleep using < 20ms
  drivers: hv: vmbus: Fix checkpatch LINE_SPACING
  drivers: hv: vmbus: Replace symbolic permissions by octal permissions
  drivers: hv: Fix hyperv_record_panic_msg path on comment
  hv_netvsc: Use vmbus_requestor to generate transaction IDs for VMBus hardening
  scsi: storvsc: Use vmbus_requestor to generate transaction IDs for VMBus hardening
  Drivers: hv: vmbus: Add vmbus_requestor data structure for VMBus hardening
Diffstat (limited to 'drivers/hv')
-rw-r--r--	drivers/hv/channel.c		174
-rw-r--r--	drivers/hv/hv_balloon.c		  5
-rw-r--r--	drivers/hv/hyperv_vmbus.h	  6
-rw-r--r--	drivers/hv/ring_buffer.c	 29
-rw-r--r--	drivers/hv/vmbus_drv.c		 52
5 files changed, 232 insertions(+), 34 deletions(-)
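The core of this pull is the vmbus_requestor added in channel.c: a fixed-size table run as a free list, where each free slot holds the index of the next free slot and each claimed slot holds the guest address of an in-flight request, so that a transaction ID coming back from the (untrusted) host can be validated before it is dereferenced. Below is a minimal userspace model of that scheme — no locking and no bitmap, with names that only mirror the kernel's — to show how IDs are handed out and recycled:

#include <stdint.h>
#include <stdio.h>

#define RQSTOR_SIZE   8
#define RQST_FREE_END UINT64_MAX	/* stands in for the kernel's U64_MAX */

static uint64_t req_arr[RQSTOR_SIZE];
static uint64_t next_request_id;

static void rqstor_init(void)
{
	int i;

	for (i = 0; i < RQSTOR_SIZE - 1; i++)
		req_arr[i] = i + 1;	/* free slot points at the next free slot */
	req_arr[i] = RQST_FREE_END;	/* last slot: no free slots after it */
	next_request_id = 0;
}

/* Claim a slot, store the request's address, return slot index + 1
 * (0 is reserved for unsolicited messages from the host). */
static uint64_t next_id(uint64_t addr)
{
	uint64_t slot = next_request_id;

	if (slot >= RQSTOR_SIZE)
		return 0;			/* table full */
	next_request_id = req_arr[slot];	/* pop the free list */
	req_arr[slot] = addr;			/* slot now holds the address */
	return slot + 1;
}

/* Translate a completed ID back to its address and recycle the slot. */
static uint64_t request_addr(uint64_t id)
{
	uint64_t slot = id - 1;
	uint64_t addr = req_arr[slot];

	req_arr[slot] = next_request_id;	/* push slot back on the list */
	next_request_id = slot;
	return addr;
}

int main(void)
{
	uint64_t a, b;

	rqstor_init();
	a = next_id(0x1000);
	b = next_id(0x2000);
	printf("id %llu -> %#llx\n", (unsigned long long)a,
	       (unsigned long long)request_addr(a));
	printf("id %llu -> %#llx\n", (unsigned long long)b,
	       (unsigned long long)request_addr(b));
	return 0;
}

Compiled standalone this prints id 1 -> 0x1000 and id 2 -> 0x2000. The kernel variant below additionally tracks claimed slots in a bitmap, so a bogus trans_id from the host is rejected rather than silently recycling a free slot.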
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index fbdda9938039..6fb0c76bfbf8 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -503,6 +503,70 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
+/**
+ * request_arr_init - Allocates memory for the requestor array. Each slot
+ * keeps track of the next available slot in the array. Initially, each
+ * slot points to the next one (as in a Linked List). The last slot
+ * does not point to anything, so its value is U64_MAX by default.
+ * @size: The size of the array
+ */
+static u64 *request_arr_init(u32 size)
+{
+ int i;
+ u64 *req_arr;
+
+ req_arr = kcalloc(size, sizeof(u64), GFP_KERNEL);
+ if (!req_arr)
+ return NULL;
+
+ for (i = 0; i < size - 1; i++)
+ req_arr[i] = i + 1;
+
+ /* Last slot (no more available slots) */
+ req_arr[i] = U64_MAX;
+
+ return req_arr;
+}
+
+/*
+ * vmbus_alloc_requestor - Initializes @rqstor's fields.
+ * Index 0 is the first free slot
+ * @size: Size of the requestor array
+ */
+static int vmbus_alloc_requestor(struct vmbus_requestor *rqstor, u32 size)
+{
+ u64 *rqst_arr;
+ unsigned long *bitmap;
+
+ rqst_arr = request_arr_init(size);
+ if (!rqst_arr)
+ return -ENOMEM;
+
+ bitmap = bitmap_zalloc(size, GFP_KERNEL);
+ if (!bitmap) {
+ kfree(rqst_arr);
+ return -ENOMEM;
+ }
+
+ rqstor->req_arr = rqst_arr;
+ rqstor->req_bitmap = bitmap;
+ rqstor->size = size;
+ rqstor->next_request_id = 0;
+ spin_lock_init(&rqstor->req_lock);
+
+ return 0;
+}
+
+/*
+ * vmbus_free_requestor - Frees memory allocated for @rqstor
+ * @rqstor: Pointer to the requestor struct
+ */
+static void vmbus_free_requestor(struct vmbus_requestor *rqstor)
+{
+ kfree(rqstor->req_arr);
+ bitmap_free(rqstor->req_bitmap);
+}
+
static int __vmbus_open(struct vmbus_channel *newchannel,
void *userdata, u32 userdatalen,
void (*onchannelcallback)(void *context), void *context)
@@ -523,6 +587,12 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
if (newchannel->state != CHANNEL_OPEN_STATE)
return -EINVAL;
+ /* Create and init requestor */
+ if (newchannel->rqstor_size) {
+ if (vmbus_alloc_requestor(&newchannel->requestor, newchannel->rqstor_size))
+ return -ENOMEM;
+ }
+
newchannel->state = CHANNEL_OPENING_STATE;
newchannel->onchannel_callback = onchannelcallback;
newchannel->channel_callback_context = context;
@@ -626,6 +696,7 @@ error_free_gpadl:
error_clean_ring:
hv_ringbuffer_cleanup(&newchannel->outbound);
hv_ringbuffer_cleanup(&newchannel->inbound);
+ vmbus_free_requestor(&newchannel->requestor);
newchannel->state = CHANNEL_OPEN_STATE;
return err;
}
@@ -808,6 +879,9 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
channel->ringbuffer_gpadlhandle = 0;
}
+ if (!ret)
+ vmbus_free_requestor(&channel->requestor);
+
return ret;
}
@@ -888,7 +962,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
/* in 8-bytes granularity */
desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
desc.len8 = (u16)(packetlen_aligned >> 3);
- desc.trans_id = requestid;
+ desc.trans_id = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
bufferlist[0].iov_base = &desc;
bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
@@ -897,7 +971,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- return hv_ringbuffer_write(channel, bufferlist, num_vecs);
+ return hv_ringbuffer_write(channel, bufferlist, num_vecs, requestid);
}
EXPORT_SYMBOL(vmbus_sendpacket);
@@ -939,7 +1013,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
desc.length8 = (u16)(packetlen_aligned >> 3);
- desc.transactionid = requestid;
+ desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
desc.reserved = 0;
desc.rangecount = pagecount;
@@ -956,7 +1030,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- return hv_ringbuffer_write(channel, bufferlist, 3);
+ return hv_ringbuffer_write(channel, bufferlist, 3, requestid);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
@@ -983,7 +1057,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
desc->length8 = (u16)(packetlen_aligned >> 3);
- desc->transactionid = requestid;
+ desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
desc->reserved = 0;
desc->rangecount = 1;
@@ -994,7 +1068,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- return hv_ringbuffer_write(channel, bufferlist, 3);
+ return hv_ringbuffer_write(channel, bufferlist, 3, requestid);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
@@ -1042,3 +1116,91 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
buffer_actual_len, requestid, true);
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
+
+/*
+ * vmbus_next_request_id - Returns a new request id: the slot index plus
+ * one (0 is reserved). The guest memory address is stored at that slot.
+ * Uses a spin lock to avoid race conditions.
+ * @rqstor: Pointer to the requestor struct
+ * @rqst_addr: Guest memory address to be stored in the array
+ */
+u64 vmbus_next_request_id(struct vmbus_requestor *rqstor, u64 rqst_addr)
+{
+ unsigned long flags;
+ u64 current_id;
+ const struct vmbus_channel *channel =
+ container_of(rqstor, const struct vmbus_channel, requestor);
+
+ /* Check rqstor has been initialized */
+ if (!channel->rqstor_size)
+ return VMBUS_NO_RQSTOR;
+
+ spin_lock_irqsave(&rqstor->req_lock, flags);
+ current_id = rqstor->next_request_id;
+
+ /* Requestor array is full */
+ if (current_id >= rqstor->size) {
+ spin_unlock_irqrestore(&rqstor->req_lock, flags);
+ return VMBUS_RQST_ERROR;
+ }
+
+ rqstor->next_request_id = rqstor->req_arr[current_id];
+ rqstor->req_arr[current_id] = rqst_addr;
+
+ /* The already held spin lock provides atomicity */
+ bitmap_set(rqstor->req_bitmap, current_id, 1);
+
+ spin_unlock_irqrestore(&rqstor->req_lock, flags);
+
+ /*
+ * Cannot return an ID of 0, which is reserved for an unsolicited
+ * message from Hyper-V.
+ */
+ return current_id + 1;
+}
+EXPORT_SYMBOL_GPL(vmbus_next_request_id);
+
+/*
+ * vmbus_request_addr - Returns the memory address stored at @trans_id
+ * in @rqstor. Uses a spin lock to avoid race conditions.
+ * @rqstor: Pointer to the requestor struct
+ * @trans_id: Request id sent back from Hyper-V. Becomes the requestor's
+ * next request id.
+ */
+u64 vmbus_request_addr(struct vmbus_requestor *rqstor, u64 trans_id)
+{
+ unsigned long flags;
+ u64 req_addr;
+ const struct vmbus_channel *channel =
+ container_of(rqstor, const struct vmbus_channel, requestor);
+
+ /* Check rqstor has been initialized */
+ if (!channel->rqstor_size)
+ return VMBUS_NO_RQSTOR;
+
+ /* Hyper-V can send an unsolicited message with ID of 0 */
+ if (!trans_id)
+ return trans_id;
+
+ spin_lock_irqsave(&rqstor->req_lock, flags);
+
+ /* Data corresponding to trans_id is stored at trans_id - 1 */
+ trans_id--;
+
+ /* Invalid trans_id */
+ if (trans_id >= rqstor->size || !test_bit(trans_id, rqstor->req_bitmap)) {
+ spin_unlock_irqrestore(&rqstor->req_lock, flags);
+ return VMBUS_RQST_ERROR;
+ }
+
+ req_addr = rqstor->req_arr[trans_id];
+ rqstor->req_arr[trans_id] = rqstor->next_request_id;
+ rqstor->next_request_id = trans_id;
+
+ /* The already held spin lock provides atomicity */
+ bitmap_clear(rqstor->req_bitmap, trans_id, 1);
+
+ spin_unlock_irqrestore(&rqstor->req_lock, flags);
+ return req_addr;
+}
+EXPORT_SYMBOL_GPL(vmbus_request_addr);
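To see how the two helpers pair up in practice, here is a hypothetical completion-side sketch (the function name and the completion-pointer convention are invented for illustration; the real consumers, hv_netvsc and storvsc, are converted elsewhere in this same series):

/* Hypothetical completion handler, not from this series. The send path
 * passed the guest address of a struct completion as @requestid, so the
 * translated value can be validated before it is dereferenced. */
static void my_on_channel_callback(struct vmbus_channel *chan,
				   const struct vmpacket_descriptor *desc)
{
	u64 addr = vmbus_request_addr(&chan->requestor, desc->trans_id);

	if (addr == VMBUS_RQST_ERROR)
		return;		/* stale or attacker-controlled trans_id: drop */
	complete((struct completion *)(unsigned long)addr);
}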
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index eb56e09ae15f..8c471823a5af 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1198,6 +1198,7 @@ static void free_balloon_pages(struct hv_dynmem_device *dm,
__ClearPageOffline(pg);
__free_page(pg);
dm->num_pages_ballooned--;
+ adjust_managed_page_count(pg, 1);
}
}
@@ -1238,8 +1239,10 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
split_page(pg, get_order(alloc_unit << PAGE_SHIFT));
/* mark all pages offline */
- for (j = 0; j < (1 << get_order(alloc_unit << PAGE_SHIFT)); j++)
+ for (j = 0; j < alloc_unit; j++) {
__SetPageOffline(pg + j);
+ adjust_managed_page_count(pg + j, -1);
+ }
bl_resp->range_count++;
bl_resp->range_array[i].finfo.start_page =
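The alloc_balloon_pages() change trades 1 << get_order(alloc_unit << PAGE_SHIFT) for plain alloc_unit, which is exact because hv_balloon only ever uses alloc_unit values of 512 pages (2 MiB) or 1 page, both powers of two. A small userspace check of that equivalence, assuming 4 KiB pages and a minimal stand-in for get_order():

#include <assert.h>

#define PAGE_SHIFT 12			/* assumes 4 KiB pages */

/* Minimal userspace stand-in for the kernel's get_order(). */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	/* hv_balloon uses alloc_unit of 512 pages (2 MiB) or 1 page */
	unsigned long units[] = { 512, 1 };
	int i;

	for (i = 0; i < 2; i++)
		assert((1UL << get_order(units[i] << PAGE_SHIFT)) == units[i]);
	return 0;
}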
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 40e2b9f91163..9416e09ebd58 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -179,21 +179,21 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct vmbus_channel *channel,
- const struct kvec *kv_list, u32 kv_count);
+ const struct kvec *kv_list, u32 kv_count,
+ u64 requestid);
int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
u64 *requestid, bool raw);
/*
- * The Maximum number of channels (16348) is determined by the size of the
+ * The Maximum number of channels (16384) is determined by the size of the
* interrupt page, which is HV_HYP_PAGE_SIZE. 1/2 of HV_HYP_PAGE_SIZE is to
* send endpoint interrupts, and the other is to receive endpoint interrupts.
*/
#define MAX_NUM_CHANNELS ((HV_HYP_PAGE_SIZE >> 1) << 3)
/* The value here must be in multiple of 32 */
-/* TODO: Need to make this configurable */
#define MAX_NUM_CHANNELS_SUPPORTED 256
#define MAX_CHANNEL_RELIDS \
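The comment fix is just arithmetic: (HV_HYP_PAGE_SIZE >> 1) << 3 is 2048 bytes times 8 bits per byte, i.e. 16384, not the 16348 the old comment claimed. Restated as a compile-time check, assuming the usual 4 KiB Hyper-V page size:

#define HV_HYP_PAGE_SIZE 4096	/* assumed; matches x86 Hyper-V */
#define MAX_NUM_CHANNELS ((HV_HYP_PAGE_SIZE >> 1) << 3)

_Static_assert(MAX_NUM_CHANNELS == 16384,
	       "2048 bytes x 8 bits/byte = 16384 channels");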
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 356e22159e83..35833d4d1a1d 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -248,7 +248,8 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
- const struct kvec *kv_list, u32 kv_count)
+ const struct kvec *kv_list, u32 kv_count,
+ u64 requestid)
{
int i;
u32 bytes_avail_towrite;
@@ -258,6 +259,8 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
u64 prev_indices;
unsigned long flags;
struct hv_ring_buffer_info *outring_info = &channel->outbound;
+ struct vmpacket_descriptor *desc = kv_list[0].iov_base;
+ u64 rqst_id = VMBUS_NO_RQSTOR;
if (channel->rescind)
return -ENODEV;
@@ -300,6 +303,23 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
kv_list[i].iov_len);
}
+ /*
+ * Allocate the request ID after the data has been copied into the
+ * ring buffer. Once this request ID is allocated, the completion
+ * path could find the data and free it.
+ */
+
+ if (desc->flags == VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED) {
+ rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
+ if (rqst_id == VMBUS_RQST_ERROR) {
+ spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+ pr_err("No request id available\n");
+ return -EAGAIN;
+ }
+ }
+ desc = hv_get_ring_buffer(outring_info) + old_write;
+ desc->trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;
+
/* Set previous packet start */
prev_indices = hv_get_ring_bufferindices(outring_info);
@@ -319,8 +339,13 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
hv_signal_on_write(old_write, channel);
- if (channel->rescind)
+ if (channel->rescind) {
+ if (rqst_id != VMBUS_NO_RQSTOR) {
+ /* Reclaim request ID to avoid leak of IDs */
+ vmbus_request_addr(&channel->requestor, rqst_id);
+ }
return -ENODEV;
+ }
return 0;
}
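Two details in this hunk are worth calling out. The request ID is allocated only after the payload has been copied in (still under the ring lock), so a completion that fires immediately can already resolve the ID to live data; and the flags test uses ==, which assumes COMPLETION_REQUESTED is the only flag set on packets that expect a completion. Callers of the send paths can now also see -EAGAIN when the requestor table is exhausted; a hypothetical caller-side fragment (error handling invented here):

/* Hypothetical caller fragment, not from this series. The requestor is
 * sized to the ring, so -EAGAIN means every slot is held by an in-flight
 * request; completions recycle IDs, so defer and resubmit rather than
 * treating it as fatal. */
ret = vmbus_sendpacket(channel, pkt, sizeof(*pkt),
		       (u64)(unsigned long)request,
		       VM_PKT_DATA_INBAND,
		       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret == -EAGAIN)
	return ret;	/* requestor full: back off and retry later */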
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 4fad3e6745e5..502f8cd95f6d 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -55,7 +55,7 @@ int vmbus_interrupt;
/*
* Boolean to control whether to report panic messages over Hyper-V.
*
- * It can be set via /proc/sys/kernel/hyperv/record_panic_msg
+ * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
*/
static int sysctl_record_panic_msg = 1;
@@ -156,6 +156,7 @@ static u32 channel_conn_id(struct vmbus_channel *channel,
{
u8 monitor_group = channel_monitor_group(channel);
u8 monitor_offset = channel_monitor_offset(channel);
+
return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}
@@ -550,6 +551,7 @@ static ssize_t vendor_show(struct device *dev,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
+
return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);
@@ -559,6 +561,7 @@ static ssize_t device_show(struct device *dev,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
+
return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);
@@ -1384,6 +1387,24 @@ static struct kmsg_dumper hv_kmsg_dumper = {
.dump = hv_kmsg_dump,
};
+static void hv_kmsg_dump_register(void)
+{
+ int ret;
+
+ hv_panic_page = hv_alloc_hyperv_zeroed_page();
+ if (!hv_panic_page) {
+ pr_err("Hyper-V: panic message page memory allocation failed\n");
+ return;
+ }
+
+ ret = kmsg_dump_register(&hv_kmsg_dumper);
+ if (ret) {
+ pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
+ hv_free_hyperv_page((unsigned long)hv_panic_page);
+ hv_panic_page = NULL;
+ }
+}
+
static struct ctl_table_header *hv_ctl_table_hdr;
/*
@@ -1474,21 +1495,8 @@ static int vmbus_bus_init(void)
* capability is supported by the hypervisor.
*/
hv_get_crash_ctl(hyperv_crash_ctl);
- if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG) {
- hv_panic_page = (void *)hv_alloc_hyperv_zeroed_page();
- if (hv_panic_page) {
- ret = kmsg_dump_register(&hv_kmsg_dumper);
- if (ret) {
- pr_err("Hyper-V: kmsg dump register "
- "error 0x%x\n", ret);
- hv_free_hyperv_page(
- (unsigned long)hv_panic_page);
- hv_panic_page = NULL;
- }
- } else
- pr_err("Hyper-V: panic message page memory "
- "allocation failed");
- }
+ if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
+ hv_kmsg_dump_register();
register_die_notifier(&hyperv_die_block);
}
@@ -1812,7 +1820,7 @@ static ssize_t channel_pending_show(struct vmbus_channel *channel,
channel_pending(channel,
vmbus_connection.monitor_pages[1]));
}
-static VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);
+static VMBUS_CHAN_ATTR(pending, 0444, channel_pending_show, NULL);
static ssize_t channel_latency_show(struct vmbus_channel *channel,
char *buf)
@@ -1821,19 +1829,19 @@ static ssize_t channel_latency_show(struct vmbus_channel *channel,
channel_latency(channel,
vmbus_connection.monitor_pages[1]));
}
-static VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);
+static VMBUS_CHAN_ATTR(latency, 0444, channel_latency_show, NULL);
static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%llu\n", channel->interrupts);
}
-static VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);
+static VMBUS_CHAN_ATTR(interrupts, 0444, channel_interrupts_show, NULL);
static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%llu\n", channel->sig_events);
}
-static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);
+static VMBUS_CHAN_ATTR(events, 0444, channel_events_show, NULL);
static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
char *buf)
@@ -1872,7 +1880,7 @@ static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
{
return sprintf(buf, "%u\n", channel->offermsg.monitorid);
}
-static VMBUS_CHAN_ATTR(monitor_id, S_IRUGO, subchannel_monitor_id_show, NULL);
+static VMBUS_CHAN_ATTR(monitor_id, 0444, subchannel_monitor_id_show, NULL);
static ssize_t subchannel_id_show(struct vmbus_channel *channel,
char *buf)
@@ -2377,7 +2385,7 @@ static int vmbus_bus_suspend(struct device *dev)
* We wait here until the completion of any channel
* offers that are currently in progress.
*/
- msleep(1);
+ usleep_range(1000, 2000);
}
mutex_lock(&vmbus_connection.channel_mutex);
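The closing msleep fix follows Documentation/timers/timers-howto.rst: msleep(1) is backed by the jiffy-granular timer wheel and can oversleep to roughly 20 ms on a HZ=100 kernel, while usleep_range() is hrtimer-backed and honors its bounds. For comparison (fragment, not runnable standalone):

msleep(1);			/* timer wheel: may round up to ~10-20 ms at low HZ */
usleep_range(1000, 2000);	/* hrtimer: bounded 1-2 ms; the range lets the
				 * scheduler coalesce nearby wakeups */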