author    | Corey Minyard | 2018-10-23 11:29:02 -0500
committer | Corey Minyard | 2019-02-09 19:48:42 -0600
commit    | c65ea996595005be470fbfa16711deba414fd33b (patch)
tree      | 55b50d2d008afe4411e7085bf48f1f7e9e032f11 /drivers/char/ipmi
parent    | a1466ec5b671651b848df17fc9233ecbb7d35f9f (diff)
ipmi: Fix how the lower layers are told to watch for messages
The IPMI driver has a mechanism to tell the lower layers it needs
to watch for messages, commands, and watchdogs (so it doesn't
needlessly poll). However, it needed an extension: a way to tell
the lower layer what is being waited for, so the lower layer can
set its timeout appropriately.
The update to the lower layer was also being done at most once a
second, because it was done in the main timeout handler. However,
if a command is sent and a response message is expected back, the
watch needs to be started immediately. So modify the code to
update the lower layer immediately when a watch needs to be
enabled. Disabling is still done lazily.
Signed-off-by: Corey Minyard <cminyard@mvista.com>
Tested-by: Kamlakant Patel <kamlakant.patel@cavium.com>
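
The IPMI_WATCH_MASK_* values used throughout the patch come from the
companion change to include/linux/ipmi_smi.h, which is outside this
diffstat (limited to drivers/char/ipmi). As a rough sketch of the
contract, assuming distinct single-bit values for the mask constants
(the authoritative definitions live in that header, not shown here):

/*
 * Illustrative sketch only -- the exact bit values are assumed;
 * see the companion include/linux/ipmi_smi.h change.
 */
#define IPMI_WATCH_MASK_INTERNAL        (1 << 0) /* message handler internals */
#define IPMI_WATCH_MASK_CHECK_MESSAGES  (1 << 1) /* a response is expected */
#define IPMI_WATCH_MASK_CHECK_WATCHDOG  (1 << 2) /* watchdog pretimeouts */
#define IPMI_WATCH_MASK_CHECK_COMMANDS  (1 << 3) /* incoming commands */

/* Lower-layer hook, now taking the accumulated mask instead of a bool. */
void (*set_need_watch)(void *send_info, unsigned int watch_mask);

With that contract, enabling a watch is immediate: smi_tell_to_watch()
ORs the new bits into last_watch_mask and calls set_need_watch() right
away, under xmit_msgs_lock. Clearing bits stays lazy; the mask is only
recomputed from scratch once a second in ipmi_timeout().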
Diffstat (limited to 'drivers/char/ipmi')
-rw-r--r-- | drivers/char/ipmi/ipmi_msghandler.c | 119
-rw-r--r-- | drivers/char/ipmi/ipmi_si_intf.c    |   5
-rw-r--r-- | drivers/char/ipmi/ipmi_ssif.c       |  26
3 files changed, 105 insertions, 45 deletions
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c518659b4d9f..2e008efa735f 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -529,9 +529,22 @@ struct ipmi_smi {
 	unsigned int waiting_events_count; /* How many events in queue? */
 	char delivering_events;
 	char event_msg_printed;
+
+	/* How many users are waiting for events? */
 	atomic_t event_waiters;
 	unsigned int ticks_to_req_ev;
-	int last_needs_timer;
+
+	/* How many users are waiting for commands? */
+	atomic_t command_waiters;
+
+	/* How many users are waiting for watchdogs? */
+	atomic_t watchdog_waiters;
+
+	/*
+	 * Tells what the lower layer has last been asked to watch for,
+	 * messages and/or watchdogs.  Protected by xmit_msgs_lock.
+	 */
+	unsigned int last_watch_mask;
 
 	/*
 	 * The event receiver for my BMC, only really used at panic
@@ -1078,6 +1091,29 @@ static int intf_err_seq(struct ipmi_smi *intf,
 	return rv;
 }
 
+/* Must be called with xmit_msgs_lock held. */
+static void smi_tell_to_watch(struct ipmi_smi *intf,
+			      unsigned int flags,
+			      struct ipmi_smi_msg *smi_msg)
+{
+	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) {
+		if (!smi_msg)
+			return;
+
+		if (!smi_msg->needs_response)
+			return;
+	}
+
+	if (!intf->handlers->set_need_watch)
+		return;
+
+	if ((intf->last_watch_mask & flags) == flags)
+		return;
+
+	intf->last_watch_mask |= flags;
+	intf->handlers->set_need_watch(intf->send_info,
+				       intf->last_watch_mask);
+}
 
 int ipmi_create_user(unsigned int if_num,
 		     const struct ipmi_user_hndl *handler,
@@ -1141,8 +1177,9 @@ int ipmi_create_user(unsigned int if_num,
 	spin_unlock_irqrestore(&intf->seq_lock, flags);
 	if (handler->ipmi_watchdog_pretimeout) {
 		/* User wants pretimeouts, so make sure to watch for them. */
-		if (atomic_inc_return(&intf->event_waiters) == 1)
-			need_waiter(intf);
+		if (atomic_inc_return(&intf->watchdog_waiters) == 1)
+			smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG,
+					  NULL);
 	}
 
 	srcu_read_unlock(&ipmi_interfaces_srcu, index);
 	*user = new_user;
@@ -1214,7 +1251,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
 		user->handler->shutdown(user->handler_data);
 
 	if (user->handler->ipmi_watchdog_pretimeout)
-		atomic_dec(&intf->event_waiters);
+		atomic_dec(&intf->watchdog_waiters);
 
 	if (user->gets_events)
 		atomic_dec(&intf->event_waiters);
@@ -1569,8 +1606,8 @@ int ipmi_register_for_cmd(struct ipmi_user *user,
 		goto out_unlock;
 	}
 
-	if (atomic_inc_return(&intf->event_waiters) == 1)
-		need_waiter(intf);
+	if (atomic_inc_return(&intf->command_waiters) == 1)
+		smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS, NULL);
 
 	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
@@ -1620,7 +1657,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
 	synchronize_rcu();
 	release_ipmi_user(user, index);
 	while (rcvrs) {
-		atomic_dec(&intf->event_waiters);
+		atomic_dec(&intf->command_waiters);
 		rcvr = rcvrs;
 		rcvrs = rcvr->next;
 		kfree(rcvr);
@@ -1737,22 +1774,21 @@ static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
 	return smi_msg;
 }
 
-
 static void smi_send(struct ipmi_smi *intf,
 		     const struct ipmi_smi_handlers *handlers,
 		     struct ipmi_smi_msg *smi_msg, int priority)
 {
 	int run_to_completion = intf->run_to_completion;
+	unsigned long flags = 0;
 
-	if (run_to_completion) {
-		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
-	} else {
-		unsigned long flags;
-
+	if (!run_to_completion)
 		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
-		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+	smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+
+	smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES, smi_msg);
+
+	if (!run_to_completion)
 		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
-	}
 
 	if (smi_msg)
 		handlers->sender(intf->send_info, smi_msg);
@@ -1950,6 +1986,9 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
 				ipmb_seq, broadcast,
 				source_address, source_lun);
 
+		/* We will be getting a response in the BMC message queue. */
+		smi_msg->needs_response = true;
+
 		/*
 		 * Copy the message into the recv message data, so we
 		 * can retransmit it later if necessary.
@@ -2137,6 +2176,7 @@ static int i_ipmi_request(struct ipmi_user *user,
 			goto out;
 		}
 	}
+	smi_msg->needs_response = false;
 
 	rcu_read_lock();
 	if (intf->in_shutdown) {
@@ -3351,6 +3391,8 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
 	INIT_LIST_HEAD(&intf->hp_xmit_msgs);
 	spin_lock_init(&intf->events_lock);
 	atomic_set(&intf->event_waiters, 0);
+	atomic_set(&intf->watchdog_waiters, 0);
+	atomic_set(&intf->command_waiters, 0);
 	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
 	INIT_LIST_HEAD(&intf->waiting_events);
 	intf->waiting_events_count = 0;
@@ -4365,6 +4407,9 @@ static void smi_recv_tasklet(unsigned long val)
 			intf->curr_msg = newmsg;
 		}
 	}
+
+	smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES, newmsg);
+
 	if (!run_to_completion)
 		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
 	if (newmsg)
@@ -4492,7 +4537,7 @@
 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 			      struct list_head *timeouts,
 			      unsigned long timeout_period,
 			      int slot, unsigned long *flags,
-			      unsigned int *waiting_msgs)
+			      unsigned int *watch_mask)
 {
 	struct ipmi_recv_msg *msg;
@@ -4504,7 +4549,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 
 	if (timeout_period < ent->timeout) {
 		ent->timeout -= timeout_period;
-		(*waiting_msgs)++;
+		*watch_mask |= IPMI_WATCH_MASK_CHECK_MESSAGES;
 		return;
 	}
@@ -4523,7 +4568,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 		struct ipmi_smi_msg *smi_msg;
 		/* More retries, send again. */
 
-		(*waiting_msgs)++;
+		*watch_mask |= IPMI_WATCH_MASK_CHECK_MESSAGES;
 
 		/*
 		 * Start with the max timer, set to normal timer after
@@ -4575,13 +4620,13 @@ static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
 	struct ipmi_recv_msg *msg, *msg2;
 	unsigned long flags;
 	int i;
-	unsigned int waiting_msgs = 0;
+	unsigned int watch_mask = 0;
 
 	if (!intf->bmc_registered) {
 		kref_get(&intf->refcount);
 		if (!schedule_work(&intf->bmc_reg_work)) {
 			kref_put(&intf->refcount, intf_free);
-			waiting_msgs++;
+			watch_mask |= IPMI_WATCH_MASK_INTERNAL;
 		}
 	}
@@ -4601,7 +4646,7 @@ static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
 	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
 		check_msg_timeout(intf, &intf->seq_table[i],
 				  &timeouts, timeout_period, i,
-				  &flags, &waiting_msgs);
+				  &flags, &watch_mask);
 	spin_unlock_irqrestore(&intf->seq_lock, flags);
 
 	list_for_each_entry_safe(msg, msg2, &timeouts, link)
@@ -4632,7 +4677,7 @@ static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
 
 	tasklet_schedule(&intf->recv_tasklet);
 
-	return waiting_msgs;
+	return watch_mask;
 }
 
 static void ipmi_request_event(struct ipmi_smi *intf)
@@ -4652,37 +4697,43 @@ static atomic_t stop_operation;
 static void ipmi_timeout(struct timer_list *unused)
 {
 	struct ipmi_smi *intf;
-	int nt = 0, index;
+	unsigned int watch_mask = 0;
+	int index;
+	unsigned long flags;
 
 	if (atomic_read(&stop_operation))
 		return;
 
 	index = srcu_read_lock(&ipmi_interfaces_srcu);
 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-		int lnt = 0;
-
 		if (atomic_read(&intf->event_waiters)) {
 			intf->ticks_to_req_ev--;
 			if (intf->ticks_to_req_ev == 0) {
 				ipmi_request_event(intf);
 				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
 			}
-			lnt++;
+			watch_mask |= IPMI_WATCH_MASK_INTERNAL;
 		}
 
-		lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
+		if (atomic_read(&intf->watchdog_waiters))
+			watch_mask |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
 
-		lnt = !!lnt;
-		if (lnt != intf->last_needs_timer &&
-					intf->handlers->set_need_watch)
-			intf->handlers->set_need_watch(intf->send_info, lnt);
-		intf->last_needs_timer = lnt;
+		if (atomic_read(&intf->command_waiters))
+			watch_mask |= IPMI_WATCH_MASK_CHECK_COMMANDS;
+
+		watch_mask |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
 
-		nt += lnt;
+		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+		if (watch_mask != intf->last_watch_mask &&
+					intf->handlers->set_need_watch)
+			intf->handlers->set_need_watch(intf->send_info,
+						       watch_mask);
+		intf->last_watch_mask = watch_mask;
+		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
 	}
 	srcu_read_unlock(&ipmi_interfaces_srcu, index);
 
-	if (nt)
+	if (watch_mask)
 		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 }
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index f1b9fda6b9df..c81c84a723b6 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1060,10 +1060,13 @@ static void request_events(void *send_info)
 	atomic_set(&smi_info->req_events, 1);
 }
 
-static void set_need_watch(void *send_info, bool enable)
+static void set_need_watch(void *send_info, unsigned int watch_mask)
 {
 	struct smi_info *smi_info = send_info;
 	unsigned long flags;
+	int enable;
+
+	enable = !!(watch_mask & ~IPMI_WATCH_MASK_INTERNAL);
 
 	atomic_set(&smi_info->need_watch, enable);
 	spin_lock_irqsave(&smi_info->si_lock, flags);
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 1aacc1144d2a..a1219af32105 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -93,8 +93,8 @@
 /*
  * Timeout for the watch, only used for get flag timer.
  */
-#define SSIF_WATCH_TIMEOUT_MSEC		100
-#define SSIF_WATCH_TIMEOUT_JIFFIES	msecs_to_jiffies(SSIF_WATCH_TIMEOUT_MSEC)
+#define SSIF_WATCH_MSG_TIMEOUT		msecs_to_jiffies(10)
+#define SSIF_WATCH_WATCHDOG_TIMEOUT	msecs_to_jiffies(250)
 
 enum ssif_intf_state {
 	SSIF_NORMAL,
@@ -276,7 +276,7 @@ struct ssif_info {
 	struct timer_list retry_timer;
 	int retries_left;
 
-	bool need_watch;	/* Need to look for flags? */
+	long watch_timeout;	/* Timeout for flags check, 0 if off. */
 	struct timer_list watch_timer;	/* Flag fetch timer. */
 
 	/* Info from SSIF cmd */
@@ -578,9 +578,9 @@ static void watch_timeout(struct timer_list *t)
 		return;
 
 	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
-	if (ssif_info->need_watch) {
+	if (ssif_info->watch_timeout) {
 		mod_timer(&ssif_info->watch_timer,
-			  jiffies + SSIF_WATCH_TIMEOUT_JIFFIES);
+			  jiffies + ssif_info->watch_timeout);
 		if (SSIF_IDLE(ssif_info)) {
 			start_flag_fetch(ssif_info, flags); /* Releases lock */
 			return;
@@ -1121,17 +1121,23 @@ static void request_events(void *send_info)
  * Upper layer is changing the flag saying whether we need to request
  * flags periodically or not.
  */
-static void ssif_set_need_watch(void *send_info, bool enable)
+static void ssif_set_need_watch(void *send_info, unsigned int watch_mask)
 {
 	struct ssif_info *ssif_info = (struct ssif_info *) send_info;
 	unsigned long oflags, *flags;
+	long timeout = 0;
+
+	if (watch_mask & IPMI_WATCH_MASK_CHECK_MESSAGES)
+		timeout = SSIF_WATCH_MSG_TIMEOUT;
+	else if (watch_mask & ~IPMI_WATCH_MASK_INTERNAL)
+		timeout = SSIF_WATCH_WATCHDOG_TIMEOUT;
 
 	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
-	if (enable != ssif_info->need_watch) {
-		ssif_info->need_watch = enable;
-		if (ssif_info->need_watch)
+	if (timeout != ssif_info->watch_timeout) {
+		ssif_info->watch_timeout = timeout;
+		if (ssif_info->watch_timeout)
 			mod_timer(&ssif_info->watch_timer,
-				  jiffies + SSIF_WATCH_TIMEOUT_JIFFIES);
+				  jiffies + ssif_info->watch_timeout);
 	}
 	ipmi_ssif_unlock_cond(ssif_info, flags);
 }
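
The two lower layers show the two intended uses of the mask: ipmi_si only
cares whether any non-internal bit is set, while ipmi_ssif picks its poll
interval from the most urgent bit. A driver written against this interface
could follow the same pattern; the sketch below condenses the ssif logic
(example_info and its fields are hypothetical names):

/* Hypothetical lower-layer policy, modeled on the ipmi_ssif change above. */
static void example_set_need_watch(void *send_info, unsigned int watch_mask)
{
	struct example_info *info = send_info;
	long timeout = 0;	/* 0 disables polling entirely */

	if (watch_mask & IPMI_WATCH_MASK_CHECK_MESSAGES)
		timeout = msecs_to_jiffies(10);	/* response due: poll fast */
	else if (watch_mask & ~IPMI_WATCH_MASK_INTERNAL)
		timeout = msecs_to_jiffies(250);	/* watchdog/commands: slower */

	if (timeout != info->watch_timeout) {
		info->watch_timeout = timeout;
		if (timeout)
			mod_timer(&info->watch_timer, jiffies + timeout);
	}
}

This 10 ms vs. 250 ms split is exactly what the new mask enables: while a
response is outstanding the bus must be polled aggressively, but a
watchdog- or command-only watch can get by with a much slower timer.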