Diffstat (limited to 'drivers/acpi/ec.c')
-rw-r--r--  drivers/acpi/ec.c  117
1 file changed, 52 insertions(+), 65 deletions(-)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index e0cb1bcfffb2..13565629ce0a 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -169,7 +169,7 @@ struct acpi_ec_query {
};
static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
-static void advance_transaction(struct acpi_ec *ec);
+static void advance_transaction(struct acpi_ec *ec, bool interrupt);
static void acpi_ec_event_handler(struct work_struct *work);
static void acpi_ec_event_processor(struct work_struct *work);
@@ -335,12 +335,12 @@ static const char *acpi_ec_cmd_string(u8 cmd)
* GPE Registers
* -------------------------------------------------------------------------- */
-static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
+static inline bool acpi_ec_gpe_status_set(struct acpi_ec *ec)
{
acpi_event_status gpe_status = 0;
(void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
- return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? true : false;
+ return !!(gpe_status & ACPI_EVENT_FLAG_STATUS_SET);
}
static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
@@ -351,14 +351,14 @@ static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
BUG_ON(ec->reference_count < 1);
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
}
- if (acpi_ec_is_gpe_raised(ec)) {
+ if (acpi_ec_gpe_status_set(ec)) {
/*
* On some platforms, EN=1 writes cannot trigger GPE. So
* software need to manually trigger a pseudo GPE event on
* EN=1 writes.
*/
ec_dbg_raw("Polling quirk");
- advance_transaction(ec);
+ advance_transaction(ec, false);
}
}
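
The quirk handled in the hunk above boils down to this: enabling the GPE does not re-trigger it for a status bit that was already set, so the enable path has to advance the transaction once by hand. A minimal standalone sketch of that pattern, using hypothetical ec_like helpers rather than the driver's real API:

#include <stdbool.h>

/* Hypothetical stand-ins for the GPE enable/status/handler helpers. */
struct ec_like { bool enabled; bool status; };

static void enable_event(struct ec_like *ec)  { ec->enabled = true; }
static bool event_pending(struct ec_like *ec) { return ec->status; }
static void handle_event(struct ec_like *ec)  { ec->status = false; /* process the event */ }

static void enable_and_catch_up(struct ec_like *ec)
{
	enable_event(ec);		/* EN=1 */
	if (event_pending(ec))		/* STS was already set before EN=1 */
		handle_event(ec);	/* poll once, as advance_transaction(ec, false) does above */
}
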
@@ -372,23 +372,6 @@ static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
}
}
-static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
-{
- /*
- * GPE STS is a W1C register, which means:
- * 1. Software can clear it without worrying about clearing other
- * GPEs' STS bits when the hardware sets them in parallel.
- * 2. As long as software can ensure only clearing it when it is
- * set, hardware won't set it in parallel.
- * So software can clear GPE in any contexts.
- * Warning: do not move the check into advance_transaction() as the
- * EC commands will be sent without GPE raised.
- */
- if (!acpi_ec_is_gpe_raised(ec))
- return;
- acpi_clear_gpe(NULL, ec->gpe);
-}
-
/* --------------------------------------------------------------------------
* Transaction Management
* -------------------------------------------------------------------------- */
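
The helper removed above relied on the write-1-to-clear (W1C) nature of GPE_STS, and the same reasoning reappears as a comment inside advance_transaction() further down. A standalone sketch of W1C semantics, using a made-up register layout rather than the ACPICA API:

#include <stdint.h>

#define FAKE_GPE_STS	(1u << 3)	/* hypothetical status bit, not the EC's layout */

static volatile uint32_t fake_sts_reg;	/* stand-in for a memory-mapped W1C STS register */

/*
 * Writing 1 clears only the bits written as 1, so other GPEs' STS bits that
 * the hardware may set in parallel are left untouched; clearing only when the
 * bit is observed set matches the constraint described in the removed comment.
 */
static void clear_sts_if_set(void)
{
	if (fake_sts_reg & FAKE_GPE_STS)
		fake_sts_reg = FAKE_GPE_STS;	/* 1 clears, 0 leaves the bit alone */
}
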
@@ -488,7 +471,7 @@ static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
* Unconditionally invoke this once after enabling the event
* handling mechanism to detect the pending events.
*/
- advance_transaction(ec);
+ advance_transaction(ec, false);
}
static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
@@ -632,24 +615,41 @@ static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long f
}
}
-static void advance_transaction(struct acpi_ec *ec)
+static void acpi_ec_spurious_interrupt(struct acpi_ec *ec, struct transaction *t)
{
- struct transaction *t;
- u8 status;
+ if (t->irq_count < ec_storm_threshold)
+ ++t->irq_count;
+
+ /* Trigger if the threshold is 0 too. */
+ if (t->irq_count == ec_storm_threshold)
+ acpi_ec_mask_events(ec);
+}
+
+static void advance_transaction(struct acpi_ec *ec, bool interrupt)
+{
+ struct transaction *t = ec->curr;
bool wakeup = false;
+ u8 status;
+
+ ec_dbg_stm("%s (%d)", interrupt ? "IRQ" : "TASK", smp_processor_id());
- ec_dbg_stm("%s (%d)", in_interrupt() ? "IRQ" : "TASK",
- smp_processor_id());
/*
- * By always clearing STS before handling all indications, we can
- * ensure a hardware STS 0->1 change after this clearing can always
- * trigger a GPE interrupt.
+ * Clear GPE_STS upfront to allow subsequent hardware GPE_STS 0->1
+ * changes to always trigger a GPE interrupt.
+ *
+ * GPE STS is a W1C register, which means:
+ *
+ * 1. Software can clear it without worrying about clearing the other
+ * GPEs' STS bits when the hardware sets them in parallel.
+ *
+ * 2. As long as software can ensure only clearing it when it is set,
+ * hardware won't set it in parallel.
*/
- if (ec->gpe >= 0)
- acpi_ec_clear_gpe(ec);
+ if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec))
+ acpi_clear_gpe(NULL, ec->gpe);
status = acpi_ec_read_status(ec);
- t = ec->curr;
+
/*
* Another IRQ or a guarded polling mode advancement is detected,
* the next QR_EC submission is then allowed.
@@ -661,56 +661,43 @@ static void advance_transaction(struct acpi_ec *ec)
clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
acpi_ec_complete_query(ec);
}
+ if (!t)
+ goto out;
}
- if (!t)
- goto err;
+
if (t->flags & ACPI_EC_COMMAND_POLL) {
if (t->wlen > t->wi) {
- if ((status & ACPI_EC_FLAG_IBF) == 0)
+ if (!(status & ACPI_EC_FLAG_IBF))
acpi_ec_write_data(ec, t->wdata[t->wi++]);
- else
- goto err;
+ else if (interrupt && !(status & ACPI_EC_FLAG_SCI))
+ acpi_ec_spurious_interrupt(ec, t);
} else if (t->rlen > t->ri) {
- if ((status & ACPI_EC_FLAG_OBF) == 1) {
+ if (status & ACPI_EC_FLAG_OBF) {
t->rdata[t->ri++] = acpi_ec_read_data(ec);
if (t->rlen == t->ri) {
ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
+ wakeup = true;
if (t->command == ACPI_EC_COMMAND_QUERY)
ec_dbg_evt("Command(%s) completed by hardware",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
- wakeup = true;
}
- } else
- goto err;
- } else if (t->wlen == t->wi &&
- (status & ACPI_EC_FLAG_IBF) == 0) {
+ } else if (interrupt && !(status & ACPI_EC_FLAG_SCI)) {
+ acpi_ec_spurious_interrupt(ec, t);
+ }
+ } else if (t->wlen == t->wi && !(status & ACPI_EC_FLAG_IBF)) {
ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
wakeup = true;
}
- goto out;
} else if (!(status & ACPI_EC_FLAG_IBF)) {
acpi_ec_write_cmd(ec, t->command);
ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
- goto out;
- }
-err:
- /*
- * If SCI bit is set, then don't think it's a false IRQ
- * otherwise will take a not handled IRQ as a false one.
- */
- if (!(status & ACPI_EC_FLAG_SCI)) {
- if (in_interrupt() && t) {
- if (t->irq_count < ec_storm_threshold)
- ++t->irq_count;
- /* Allow triggering on 0 threshold */
- if (t->irq_count == ec_storm_threshold)
- acpi_ec_mask_events(ec);
- }
}
+
out:
if (status & ACPI_EC_FLAG_SCI)
acpi_ec_submit_query(ec);
- if (wakeup && in_interrupt())
+
+ if (wakeup && interrupt)
wake_up(&ec->wait);
}
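
The rewritten advance_transaction() above replaces the old in_interrupt() checks with an explicit flag: the caller states whether it runs from the GPE/IRQ path, spurious interrupts are only counted in that case (and only when SCI_EVT is not set), and the waiter is only woken from the IRQ path. A minimal sketch of that calling convention, with stand-in names rather than the driver's own:

#include <stdbool.h>

static void wake_submitter(void) { /* stand-in for wake_up(&ec->wait) */ }

static void advance_state(bool from_irq)
{
	bool wakeup = false;

	/* ... drive the transaction state machine, setting wakeup on completion ... */

	if (wakeup && from_irq)
		wake_submitter();	/* only the IRQ path wakes the sleeping submitter */
}

static void irq_path(void)     { advance_state(true); }		/* interrupt handler */
static void polling_path(void) { advance_state(false); }	/* ec_poll() and other task contexts */
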
@@ -767,7 +754,7 @@ static int ec_poll(struct acpi_ec *ec)
if (!ec_guard(ec))
return 0;
spin_lock_irqsave(&ec->lock, flags);
- advance_transaction(ec);
+ advance_transaction(ec, false);
spin_unlock_irqrestore(&ec->lock, flags);
} while (time_before(jiffies, delay));
pr_debug("controller reset, restart transaction\n");
@@ -1216,7 +1203,7 @@ static void acpi_ec_check_event(struct acpi_ec *ec)
* taking care of it.
*/
if (!ec->curr)
- advance_transaction(ec);
+ advance_transaction(ec, false);
spin_unlock_irqrestore(&ec->lock, flags);
}
}
@@ -1259,7 +1246,7 @@ static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
unsigned long flags;
spin_lock_irqsave(&ec->lock, flags);
- advance_transaction(ec);
+ advance_transaction(ec, true);
spin_unlock_irqrestore(&ec->lock, flags);
}