Subject: [PATCH][Update][RFC] PM / Runtime: Introduce pm_runtime_get_and_call()

Unfortunately, pm_runtime_get() is not a very useful interface,
because if the device is not already in the "active" state, it only
queues up a work item that is supposed to resume the device. The
caller then does not really know when the device is going to be
resumed, which makes it difficult to synchronize subsequent device
accesses with the completion of the resume.
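
For illustration, this is roughly the situation a driver is left in
today (hypothetical code; foo_start_xfer() is a made-up name):

	static int foo_start_xfer(struct device *dev)
	{
		pm_runtime_get(dev);	/* only queues up a resume request */

		/*
		 * The device may still be suspended at this point, so its
		 * registers must not be accessed yet, but there is no
		 * direct way to find out when the queued-up resume will
		 * complete.
		 */
		return 0;
	}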

In that case, if the caller is the device's driver, the most
straightforward way for it to cope with the situation is to use its
.runtime_resume() callback for data processing unrelated to the
resume itself, but the correctness of this is questionable. Namely,
the driver's .runtime_resume() callback need not be executed directly
by the core; it may be run from within a subsystem or PM domain
callback instead. Then, the data processing carried out by the
driver's callback may disturb the subsystem's or PM domain's
functionality (for example, the subsystem may not be ready for the
device to process I/O at the point the driver's callback is about to
return). Besides, the .runtime_resume() callback is not supposed to
do anything beyond what is needed to resume the device.
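
In other words, drivers are pushed towards a pattern along these lines
(hypothetical sketch; the foo_*() helpers are made-up names):

	static int foo_runtime_resume(struct device *dev)
	{
		foo_restore_registers(dev);	/* the actual resume work */

		/*
		 * Questionable: this callback may be run from within a
		 * subsystem or PM domain callback, and the subsystem may
		 * not be ready for the device to process I/O until after
		 * that callback returns.
		 */
		foo_process_pending_requests(dev);

		return 0;
	}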

For this reason, it appears to be necessary to introduce a mechanism
by which device drivers may schedule the execution of certain code
(say a procedure) to occur when the device in question is known to be
in the "active" state (preferably, as soon as it has been resumed).
Thus introduce a new runtime PM helper function,
__pm_runtime_get_and_call(), and its simplified variant
pm_runtime_get_and_call(), allowing the caller to increment the
device's runtime PM usage counter and execute a function provided
as the second argument, either synchronously, if the device is
in the "active" state, or from the runtime PM workqueue, after
resuming the device (in that case the execution of the function is
queued up along with a request to resume the device).

The simplified helper, pm_runtime_get_and_call(), will not execute
the function at all if runtime PM is disabled for the device or a
runtime PM error has occurred. The more elaborate one,
__pm_runtime_get_and_call(), has one more argument allowing the
caller to specify whether or not the function should be executed even
in those cases; if so, the function will always be executed
synchronously.
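
For illustration, a driver might use the new helper along these lines
(hypothetical sketch based on the interface introduced below;
foo_process_queue() and foo_submit_request() are made-up names, and
dropping the usage counter reference with pm_runtime_put_noidle() on
the error path is an assumption of this example, not part of the
patch):

	/* Runs with the device known to be in the "active" state. */
	static void foo_process_queue(struct device *dev)
	{
		/* ... process I/O requests queued up by the driver ... */
	}

	static int foo_submit_request(struct device *dev)
	{
		int ret;

		/*
		 * Increment the usage counter and either run
		 * foo_process_queue() synchronously (device "active",
		 * return value 0) or queue it up along with a resume
		 * request (return value 1).
		 */
		ret = pm_runtime_get_and_call(dev, foo_process_queue);
		if (ret < 0) {
			/* Runtime PM is disabled or in an error state. */
			pm_runtime_put_noidle(dev);
			return ret;
		}

		return 0;
	}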

This version of the patch doesn't include any documentation updates.

No sign-off yet.
---

There was a bug in __pm_runtime_get_and_call() in the previous
version of this patch, so please disregard that version.

Rafael

---
 drivers/base/power/runtime.c |  145 +++++++++++++++++++++++++++++++++++++----
 include/linux/pm.h           |    2
 include/linux/pm_runtime.h   |   19 +++++
 3 files changed, 153 insertions(+), 13 deletions(-)

Index: linux/drivers/base/power/runtime.c
===================================================================
--- linux.orig/drivers/base/power/runtime.c
+++ linux/drivers/base/power/runtime.c
@@ -484,6 +484,15 @@ static int rpm_suspend(struct device *de
 	goto out;
 }
 
+void rpm_queue_up_resume(struct device *dev)
+{
+	dev->power.request = RPM_REQ_RESUME;
+	if (!dev->power.request_pending) {
+		dev->power.request_pending = true;
+		queue_work(pm_wq, &dev->power.work);
+	}
+}
+
 /**
  * rpm_resume - Carry out runtime resume of given device.
  * @dev: Device to resume.
@@ -519,12 +528,18 @@ static int rpm_resume(struct device *dev
 		goto out;
 
 	/*
-	 * Other scheduled or pending requests need to be canceled.  Small
-	 * optimization: If an autosuspend timer is running, leave it running
-	 * rather than cancelling it now only to restart it again in the near
-	 * future.
+	 * Other scheduled or pending requests need to be canceled.  If the
+	 * execution of a function is queued up along with a resume request,
+	 * do not cancel it.
+	 */
+	if (dev->power.request != RPM_REQ_RESUME || !dev->power.func)
+		dev->power.request = RPM_REQ_NONE;
+
+	/*
+	 * Small optimization: If an autosuspend timer is running, leave it
+	 * running rather than cancelling it now only to restart it again in the
+	 * near future.
 	 */
-	dev->power.request = RPM_REQ_NONE;
 	if (!dev->power.timer_autosuspends)
 		pm_runtime_deactivate_timer(dev);
 
@@ -591,11 +606,7 @@ static int rpm_resume(struct device *dev
 
 	/* Carry out an asynchronous or a synchronous resume. */
 	if (rpmflags & RPM_ASYNC) {
-		dev->power.request = RPM_REQ_RESUME;
-		if (!dev->power.request_pending) {
-			dev->power.request_pending = true;
-			queue_work(pm_wq, &dev->power.work);
-		}
+		rpm_queue_up_resume(dev);
 		retval = 0;
 		goto out;
 	}
@@ -691,6 +702,7 @@ static int rpm_resume(struct device *dev
 static void pm_runtime_work(struct work_struct *work)
 {
 	struct device *dev = container_of(work, struct device, power.work);
+	void (*func)(struct device *) = NULL;
 	enum rpm_request req;
 
 	spin_lock_irq(&dev->power.lock);
@@ -715,11 +727,37 @@ static void pm_runtime_work(struct work_
 		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
 		break;
 	case RPM_REQ_RESUME:
-		rpm_resume(dev, RPM_NOWAIT);
+		func = dev->power.func;
+		if (func) {
+			dev->power.func = NULL;
+			rpm_resume(dev, 0);
+			/*
+			 * The function might have been replaced when
+			 * rpm_resume() was running the resume callback.
+			 */
+			if (dev->power.func)
+				func = dev->power.func;
+		} else {
+			rpm_resume(dev, RPM_NOWAIT);
+		}
 		break;
 	}
 
  out:
+	if (func) {
+		pm_runtime_get_noresume(dev);
+		dev->power.function_execution = true;
+		spin_unlock_irq(&dev->power.lock);
+
+		func(dev);
+
+		spin_lock_irq(&dev->power.lock);
+		dev->power.function_execution = false;
+		wake_up_all(&dev->power.wait_queue);
+		pm_runtime_put_noidle(dev);
+		rpm_idle(dev, RPM_NOWAIT);
+	}
+
 	spin_unlock_irq(&dev->power.lock);
 }

@@ -878,6 +916,84 @@ int __pm_runtime_resume(struct device *d
 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
 
 /**
+ * __pm_runtime_get_and_call - Increment device usage count and run a function.
+ * @dev: Device to handle.
+ * @func: Function to run.
+ * @force: Whether to run @func if runtime PM is disabled or in error state.
+ *
+ * Increment the device's runtime PM usage counter and execute @func if the
+ * device's runtime PM status is "active".  Otherwise, schedule the execution
+ * of @func along with a request to resume the device.
+ *
+ * If this routine is called twice in a row, the function passed to it in the
+ * second call replaces the one passed in the first call unless the execution
+ * of the latter has already started (in which case both functions will be
+ * run, unless the second one is canceled along with its resume request).
+ */
+int __pm_runtime_get_and_call(struct device *dev, void (*func)(struct device *),
+			      bool force)
+{
+	unsigned long flags;
+	int ret;
+
+	pm_runtime_get_noresume(dev);
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	ret = dev->power.runtime_error;
+	if (!ret && dev->power.disable_depth > 0)
+		ret = -EINVAL;
+
+	if (ret) {
+		if (func && force) {
+			dev->power.disable_depth++;
+			spin_unlock_irqrestore(&dev->power.lock, flags);
+
+			func(dev);
+
+			spin_lock_irqsave(&dev->power.lock, flags);
+			dev->power.disable_depth--;
+		}
+		goto out;
+	}
+
+	/*
+	 * The approach here is the same as in rpm_suspend(): autosuspend timers
+	 * will be activated shortly anyway, so it is pointless to cancel them
+	 * now.
+	 */
+	if (!dev->power.timer_autosuspends)
+		pm_runtime_deactivate_timer(dev);
+
+	if (dev->power.runtime_status == RPM_ACTIVE) {
+		dev->power.func = NULL;
+		dev->power.request = RPM_REQ_NONE;
+		ret = 0;
+	} else {
+		dev->power.func = func;
+		rpm_queue_up_resume(dev);
+		ret = 1;
+		goto out;
+	}
+
+	if (func) {
+		dev->power.function_execution = true;
+		spin_unlock_irqrestore(&dev->power.lock, flags);
+
+		func(dev);
+
+		spin_lock_irqsave(&dev->power.lock, flags);
+		dev->power.function_execution = false;
+		wake_up_all(&dev->power.wait_queue);
+	}
+
+ out:
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return ret;
+}
+
+/**
  * __pm_runtime_set_status - Set runtime PM status of a device.
  * @dev: Device to handle.
  * @status: New runtime PM status of the device.
@@ -982,7 +1098,8 @@ static void __pm_runtime_barrier(struct
 
 	if (dev->power.runtime_status == RPM_SUSPENDING
 	    || dev->power.runtime_status == RPM_RESUMING
-	    || dev->power.idle_notification) {
+	    || dev->power.idle_notification
+	    || dev->power.function_execution) {
 		DEFINE_WAIT(wait);
 
 		/* Suspend, wake-up or idle notification in progress. */
@@ -991,7 +1108,8 @@ static void __pm_runtime_barrier(struct
 				TASK_UNINTERRUPTIBLE);
 		if (dev->power.runtime_status != RPM_SUSPENDING
 		    && dev->power.runtime_status != RPM_RESUMING
-		    && !dev->power.idle_notification)
+		    && !dev->power.idle_notification
+		    && !dev->power.function_execution)
 			break;
 		spin_unlock_irq(&dev->power.lock);
 
@@ -1278,6 +1396,7 @@ void pm_runtime_init(struct device *dev)
 {
 	dev->power.runtime_status = RPM_SUSPENDED;
 	dev->power.idle_notification = false;
+	dev->power.function_execution = false;
 
 	dev->power.disable_depth = 1;
 	atomic_set(&dev->power.usage_count, 0);
Index: linux/include/linux/pm.h
===================================================================
--- linux.orig/include/linux/pm.h
+++ linux/include/linux/pm.h
@@ -538,6 +538,7 @@ struct dev_pm_info {
 	unsigned int		irq_safe:1;
 	unsigned int		use_autosuspend:1;
 	unsigned int		timer_autosuspends:1;
+	bool			function_execution:1;
 	enum rpm_request	request;
 	enum rpm_status		runtime_status;
 	int			runtime_error;
@@ -547,6 +548,7 @@ struct dev_pm_info {
 	unsigned long		suspended_jiffies;
 	unsigned long		accounting_timestamp;
 	struct dev_pm_qos_request *pq_req;
+	void			(*func)(struct device *);
 #endif
 	struct pm_subsys_data	*subsys_data;  /* Owned by the subsystem. */
 	struct pm_qos_constraints *constraints;
Index: linux/include/linux/pm_runtime.h
===================================================================
--- linux.orig/include/linux/pm_runtime.h
+++ linux/include/linux/pm_runtime.h
@@ -47,6 +47,9 @@ extern void pm_runtime_set_autosuspend_d
 extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
 extern void pm_runtime_update_max_time_suspended(struct device *dev,
 						 s64 delta_ns);
+extern int __pm_runtime_get_and_call(struct device *dev,
+				     void (*func)(struct device *),
+				     bool force);
 
 static inline bool pm_children_suspended(struct device *dev)
 {
@@ -150,6 +153,16 @@ static inline void pm_runtime_set_autosu
 static inline unsigned long pm_runtime_autosuspend_expiration(
 				struct device *dev) { return 0; }
 
+static inline int __pm_runtime_get_and_call(struct device *dev,
+					    void (*func)(struct device *),
+					    bool force)
+{
+	if (func && force)
+		func(dev);
+
+	return 0;
+}
+
 #endif /* !CONFIG_PM_RUNTIME */
 
 static inline int pm_runtime_idle(struct device *dev)
@@ -248,4 +261,10 @@ static inline void pm_runtime_dont_use_a
 	__pm_runtime_use_autosuspend(dev, false);
 }
 
+static inline int pm_runtime_get_and_call(struct device *dev,
+					  void (*func)(struct device *))
+{
+	return __pm_runtime_get_and_call(dev, func, false);
+}
+
 #endif

