[JAUNTY SRU] lp#368809 Make ignore_nice_load setting of ondemand work as expected.

Manoj Iyer manoj.iyer at canonical.com
Mon Jun 22 20:27:28 UTC 2009


SRU JUSTIFICATION

IMPACT: The ondemand governor's micro-accounting of idle time changes broke
the ignore_nice_load sysfs setting due to a thinko in the code.

FIX: Cherry-pick of the upstream fix for
http://bugzilla.kernel.org/show_bug.cgi?id=12310 to Jaunty.

TEST: The originator of the bug tested the kernel at
http://people.ubuntu.com/~manjo/lp368809-jaunty/ and reported that it works.
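
For reference, here is a minimal userspace-style sketch of the per-sample
accounting the patch below restores. It is illustrative only: struct
cpu_sample, sample_load() and the *_now parameters are made-up names, not
the kernel's, but the ignore_nice branch mirrors the logic the fix moves
into dbs_check_cpu().

#include <stdint.h>

struct cpu_sample {
        uint64_t prev_wall;     /* wall clock at last sample */
        uint64_t prev_idle;     /* idle time, with nice counted as busy */
        uint64_t prev_nice;     /* nice time at last sample */
};

/* Returns the load in percent over one sampling interval. */
static unsigned int sample_load(struct cpu_sample *s, uint64_t wall_now,
                                uint64_t idle_now, uint64_t nice_now,
                                int ignore_nice)
{
        uint64_t wall_time = wall_now - s->prev_wall;
        uint64_t idle_time = idle_now - s->prev_idle;

        s->prev_wall = wall_now;
        s->prev_idle = idle_now;

        if (ignore_nice) {
                /*
                 * The raw idle figure counted nice time as busy; fold the
                 * nice delta back into idle so niced tasks do not drive
                 * the frequency up.
                 */
                idle_time += nice_now - s->prev_nice;
                s->prev_nice = nice_now;
        }

        if (!wall_time || wall_time < idle_time)
                return 0;

        return (unsigned int)(100 * (wall_time - idle_time) / wall_time);
}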


The following changes since commit 8d6d84357a3631767f391571e8741c95d829a92d:
   Stefan Bader (1):
         UBUNTU: Forgotten update to control files

are available in the git repository at:

   git://kernel.ubuntu.com/manjo/ubuntu-jaunty.git lp368809

Venkatesh Pallipadi (1):
       [CPUFREQ] Make ignore_nice_load setting of ondemand work as expected.

  drivers/cpufreq/cpufreq_ondemand.c |   47 +++++++++++++++++++----------------
  1 files changed, 25 insertions(+), 22 deletions(-)

From ec3d58dfd8feaba042db86463ae468cd1a11801e Mon Sep 17 00:00:00 2001
From: Venkatesh Pallipadi <venkatesh.pallipadi at intel.com>
Date: Fri, 23 Jan 2009 09:25:02 -0500
Subject: [PATCH] [CPUFREQ] Make ignore_nice_load setting of ondemand work as expected.

The ondemand micro-accounting of idle time changes broke the
ignore_nice_load sysfs setting due to a thinko in the code.

The bug entry:
http://bugzilla.kernel.org/show_bug.cgi?id=12310

Reported-by: Jim Bray <jimsantelmo at gmail.com>

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi at intel.com>
Signed-off-by: Dave Jones <davej at redhat.com>
(cherry picked from commit 1ca3abdb6a4b87246b00292f048acd344325fd12)

Signed-off-by: Manoj Iyer <manoj.iyer at canonical.com>
---
  drivers/cpufreq/cpufreq_ondemand.c |   47 +++++++++++++++++++----------------
  1 files changed, 25 insertions(+), 22 deletions(-)

diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 2ab3c12..0646199 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -117,11 +117,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
  	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
  	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
  	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-
-	if (!dbs_tuners_ins.ignore_nice) {
-		busy_time = cputime64_add(busy_time,
-				kstat_cpu(cpu).cpustat.nice);
-	}
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

  	idle_time = cputime64_sub(cur_wall_time, busy_time);
  	if (wall)
@@ -137,23 +133,6 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
  	if (idle_time == -1ULL)
  		return get_cpu_idle_time_jiffy(cpu, wall);

-	if (dbs_tuners_ins.ignore_nice) {
-		cputime64_t cur_nice;
-		unsigned long cur_nice_jiffies;
-		struct cpu_dbs_info_s *dbs_info;
-
-		dbs_info = &per_cpu(cpu_dbs_info, cpu);
-		cur_nice = cputime64_sub(kstat_cpu(cpu).cpustat.nice,
-					 dbs_info->prev_cpu_nice);
-		/*
-		 * Assumption: nice time between sampling periods will be
-		 * less than 2^32 jiffies for 32 bit sys
-		 */
-		cur_nice_jiffies = (unsigned long)
-					cputime64_to_jiffies64(cur_nice);
-		dbs_info->prev_cpu_nice = kstat_cpu(cpu).cpustat.nice;
-		return idle_time + jiffies_to_usecs(cur_nice_jiffies);
-	}
  	return idle_time;
  }

@@ -319,6 +298,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
  		dbs_info = &per_cpu(cpu_dbs_info, j);
  		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
  						&dbs_info->prev_cpu_wall);
+		if (dbs_tuners_ins.ignore_nice)
+			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+
  	}
  	mutex_unlock(&dbs_mutex);

@@ -419,6 +401,23 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
  				j_dbs_info->prev_cpu_idle);
  		j_dbs_info->prev_cpu_idle = cur_idle_time;

+		if (dbs_tuners_ins.ignore_nice) {
+			cputime64_t cur_nice;
+			unsigned long cur_nice_jiffies;
+
+			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
+					 j_dbs_info->prev_cpu_nice);
+			/*
+			 * Assumption: nice time between sampling periods will
+			 * be less than 2^32 jiffies for 32 bit sys
+			 */
+			cur_nice_jiffies = (unsigned long)
+					cputime64_to_jiffies64(cur_nice);
+
+			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			idle_time += jiffies_to_usecs(cur_nice_jiffies);
+		}
+
  		if (unlikely(!wall_time || wall_time < idle_time))
  			continue;

@@ -575,6 +574,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,

  			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
  						&j_dbs_info->prev_cpu_wall);
+			if (dbs_tuners_ins.ignore_nice) {
+				j_dbs_info->prev_cpu_nice =
+						kstat_cpu(j).cpustat.nice;
+			}
  		}
  		this_dbs_info->cpu = cpu;
  		/*
-- 
1.6.0.4
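
To reproduce the TEST scenario above, enable the knob while a nice(1)'d
CPU hog is running and check that the frequency stays at the minimum.
A minimal sketch; the sysfs path below is the usual per-CPU location for
ondemand tunables in kernels of this era and may differ on your system:

#include <stdio.h>

int main(void)
{
        /* Assumed path; verify against your kernel's sysfs layout. */
        const char *knob =
            "/sys/devices/system/cpu/cpu0/cpufreq/ondemand/ignore_nice_load";
        FILE *f = fopen(knob, "w");

        if (!f) {
                perror(knob);
                return 1;
        }
        /* With the fix applied, niced load no longer raises the frequency. */
        fputs("1\n", f);
        fclose(f);
        return 0;
}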


Cheers
--- manjo



