author    Paul Mackerras <paulus@samba.org>    2006-03-27 19:15:26 +1100
committer Paul Mackerras <paulus@samba.org>    2006-03-27 19:15:26 +1100
commit    9b781727fd1062671afa144b93e8c69b14bcac4d (patch)
tree      1cec35f0cedc664394b15165d96944019b8e1ff2 /arch/powerpc
parent    0eb4cb9b16aba6d610a0716503b96d299b308d44 (diff)
powerpc: Move cpu_setup_6xx.S and temp.c over to arch/powerpc

Also renamed temp.c to tau_6xx.c (for thermal assist unit) and updated
the Kconfig option description and help text for CONFIG_TAU.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                   4
-rw-r--r--  arch/powerpc/kernel/Makefile           3
-rw-r--r--  arch/powerpc/kernel/cpu_setup_6xx.S  474
-rw-r--r--  arch/powerpc/kernel/tau_6xx.c        271
4 files changed, 749 insertions, 3 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a433b7126d33..2cdc35ce8045 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -491,7 +491,7 @@ config PPC601_SYNC_FIX
If in doubt, say Y here.
config TAU
- bool "Thermal Management Support"
+ bool "On-chip CPU temperature sensor support"
depends on 6xx
help
G3 and G4 processors have an on-chip temperature sensor called the
@@ -500,7 +500,7 @@ config TAU
on-die temperature in /proc/cpuinfo if the cpu supports it.
Unfortunately, on some chip revisions, this sensor is very inaccurate
- and in some cases, does not work at all, so don't assume the cpu
+ and in many cases, does not work at all, so don't assume the cpu
temp is actually what /proc/cpuinfo says it is.
config TAU_INT
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index e7fddf1e42c7..754c227835bb 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -34,7 +34,8 @@ obj-$(CONFIG_IBMEBUS) += ibmebus.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
obj64-$(CONFIG_PPC_MULTIPLATFORM) += nvram_64.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
-obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o
+obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
+obj-$(CONFIG_TAU) += tau_6xx.o
ifeq ($(CONFIG_PPC_MERGE),y)
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
new file mode 100644
index 000000000000..55ed7716636f
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -0,0 +1,474 @@
+/*
+ * This file contains low level CPU setup functions.
+ * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cputable.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cache.h>
+
+_GLOBAL(__setup_cpu_603)
+ b setup_common_caches
+_GLOBAL(__setup_cpu_604)
+ mflr r4
+ bl setup_common_caches
+ bl setup_604_hid0
+ mtlr r4
+ blr
+_GLOBAL(__setup_cpu_750)
+ mflr r4
+ bl __init_fpu_registers
+ bl setup_common_caches
+ bl setup_750_7400_hid0
+ mtlr r4
+ blr
+_GLOBAL(__setup_cpu_750cx)
+ mflr r4
+ bl __init_fpu_registers
+ bl setup_common_caches
+ bl setup_750_7400_hid0
+ bl setup_750cx
+ mtlr r4
+ blr
+_GLOBAL(__setup_cpu_750fx)
+ mflr r4
+ bl __init_fpu_registers
+ bl setup_common_caches
+ bl setup_750_7400_hid0
+ bl setup_750fx
+ mtlr r4
+ blr
+_GLOBAL(__setup_cpu_7400)
+ mflr r4
+ bl __init_fpu_registers
+ bl setup_7400_workarounds
+ bl setup_common_caches
+ bl setup_750_7400_hid0
+ mtlr r4
+ blr
+_GLOBAL(__setup_cpu_7410)
+ mflr r4
+ bl __init_fpu_registers
+ bl setup_7410_workarounds
+ bl setup_common_caches
+ bl setup_750_7400_hid0
+ li r3,0
+ mtspr SPRN_L2CR2,r3
+ mtlr r4
+ blr
+_GLOBAL(__setup_cpu_745x)
+ mflr r4
+ bl setup_common_caches
+ bl setup_745x_specifics
+ mtlr r4
+ blr
+
+/* Enable caches for 603's, 604, 750 & 7400 */
+setup_common_caches:
+ mfspr r11,SPRN_HID0
+ andi. r0,r11,HID0_DCE
+ ori r11,r11,HID0_ICE|HID0_DCE
+ ori r8,r11,HID0_ICFI
+ bne 1f /* don't invalidate the D-cache */
+ ori r8,r8,HID0_DCI /* unless it wasn't enabled */
+1: sync
+ mtspr SPRN_HID0,r8 /* enable and invalidate caches */
+ sync
+ mtspr SPRN_HID0,r11 /* enable caches */
+ sync
+ isync
+ blr
+
+/* 604, 604e, 604ev, ...
+ * Enable superscalar execution & branch history table
+ */
+setup_604_hid0:
+ mfspr r11,SPRN_HID0
+ ori r11,r11,HID0_SIED|HID0_BHTE
+ ori r8,r11,HID0_BTCD
+ sync
+ mtspr SPRN_HID0,r8 /* flush branch target address cache */
+ sync /* on 604e/604r */
+ mtspr SPRN_HID0,r11
+ sync
+ isync
+ blr
+
+/* 7400 <= rev 2.7 and 7410 rev 1.0 suffer from some
+ * errata that we work around here.
+ * Moto MPC710CE.pdf describes them; they are errata
+ * #3, #4 and #5.
+ * Note that we assume the firmware didn't choose to
+ * apply other workarounds (there are others documented
+ * in the .pdf). It appears that Apple firmware only works
+ * around #3, with the same fix we use. We may want to
+ * check if the CPU is using 60x bus mode, in which case
+ * the workaround for erratum #4 is useless. Also, we may
+ * want to explicitly clear HID0_NOPDST as this is not
+ * needed once we have applied workaround #5 (though it's
+ * not set by Apple's firmware at least).
+ */
+setup_7400_workarounds:
+ mfpvr r3
+ rlwinm r3,r3,0,20,31 /* keep the low 12 PVR bits: the revision */
+ cmpwi 0,r3,0x0207 /* rev 2.7 or earlier? */
+ ble 1f
+ blr
+setup_7410_workarounds:
+ mfpvr r3
+ rlwinm r3,r3,0,20,31
+ cmpwi 0,r3,0x0100 /* only rev 1.0 is affected */
+ bnelr
+1:
+ mfspr r11,SPRN_MSSSR0
+ /* Errata #3: Set L1OPQ_SIZE to 0x10 */
+ rlwinm r11,r11,0,9,6
+ oris r11,r11,0x0100
+ /* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?) */
+ oris r11,r11,0x0002
+ /* Errata #5: Set DRLT_SIZE to 0x01 */
+ rlwinm r11,r11,0,5,2
+ oris r11,r11,0x0800
+ sync
+ mtspr SPRN_MSSSR0,r11
+ sync
+ isync
+ blr
+
+/* 740/750/7400/7410
+ * Enable Store Gathering (SGE), Address Broadcast (ABE),
+ * Branch History Table (BHTE), Branch Target ICache (BTIC)
+ * Dynamic Power Management (DPM), Speculative (SPD)
+ * Clear Instruction cache throttling (ICTC)
+ */
+setup_750_7400_hid0:
+ mfspr r11,SPRN_HID0
+ ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
+ oris r11,r11,HID0_DPM@h
+BEGIN_FTR_SECTION
+ xori r11,r11,HID0_BTIC
+END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
+BEGIN_FTR_SECTION
+ xoris r11,r11,HID0_DPM@h /* disable dynamic power mgmt */
+END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
+ li r3,HID0_SPD
+ andc r11,r11,r3 /* clear SPD: enable speculative */
+ li r3,0
+ mtspr SPRN_ICTC,r3 /* Instruction Cache Throttling off */
+ isync
+ mtspr SPRN_HID0,r11
+ sync
+ isync
+ blr
+
+/* 750cx specific
+ * Looks like we have to disable the NAP feature for some PLL settings...
+ * (waiting for confirmation)
+ */
+setup_750cx:
+ mfspr r10, SPRN_HID1
+ rlwinm r10,r10,4,28,31 /* r10 = HID1[0:3], the PLL_CFG field */
+ cmpwi cr0,r10,7
+ cmpwi cr1,r10,9
+ cmpwi cr2,r10,11
+ cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
+ cror 4*cr0+eq,4*cr0+eq,4*cr2+eq /* eq if PLL_CFG is 7, 9 or 11 */
+ bnelr
+ lwz r6,CPU_SPEC_FEATURES(r5)
+ li r7,CPU_FTR_CAN_NAP
+ andc r6,r6,r7 /* clear CPU_FTR_CAN_NAP from the feature mask */
+ stw r6,CPU_SPEC_FEATURES(r5)
+ blr
+
+/* 750fx specific
+ */
+setup_750fx:
+ blr
+
+/* MPC 745x
+ * Enable Store Gathering (SGE), Branch Folding (FOLD)
+ * Branch History Table (BHTE), Branch Target ICache (BTIC)
+ * Dynamic Power Management (DPM), Speculative (SPD)
+ * Ensure our data cache instructions really operate.
+ * Timebase has to be running or we wouldn't have made it here,
+ * just ensure we don't disable it.
+ * Clear Instruction cache throttling (ICTC)
+ * Enable L2 HW prefetch
+ */
+setup_745x_specifics:
+ /* We check for the presence of an L3 cache set up by
+ * the firmware. If there is one, we disable the NAP capability,
+ * as NAP is known to be bogus on rev 2.1 and earlier.
+ */
+ mfspr r11,SPRN_L3CR
+ andis. r11,r11,L3CR_L3E@h
+ beq 1f
+ lwz r6,CPU_SPEC_FEATURES(r5)
+ andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
+ beq 1f
+ li r7,CPU_FTR_CAN_NAP
+ andc r6,r6,r7
+ stw r6,CPU_SPEC_FEATURES(r5)
+1:
+ mfspr r11,SPRN_HID0
+
+ /* All of the bits we have to set.....
+ */
+ ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE
+ ori r11,r11,HID0_LRSTK | HID0_BTIC
+ oris r11,r11,HID0_DPM@h
+BEGIN_FTR_SECTION
+ xori r11,r11,HID0_BTIC
+END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
+BEGIN_FTR_SECTION
+ xoris r11,r11,HID0_DPM@h /* disable dynamic power mgmt */
+END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
+
+ /* All of the bits we have to clear....
+ */
+ li r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
+ andc r11,r11,r3 /* clear SPD: enable speculative */
+ li r3,0
+
+ mtspr SPRN_ICTC,r3 /* Instruction Cache Throttling off */
+ isync
+ mtspr SPRN_HID0,r11
+ sync
+ isync
+
+ /* Enable L2 HW prefetch, if L2 is enabled
+ */
+ mfspr r3,SPRN_L2CR
+ andis. r3,r3,L2CR_L2E@h
+ beqlr
+ mfspr r3,SPRN_MSSCR0
+ ori r3,r3,3 /* set the two L2 prefetch enable bits */
+ sync
+ mtspr SPRN_MSSCR0,r3
+ sync
+ isync
+ blr
+
+/*
+ * Initialize the FPU registers. This is needed to work around an erratum
+ * in some 750 CPUs where using a not-yet-initialized FPU register after
+ * power-on reset may hang the CPU.
+ */
+_GLOBAL(__init_fpu_registers)
+ mfmsr r10
+ ori r11,r10,MSR_FP
+ mtmsr r11
+ isync
+ addis r9,r3,empty_zero_page@ha
+ addi r9,r9,empty_zero_page@l
+ REST_32FPRS(0,r9)
+ sync
+ mtmsr r10
+ isync
+ blr
+
+
+/* Definitions for the table used to save CPU state */
+#define CS_HID0 0
+#define CS_HID1 4
+#define CS_HID2 8
+#define CS_MSSCR0 12
+#define CS_MSSSR0 16
+#define CS_ICTRL 20
+#define CS_LDSTCR 24
+#define CS_LDSTDB 28
+#define CS_SIZE 32
+
+ .data
+ .balign L1_CACHE_BYTES
+cpu_state_storage:
+ .space CS_SIZE
+ .balign L1_CACHE_BYTES,0
+ .text
+
+/* Called in normal context to back up CPU 0 state. This
+ * does not include cache settings. This function is also
+ * called for machine sleep. It does not save the MMU
+ * setup, BATs, etc... but rather the "special" registers
+ * like HID0, HID1, MSSCR0, etc...
+ */
+_GLOBAL(__save_cpu_setup)
+ /* Some CR fields are volatile, so back them all up */
+ mfcr r7
+
+ /* Get storage ptr */
+ lis r5,cpu_state_storage@h
+ ori r5,r5,cpu_state_storage@l
+
+ /* Save HID0 (common to all CONFIG_6xx cpus) */
+ mfspr r3,SPRN_HID0
+ stw r3,CS_HID0(r5)
+
+ /* Now deal with CPU type dependent registers */
+ mfspr r3,SPRN_PVR
+ srwi r3,r3,16
+ cmplwi cr0,r3,0x8000 /* 7450 */
+ cmplwi cr1,r3,0x000c /* 7400 */
+ cmplwi cr2,r3,0x800c /* 7410 */
+ cmplwi cr3,r3,0x8001 /* 7455 */
+ cmplwi cr4,r3,0x8002 /* 7457 */
+ cmplwi cr5,r3,0x8003 /* 7447A */
+ cmplwi cr6,r3,0x7000 /* 750FX */
+ cmplwi cr7,r3,0x8004 /* 7448 */
+ /* cr1 is 7400 || 7410 */
+ cror 4*cr1+eq,4*cr1+eq,4*cr2+eq
+ /* cr0 is 74xx */
+ cror 4*cr0+eq,4*cr0+eq,4*cr3+eq
+ cror 4*cr0+eq,4*cr0+eq,4*cr4+eq
+ cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
+ cror 4*cr0+eq,4*cr0+eq,4*cr5+eq
+ cror 4*cr0+eq,4*cr0+eq,4*cr7+eq
+ bne 1f
+ /* Backup 74xx specific regs */
+ mfspr r4,SPRN_MSSCR0
+ stw r4,CS_MSSCR0(r5)
+ mfspr r4,SPRN_MSSSR0
+ stw r4,CS_MSSSR0(r5)
+ beq cr1,1f
+ /* Backup 745x specific registers */
+ mfspr r4,SPRN_HID1
+ stw r4,CS_HID1(r5)
+ mfspr r4,SPRN_ICTRL
+ stw r4,CS_ICTRL(r5)
+ mfspr r4,SPRN_LDSTCR
+ stw r4,CS_LDSTCR(r5)
+ mfspr r4,SPRN_LDSTDB
+ stw r4,CS_LDSTDB(r5)
+1:
+ bne cr6,1f
+ /* Backup 750FX specific registers */
+ mfspr r4,SPRN_HID1
+ stw r4,CS_HID1(r5)
+ /* If rev 2.x, back up HID2 */
+ mfspr r3,SPRN_PVR
+ andi. r3,r3,0xff00
+ cmpwi cr0,r3,0x0200
+ bne 1f
+ mfspr r4,SPRN_HID2
+ stw r4,CS_HID2(r5)
+1:
+ mtcr r7
+ blr
+
+/* Called with no MMU context (typically MSR:IR/DR off) to
+ * restore the CPU state as backed up by __save_cpu_setup
+ * above. This does not include cache settings.
+ */
+_GLOBAL(__restore_cpu_setup)
+ /* Some CR fields are volatile, so back them all up */
+ mfcr r7
+
+ /* Get storage ptr */
+ lis r5,(cpu_state_storage-KERNELBASE)@h /* physical address: the MMU may be off */
+ ori r5,r5,cpu_state_storage@l
+
+ /* Restore HID0 */
+ lwz r3,CS_HID0(r5)
+ sync
+ isync
+ mtspr SPRN_HID0,r3
+ sync
+ isync
+
+ /* Now deal with CPU type dependent registers */
+ mfspr r3,SPRN_PVR
+ srwi r3,r3,16
+ cmplwi cr0,r3,0x8000 /* 7450 */
+ cmplwi cr1,r3,0x000c /* 7400 */
+ cmplwi cr2,r3,0x800c /* 7410 */
+ cmplwi cr3,r3,0x8001 /* 7455 */
+ cmplwi cr4,r3,0x8002 /* 7457 */
+ cmplwi cr5,r3,0x8003 /* 7447A */
+ cmplwi cr6,r3,0x7000 /* 750FX */
+ cmplwi cr7,r3,0x8004 /* 7448 */
+ /* cr1 is 7400 || 7410 */
+ cror 4*cr1+eq,4*cr1+eq,4*cr2+eq
+ /* cr0 is 74xx */
+ cror 4*cr0+eq,4*cr0+eq,4*cr3+eq
+ cror 4*cr0+eq,4*cr0+eq,4*cr4+eq
+ cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
+ cror 4*cr0+eq,4*cr0+eq,4*cr5+eq
+ cror 4*cr0+eq,4*cr0+eq,4*cr7+eq
+ bne 2f
+ /* Restore 74xx specific regs */
+ lwz r4,CS_MSSCR0(r5)
+ sync
+ mtspr SPRN_MSSCR0,r4
+ sync
+ isync
+ lwz r4,CS_MSSSR0(r5)
+ sync
+ mtspr SPRN_MSSSR0,r4
+ sync
+ isync
+ bne cr2,1f
+ /* Clear 7410 L2CR2 */
+ li r4,0
+ mtspr SPRN_L2CR2,r4
+1: beq cr1,2f
+ /* Restore 745x specific registers */
+ lwz r4,CS_HID1(r5)
+ sync
+ mtspr SPRN_HID1,r4
+ isync
+ sync
+ lwz r4,CS_ICTRL(r5)
+ sync
+ mtspr SPRN_ICTRL,r4
+ isync
+ sync
+ lwz r4,CS_LDSTCR(r5)
+ sync
+ mtspr SPRN_LDSTCR,r4
+ isync
+ sync
+ lwz r4,CS_LDSTDB(r5)
+ sync
+ mtspr SPRN_LDSTDB,r4
+ isync
+ sync
+2: bne cr6,1f
+ /* Restore 750FX specific registers:
+ * HID2 on rev 2.x, and the PLL config on all revisions
+ * (switching to PLL 0 while the target PLL stabilizes)
+ */
+ /* If rev 2.x, restore HID2 with low voltage bit cleared */
+ mfspr r3,SPRN_PVR
+ andi. r3,r3,0xff00
+ cmpwi cr0,r3,0x0200
+ bne 4f
+ lwz r4,CS_HID2(r5)
+ rlwinm r4,r4,0,19,17
+ mtspr SPRN_HID2,r4
+ sync
+4:
+ lwz r4,CS_HID1(r5)
+ rlwinm r5,r4,0,16,14
+ mtspr SPRN_HID1,r5
+ /* Wait for PLL to stabilize */
+ mftbl r5
+3: mftbl r6
+ sub r6,r6,r5
+ cmplwi cr0,r6,10000
+ ble 3b
+ /* Setup final PLL */
+ mtspr SPRN_HID1,r4
+1:
+ mtcr r7
+ blr
+
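For reference, the CS_* offsets above describe a simple 32-byte save area. A C view of the same layout (illustrative only; the code keeps it as raw offsets):

 /* C view of cpu_state_storage as laid out by the CS_* offsets */
 struct cpu_state {
         unsigned int hid0;      /* CS_HID0,   offset  0 */
         unsigned int hid1;      /* CS_HID1,   offset  4 */
         unsigned int hid2;      /* CS_HID2,   offset  8 */
         unsigned int msscr0;    /* CS_MSSCR0, offset 12 */
         unsigned int msssr0;    /* CS_MSSSR0, offset 16 */
         unsigned int ictrl;     /* CS_ICTRL,  offset 20 */
         unsigned int ldstcr;    /* CS_LDSTCR, offset 24 */
         unsigned int ldstdb;    /* CS_LDSTDB, offset 28 */
 };                              /* CS_SIZE = 32 bytes */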
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
new file mode 100644
index 000000000000..26bd8ea35a4e
--- /dev/null
+++ b/arch/powerpc/kernel/tau_6xx.c
@@ -0,0 +1,271 @@
+/*
+ * tau_6xx.c Thermal management for CPUs with a Thermal Assist Unit
+ *
+ * Written by Troy Benjegerdes <hozer@drgw.net>
+ *
+ * TODO:
+ * dynamic power management to limit peak CPU temp (using ICTC)
+ * calibration???
+ *
+ * Silly, crazy ideas: use cpu load (from scheduler) and ICTC to extend battery
+ * life in portables, and add a 'performance/watt' metric somewhere in /proc
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/reg.h>
+#include <asm/nvram.h>
+#include <asm/cache.h>
+#include <asm/8xx_immap.h>
+#include <asm/machdep.h>
+
+static struct tau_temp
+{
+ int interrupts; /* TAU interrupt count, reported by tau_interrupts() */
+ unsigned char low; /* lower window bound, degrees C */
+ unsigned char high; /* upper window bound, degrees C */
+ unsigned char grew; /* window grew this period; skip shrinking it */
+} tau[NR_CPUS];
+
+struct timer_list tau_timer;
+
+#undef DEBUG
+
+/* TODO: put these in a /proc interface, with some sanity checks, and maybe
+ * dynamic adjustment to minimize # of interrupts */
+/* configurable values for step size and how much to expand the window when
+ * we get an interrupt. These are based on the limit that was out of range */
+#define step_size 2 /* step size when temp goes out of range */
+#define window_expand 1 /* expand the window by this much */
+/* configurable values for shrinking the window */
+#define shrink_timer 2*HZ /* period between shrinking the window */
+#define min_window 2 /* minimum window size, degrees C */
+
+void set_thresholds(unsigned long cpu)
+{
+#ifdef CONFIG_TAU_INT
+ /*
+ * setup THRM1,
+ * threshold, valid bit, enable interrupts, interrupt when below threshold
+ */
+ mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);
+
+ /* setup THRM2,
+ * threshold, valid bit, enable interrupts, interrupt when above threshold
+ */
+ mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
+#else
+ /* same thing but don't enable interrupts */
+ mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
+ mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
+#endif
+}
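+/* Example: with the boot-time window set by TAU_init_smp() below
+ * (low = 5, high = 120), THRM1 fires when the die temperature drops
+ * below 5 C and THRM2 when it rises above 120 C, given CONFIG_TAU_INT;
+ * without it the same thresholds are programmed but left silent for
+ * tau_timeout() to poll.
+ */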
+
+void TAUupdate(int cpu)
+{
+ unsigned thrm;
+
+#ifdef DEBUG
+ printk("TAUupdate ");
+#endif
+
+ /* if both thresholds are crossed, the step_sizes cancel out
+ * and the window winds up getting expanded twice. */
+ if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */
+ if(thrm & THRM1_TIN){ /* crossed low threshold */
+ if (tau[cpu].low >= step_size){
+ tau[cpu].low -= step_size;
+ tau[cpu].high -= (step_size - window_expand);
+ }
+ tau[cpu].grew = 1;
+#ifdef DEBUG
+ printk("low threshold crossed ");
+#endif
+ }
+ }
+ if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */
+ if(thrm & THRM1_TIN){ /* crossed high threshold */
+ if (tau[cpu].high <= 127-step_size){
+ tau[cpu].low += (step_size - window_expand);
+ tau[cpu].high += step_size;
+ }
+ tau[cpu].grew = 1;
+#ifdef DEBUG
+ printk("high threshold crossed ");
+#endif
+ }
+ }
+
+#ifdef DEBUG
+ printk("grew = %d\n", tau[cpu].grew);
+#endif
+
+#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */
+ set_thresholds(cpu);
+#endif
+
+}
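+/* Worked example with the defaults (step_size = 2, window_expand = 1):
+ * crossing the high threshold of a 50..54 window yields 51..56. Both
+ * bounds step up by step_size, but the low bound gives back
+ * window_expand, so the window also grows by one degree until
+ * tau_timeout() shrinks it again.
+ */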
+
+#ifdef CONFIG_TAU_INT
+/*
+ * TAU interrupts - called when we have a thermal assist unit interrupt
+ * with interrupts disabled
+ */
+
+void TAUException(struct pt_regs * regs)
+{
+ int cpu = smp_processor_id();
+
+ irq_enter();
+ tau[cpu].interrupts++;
+
+ TAUupdate(cpu);
+
+ irq_exit();
+}
+#endif /* CONFIG_TAU_INT */
+
+static void tau_timeout(void * info)
+{
+ int cpu;
+ unsigned long flags;
+ int size;
+ int shrink;
+
+ /* disabling interrupts *should* be okay */
+ local_irq_save(flags);
+ cpu = smp_processor_id();
+
+#ifndef CONFIG_TAU_INT
+ TAUupdate(cpu);
+#endif
+
+ size = tau[cpu].high - tau[cpu].low;
+ if (size > min_window && ! tau[cpu].grew) {
+ /* do an exponential shrink of half the amount currently over min_window */
+ shrink = (2 + size - min_window) / 4;
+ if (shrink) {
+ tau[cpu].low += shrink;
+ tau[cpu].high -= shrink;
+ } else { /* size must have been min_window + 1 */
+ tau[cpu].low += 1;
+#if 1 /* debug */
+ if ((tau[cpu].high - tau[cpu].low) != min_window){
+ printk(KERN_ERR "tau_6xx.c: line %d, logic error\n", __LINE__);
+ }
+#endif
+ }
+ }
+
+ tau[cpu].grew = 0;
+
+ set_thresholds(cpu);
+
+ /*
+ * Do the enable every time, since otherwise a bunch of (relatively)
+ * complex sleep code would need to be added. One mtspr every time
+ * tau_timeout is called is probably not a big deal.
+ *
+ * Enable the thermal sensor and set up the sample interval timer.
+ * We need 20 us to do the compare; until a nice 'cpu_speed' function
+ * call is implemented, just assume a 500 MHz clock. It doesn't really
+ * matter if we take too long for a compare since it's all interrupt
+ * driven anyway.
+ *
+ * Use an extra-long interval (60 us @ 500 MHz).
+ */
+ mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);
+
+ local_irq_restore(flags);
+}
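+/* Shrink example: a 40..60 window (size = 20) that saw no growth this
+ * period gives shrink = (2 + 20 - 2) / 4 = 5, tightening the window to
+ * 45..55. Each pass removes roughly half of the excess over min_window.
+ */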
+
+static void tau_timeout_smp(unsigned long unused)
+{
+
+ /* schedule ourselves to be run again */
+ mod_timer(&tau_timer, jiffies + shrink_timer) ;
+ on_each_cpu(tau_timeout, NULL, 1, 0);
+}
+
+/*
+ * set up the TAU
+ *
+ * Set things up to use THRM1 as a temperature lower bound, and THRM2 as an upper bound.
+ * Start with a deliberately wide window and let the timer shrink it.
+ */
+
+int tau_initialized = 0;
+
+void __init TAU_init_smp(void * info)
+{
+ unsigned long cpu = smp_processor_id();
+
+ /* set these to a reasonable value and let the timer shrink the
+ * window */
+ tau[cpu].low = 5;
+ tau[cpu].high = 120;
+
+ set_thresholds(cpu);
+}
+
+int __init TAU_init(void)
+{
+ /* We assume in SMP that if one CPU has TAU support, they
+ * all have it --BenH
+ */
+ if (!cpu_has_feature(CPU_FTR_TAU)) {
+ printk("Thermal assist unit not available\n");
+ tau_initialized = 0;
+ return 1;
+ }
+
+
+ /* first, set up the window shrinking timer */
+ init_timer(&tau_timer);
+ tau_timer.function = tau_timeout_smp;
+ tau_timer.expires = jiffies + shrink_timer;
+ add_timer(&tau_timer);
+
+ on_each_cpu(TAU_init_smp, NULL, 1, 0);
+
+ printk("Thermal assist unit ");
+#ifdef CONFIG_TAU_INT
+ printk("using interrupts, ");
+#else
+ printk("using timers, ");
+#endif
+ printk("shrink_timer: %d jiffies\n", shrink_timer);
+ tau_initialized = 1;
+
+ return 0;
+}
+
+__initcall(TAU_init);
+
+/*
+ * return current temp
+ */
+
+u32 cpu_temp_both(unsigned long cpu)
+{
+ return ((tau[cpu].high << 16) | tau[cpu].low);
+}
+
+int cpu_temp(unsigned long cpu)
+{
+ return ((tau[cpu].high + tau[cpu].low) / 2);
+}
+
+int tau_interrupts(unsigned long cpu)
+{
+ return (tau[cpu].interrupts);
+}
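These accessors are the interface the rest of the kernel consumes (e.g. the /proc/cpuinfo temperature line mentioned in the Kconfig help above). A minimal sketch of a caller, assuming a seq_file based printer; print_tau() is hypothetical and not part of this patch:

 #include <linux/types.h>
 #include <linux/seq_file.h>

 extern int tau_initialized;
 extern u32 cpu_temp_both(unsigned long cpu);
 extern int tau_interrupts(unsigned long cpu);

 /* Hypothetical consumer: cpu_temp_both() packs the current window
  * as (high << 16) | low, both in degrees C. */
 static void print_tau(struct seq_file *m, unsigned long cpu)
 {
         u32 both;

         if (!tau_initialized)
                 return;
         both = cpu_temp_both(cpu);
         seq_printf(m, "temperature\t: %u-%u C (uncalibrated)\n",
                    both & 0xff, both >> 16);
         seq_printf(m, "TAU interrupts\t: %d\n", tau_interrupts(cpu));
 }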