author    Adrian Bunk <bunk@stusta.de>  2005-11-07 00:58:44 -0800
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-11-07 07:53:30 -0800
commit    d9b5444eeb3a663ca4a625878b1421c9e9b18e8b (patch)
tree      6cc32711116977944043c54e0c196c75358916be  /include/asm-cris/bitops.h
parent    5f9c3cbcd5d41be597aef9c0ff64ebfc8a91cd6f (diff)
[PATCH] cris: "extern inline" -> "static inline"

"extern inline" doesn't make much sense.

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Acked-by: Mikael Starvik <starvik@axis.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-cris/bitops.h')
-rw-r--r--  include/asm-cris/bitops.h | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
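
For context on the rationale above, here is a minimal sketch (not part of this patch; the header and function names are invented for illustration) of why "static inline" is the safer choice in a shared header. Under gcc's traditional GNU89 rules, "extern inline" provides a body for inlining only and emits no out-of-line definition, so a call the compiler declines to inline (for example at -O0) is left as an unresolved symbol unless a separate non-inline definition exists in some .c file. "static inline" instead gives each translation unit its own local fallback copy, so the code always links.

/* demo_bitops.h -- hypothetical header, for illustration only */
#ifndef DEMO_BITOPS_H
#define DEMO_BITOPS_H

/*
 * Declared "static inline": if gcc chooses not to inline a call,
 * each .c file that includes this header gets its own local copy,
 * so linking never fails.  The old "extern inline" (GNU89 semantics)
 * would emit no out-of-line body at all, and a non-inlined call
 * would need a matching plain definition elsewhere.
 */
static inline int demo_test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned int mask = 1u << (nr & 31);
	unsigned int *adr = (unsigned int *)addr + (nr >> 5);
	int retval = (*adr & mask) != 0;	/* non-atomic, demo only */

	*adr |= mask;
	return retval;
}

#endif /* DEMO_BITOPS_H */

Building a file that includes such a header at -O0 links cleanly with "static inline"; the "extern inline" variant would only link if some .c file also supplied an out-of-line definition.
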
diff --git a/include/asm-cris/bitops.h b/include/asm-cris/bitops.h
index e3da57f97964..1bddb3f3a289 100644
--- a/include/asm-cris/bitops.h
+++ b/include/asm-cris/bitops.h
@@ -89,7 +89,7 @@ struct __dummy { unsigned long a[100]; };
* It also implies a memory barrier.
*/
-extern inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
unsigned int mask, retval;
unsigned long flags;
@@ -105,7 +105,7 @@ extern inline int test_and_set_bit(int nr, volatile unsigned long *addr)
return retval;
}
-extern inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
unsigned int mask, retval;
unsigned int *adr = (unsigned int *)addr;
@@ -132,7 +132,7 @@ extern inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
* It also implies a memory barrier.
*/
-extern inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
unsigned int mask, retval;
unsigned long flags;
@@ -157,7 +157,7 @@ extern inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
* but actually fail. You must protect multiple accesses with a lock.
*/
-extern inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
unsigned int mask, retval;
unsigned int *adr = (unsigned int *)addr;
@@ -177,7 +177,7 @@ extern inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
* It also implies a memory barrier.
*/
-extern inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
unsigned int mask, retval;
unsigned long flags;
@@ -193,7 +193,7 @@ extern inline int test_and_change_bit(int nr, volatile unsigned long *addr)
/* WARNING: non atomic and it can be reordered! */
-extern inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
unsigned int mask, retval;
unsigned int *adr = (unsigned int *)addr;
@@ -214,7 +214,7 @@ extern inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
* This routine doesn't need to be atomic.
*/
-extern inline int test_bit(int nr, const volatile unsigned long *addr)
+static inline int test_bit(int nr, const volatile unsigned long *addr)
{
unsigned int mask;
unsigned int *adr = (unsigned int *)addr;
@@ -258,7 +258,7 @@ extern inline int test_bit(int nr, const volatile unsigned long *addr)
* @offset: The bitnumber to start searching at
* @size: The maximum size to search
*/
-extern inline int find_next_zero_bit (const unsigned long * addr, int size, int offset)
+static inline int find_next_zero_bit (const unsigned long * addr, int size, int offset)
{
unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
unsigned long result = offset & ~31UL;
@@ -366,7 +366,7 @@ found_middle:
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
-extern inline int sched_find_first_bit(const unsigned long *b)
+static inline int sched_find_first_bit(const unsigned long *b)
{
if (unlikely(b[0]))
return __ffs(b[0]);