diff --git a/arch/mips/include/asm/delay.h b/arch/mips/include/asm/delay.h
index e7cd78277c23..dc0a5f77a35c 100644
--- a/arch/mips/include/asm/delay.h
+++ b/arch/mips/include/asm/delay.h
@@ -13,9 +13,9 @@
 
 #include
 
-extern void __delay(unsigned int loops);
-extern void __ndelay(unsigned int ns);
-extern void __udelay(unsigned int us);
+extern void __delay(unsigned long loops);
+extern void __ndelay(unsigned long ns);
+extern void __udelay(unsigned long us);
 
 #define ndelay(ns) __ndelay(ns)
 #define udelay(us) __udelay(us)
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index c26e18250079..f5b521d5a67d 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -9,6 +9,7 @@
 #ifndef _ASM_PGTABLE_64_H
 #define _ASM_PGTABLE_64_H
 
+#include
 #include
 #include
 
@@ -172,7 +173,19 @@ static inline int pmd_none(pmd_t pmd)
 	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
 }
 
-#define pmd_bad(pmd)	(pmd_val(pmd) & ~PAGE_MASK)
+static inline int pmd_bad(pmd_t pmd)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+	/* pmd_huge(pmd) but inline */
+	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+		return 0;
+#endif
+
+	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
+		return 1;
+
+	return 0;
+}
 
 static inline int pmd_present(pmd_t pmd)
 {
diff --git a/arch/mips/jz4740/serial.h b/arch/mips/jz4740/serial.h
index b9fe3ade0289..8eb715bb1ea8 100644
--- a/arch/mips/jz4740/serial.h
+++ b/arch/mips/jz4740/serial.h
@@ -14,6 +14,9 @@
  */
 
 #ifndef __MIPS_JZ4740_SERIAL_H__
+#define __MIPS_JZ4740_SERIAL_H__
+
+struct uart_port;
 
 void jz4740_serial_out(struct uart_port *p, int offset, int value);
 
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index afc379ca3753..06cd0c610f44 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -97,7 +97,7 @@ static void cmp_init_secondary(void)
 
 	/* Enable per-cpu interrupts: platform specific */
 
-	c->core = (read_c0_ebase() >> 1) & 0xff;
+	c->core = (read_c0_ebase() >> 1) & 0x1ff;
 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
 	c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
 #endif
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index 5995969e8c42..dc81ca8dc0dd 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -15,13 +15,17 @@
 #include
 #include
 
-inline void __delay(unsigned int loops)
+void __delay(unsigned long loops)
 {
 	__asm__ __volatile__ (
 	"	.set	noreorder	\n"
 	"	.align	3		\n"
 	"1:	bnez	%0, 1b		\n"
+#if __SIZEOF_LONG__ == 4
 	"	subu	%0, 1		\n"
+#else
+	"	dsubu	%0, 1		\n"
+#endif
 	"	.set	reorder		\n"
 	: "=r" (loops)
 	: "0" (loops));
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 3f69725556af..a99c1d3fc567 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -50,8 +50,9 @@ static void dump_tlb(int first, int last)
 {
 	unsigned long s_entryhi, entryhi, asid;
 	unsigned long long entrylo0, entrylo1;
-	unsigned int s_index, pagemask, c0, c1, i;
+	unsigned int s_index, s_pagemask, pagemask, c0, c1, i;
 
+	s_pagemask = read_c0_pagemask();
 	s_entryhi = read_c0_entryhi();
 	s_index = read_c0_index();
 	asid = s_entryhi & 0xff;
@@ -103,6 +104,7 @@ static void dump_tlb(int first, int last)
 
 	write_c0_entryhi(s_entryhi);
 	write_c0_index(s_index);
+	write_c0_pagemask(s_pagemask);
 }
 
 void dump_tlb_all(void)
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 87b9cfcc30ff..4b9b935a070e 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -320,6 +320,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 			tlb_write_random();
 		else
 			tlb_write_indexed();
+		tlbw_use_hazard();
 		write_c0_pagemask(PM_DEFAULT_MASK);
 	} else
 #endif
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 658a520364ce..2833dcb67b5a 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -148,8 +148,8 @@ enum label_id {
 	label_leave,
 	label_vmalloc,
 	label_vmalloc_done,
-	label_tlbw_hazard,
-	label_split,
+	label_tlbw_hazard_0,
+	label_split = label_tlbw_hazard_0 + 8,
 	label_tlbl_goaround1,
 	label_tlbl_goaround2,
 	label_nopage_tlbl,
@@ -167,7 +167,7 @@ UASM_L_LA(_second_part)
 UASM_L_LA(_leave)
 UASM_L_LA(_vmalloc)
 UASM_L_LA(_vmalloc_done)
-UASM_L_LA(_tlbw_hazard)
+/* _tlbw_hazard_x is handled differently. */
 UASM_L_LA(_split)
 UASM_L_LA(_tlbl_goaround1)
 UASM_L_LA(_tlbl_goaround2)
@@ -181,6 +181,30 @@ UASM_L_LA(_large_segbits_fault)
 UASM_L_LA(_tlb_huge_update)
 #endif
 
+static int __cpuinitdata hazard_instance;
+
+static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
+{
+	switch (instance) {
+	case 0 ... 7:
+		uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
+		return;
+	default:
+		BUG();
+	}
+}
+
+static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
+{
+	switch (instance) {
+	case 0 ... 7:
+		uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
+		break;
+	default:
+		BUG();
+	}
+}
+
 /*
  * For debug purposes.
  */
@@ -478,21 +502,28 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		 * This branch uses up a mtc0 hazard nop slot and saves
 		 * two nops after the tlbw instruction.
 		 */
-		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
+		uasm_bgezl_hazard(p, r, hazard_instance);
 		tlbw(p);
-		uasm_l_tlbw_hazard(l, *p);
+		uasm_bgezl_label(l, p, hazard_instance);
+		hazard_instance++;
 		uasm_i_nop(p);
 		break;
 
 	case CPU_R4600:
 	case CPU_R4700:
-	case CPU_R5000:
-	case CPU_R5000A:
 		uasm_i_nop(p);
 		tlbw(p);
 		uasm_i_nop(p);
 		break;
 
+	case CPU_R5000:
+	case CPU_R5000A:
+	case CPU_NEVADA:
+		uasm_i_nop(p);	/* QED specifies 2 nops hazard */
+		uasm_i_nop(p);	/* QED specifies 2 nops hazard */
+		tlbw(p);
+		break;
+
 	case CPU_R4300:
 	case CPU_5KC:
 	case CPU_TX49XX:
@@ -526,17 +557,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		tlbw(p);
 		break;
 
-	case CPU_NEVADA:
-		uasm_i_nop(p); /* QED specifies 2 nops hazard */
-		/*
-		 * This branch uses up a mtc0 hazard nop slot and saves
-		 * a nop after the tlbw instruction.
-		 */
-		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
-		tlbw(p);
-		uasm_l_tlbw_hazard(l, *p);
-		break;
-
 	case CPU_RM7000:
 		uasm_i_nop(p);
 		uasm_i_nop(p);
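Note on the tlbex.c hunks above: the single label_tlbw_hazard is replaced by eight per-instance labels so that every generated TLB-write sequence gets its own bgezl branch target; label_split is pushed up by 8 to leave room for them, and hazard_instance is incremented after each use so successive calls to build_tlb_write_entry() never reuse a label. A minimal standalone sketch of that numbering scheme follows; it is plain userspace C for illustration only (pick_hazard_label and the main() driver are made-up names, not kernel or uasm APIs), assuming at most eight handler copies are generated, which is what the BUG() in the patch enforces.

#include <assert.h>
#include <stdio.h>

/* Mirror of the enum trick in the patch: reserve eight consecutive
 * label ids starting at label_tlbw_hazard_0, then continue with
 * label_split = label_tlbw_hazard_0 + 8. */
enum label_id {
	label_invalid,
	label_tlbw_hazard_0,
	label_split = label_tlbw_hazard_0 + 8,
	label_nopage_tlbl,
};

/* Illustrative stand-in for uasm_bgezl_hazard()/uasm_bgezl_label():
 * map an instance number 0..7 onto its reserved label id. */
static int pick_hazard_label(int instance)
{
	assert(instance >= 0 && instance < 8);	/* BUG() in the kernel version */
	return label_tlbw_hazard_0 + instance;
}

int main(void)
{
	int hazard_instance = 0;
	int i;

	/* Each emitted TLB-write handler consumes one instance. */
	for (i = 0; i < 3; i++)
		printf("handler %d branches to label id %d\n",
		       i, pick_hazard_label(hazard_instance++));

	printf("label_split = %d, leaving ids %d..%d for hazard labels\n",
	       label_split, (int)label_tlbw_hazard_0,
	       (int)label_tlbw_hazard_0 + 7);
	return 0;
}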