linux/arch/mips/mm/tlb-r4k.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

extern void build_tlb_refill_handler(void);

/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
				     "nop; nop; nop; nop; nop; nop;\n\t" \
				     ".set reorder\n\t")
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();
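	/*
	 * Entries below the c0_wired index are fixed mappings (see
	 * add_wired_entry() below); the flush deliberately starts above
	 * them so wired mappings survive.
	 */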

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/*
		 * Park each slot at a unique VPN2 in CKSEG0.  Addresses
		 * in kseg0 are never translated through the TLB, so these
		 * entries can never match; the values must also all
		 * differ, since a duplicate VPN2 makes a MIPS32 CPU raise
		 * a machine check on the TLB write.
		 */
		write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}

void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0)
		drop_mmu_context(mm, cpu);
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
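		/*
		 * "size" is now the number of even/odd page pairs the
		 * range spans; one EntryHi VPN2 covers two consecutive
		 * pages.  Probing per pair only pays off while the range
		 * occupies at most half the TLB; beyond that it is
		 * cheaper to drop the whole ASID and refill on demand.
		 */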
		if (size <= current_cpu_data.tlbsize / 2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				BARRIER;
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(CKSEG0 +
						 (idx << (PAGE_SHIFT + 1)));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
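	/*
	 * Same pair-at-a-time walk as local_flush_tlb_range() above, but
	 * kernel mappings carry the global bit and match regardless of
	 * ASID, so EntryHi is probed with the bare virtual address.
	 */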
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			BARRIER;
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
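		/*
		 * The address is aligned down to an even/odd page pair
		 * because the TLB is probed by VPN2; if the neighbouring
		 * page is still live, its EntryLo half is zapped along
		 * with the target's, and a refill brings it back later.
		 */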
		local_irq_save(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		BARRIER;
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	page &= (PAGE_MASK << 1);
	oldpid = read_c0_entryhi();
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	BARRIER;
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;
	pid = read_c0_entryhi() & ASID_MASK;

	local_irq_save(flags);
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	BARRIER;
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
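	/*
	 * The page-table walk above is interleaved with the CP0 accesses
	 * so that useful work fills the tlbp hazard slots.  Since the
	 * address was aligned to an even/odd pair, ptep points at the
	 * even page's pte and ptep + 1 at the odd one's.
	 */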
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	write_c0_entrylo0(ptep->pte_high);
	ptep++;
	write_c0_entrylo1(ptep->pte_high);
#else
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
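	/*
	 * The >> 6 converts a software pte into EntryLo format: the low
	 * six bits of the pte are software-only, and the hardware
	 * G/V/D/C/PFN fields sit directly above them.  With 64-bit
	 * physical addresses on a 32-bit CPU the pte is split and
	 * pte_high already holds the EntryLo image, so it is written
	 * out as-is.
	 */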
	write_c0_entryhi(address | pid);
	mtc0_tlbw_hazard();
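	/*
	 * A negative index means the probe missed: pick a victim slot at
	 * random.  On a hit the matched entry is overwritten in place so
	 * no duplicate VPN2 can be created.
	 */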
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(pid);
	local_irq_restore(flags);
}

#if 0
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct *vma,
				       unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned int asid;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx;

	local_irq_save(flags);
	address &= (PAGE_MASK << 1);
	asid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(address | asid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	BARRIER;
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	local_irq_restore(flags);
}
#endif

void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
			    unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	BARRIER;
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	BARRIER;
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
}

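/*
 * A minimal sketch of how a board port might use add_wired_entry(),
 * kept under #if 0 like the hwbug variant above.  The virtual and
 * physical addresses are made up for illustration, as is the function
 * name; the point is the EntryLo encoding (G, V, D at bits 0-2, cache
 * attribute at bits 3-5, PFN in 4 KB units from bit 6) and the fact
 * that one entry always maps an even/odd pair of pages.
 */
#if 0
static void __init wire_example_io_block(void)
{
	unsigned long entryhi, entrylo0, entrylo1;

	/* Even/odd pair of 16 MB pages at a hypothetical virtual base. */
	entryhi  = 0xc0000000;
	/* Even page -> phys 0x10000000: uncached (C=2), dirty, valid, global. */
	entrylo0 = ((0x10000000 >> 12) << 6) | (2 << 3) | (1 << 2) | (1 << 1) | 1;
	/* Odd page -> phys 0x11000000, same attributes. */
	entrylo1 = ((0x11000000 >> 12) << 6) | (2 << 3) | (1 << 2) | (1 << 1) | 1;

	add_wired_entry(entrylo0, entrylo1, entryhi, PM_16M);
}
#endif
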
/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */
static int temp_tlb_entry __initdata;
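
/*
 * Temporary entries are handed out from the top of the TLB downwards
 * (temp_tlb_entry is seeded in tlb_init() below), while the wired index
 * grows up from slot 0; the check below refuses to let the two regions
 * overlap.
 */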
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
out:
	local_irq_restore(flags);
	return ret;
}

static void __init probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int reg;

	/*
	 * If this isn't a MIPS32 / MIPS64 compliant CPU the Config1
	 * register is not implemented; assume an R4k-style TLB.  CPU
	 * probing has already figured out the number of TLB entries.
	 */
	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
		return;

	reg = read_c0_config1();
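	/*
	 * Config[9:7] is the MMU type field (zero means no TLB at all);
	 * Config1[30:25] holds the number of TLB entries minus one.
	 */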
	if (!((config >> 7) & 3))
		panic("No TLB present");

	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}

void __init tlb_init(void)
{
	unsigned int config = read_c0_config();

	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set for 4kb pages.
	 */
	probe_tlb(config);
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	temp_tlb_entry = current_cpu_data.tlbsize - 1;
	local_flush_tlb_all();

	build_tlb_refill_handler();
}