[Linux-Xtensa] [PATCH 3/3] xtensa: add MMU v3 support

Max Filippov jcmvbkbc at gmail.com
Fri Jan 4 16:57:18 PST 2013


MMUv3 comes out of reset with identity vaddr -> paddr mapping in the TLB
way 6:

Way 6 (512 MB)
        Vaddr       Paddr       ASID  Attr RWX Cache
        ----------  ----------  ----  ---- --- -------
        0x00000000  0x00000000  0x01  0x03 RWX Bypass
        0x20000000  0x20000000  0x01  0x03 RWX Bypass
        0x40000000  0x40000000  0x01  0x03 RWX Bypass
        0x60000000  0x60000000  0x01  0x03 RWX Bypass
        0x80000000  0x80000000  0x01  0x03 RWX Bypass
        0xa0000000  0xa0000000  0x01  0x03 RWX Bypass
        0xc0000000  0xc0000000  0x01  0x03 RWX Bypass
        0xe0000000  0xe0000000  0x01  0x03 RWX Bypass

This patch adds remapping code at the reset vector or at the kernel
_start (depending on CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) that
reconfigures MMUv3 as MMUv2:

Way 5 (128 MB)
        Vaddr       Paddr       ASID  Attr RWX Cache
        ----------  ----------  ----  ---- --- -------
        0xd0000000  0x00000000  0x01  0x07 RWX WB
        0xd8000000  0x00000000  0x01  0x03 RWX Bypass
Way 6 (256 MB)
        Vaddr       Paddr       ASID  Attr RWX Cache
        ----------  ----------  ----  ---- --- -------
        0xe0000000  0xf0000000  0x01  0x07 RWX WB
        0xf0000000  0xf0000000  0x01  0x03 RWX Bypass

Signed-off-by: Max Filippov <jcmvbkbc at gmail.com>
---
 arch/xtensa/Kconfig                      |   29 +++
 arch/xtensa/boot/boot-elf/Makefile       |    1 +
 arch/xtensa/boot/boot-elf/boot.lds.S     |   88 ++++++----
 arch/xtensa/boot/boot-elf/bootstrap.S    |  128 ++++++++++++--
 arch/xtensa/boot/boot-uboot/Makefile     |    2 +-
 arch/xtensa/include/asm/initialize_mmu.h |  291 ++++++++++++++++++++++++++++++
 arch/xtensa/include/asm/vectors.h        |  125 +++++++++++++
 arch/xtensa/kernel/Makefile              |    2 +
 arch/xtensa/kernel/head.S                |   38 +++-
 arch/xtensa/kernel/vectors.S             |    3 +-
 arch/xtensa/kernel/vmlinux.lds.S         |   48 ++++--
 arch/xtensa/mm/mmu.c                     |    4 +-
 12 files changed, 679 insertions(+), 80 deletions(-)
 create mode 100644 arch/xtensa/include/asm/vectors.h

diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index b08d59e..4fe8a89 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -100,6 +100,35 @@ config MATH_EMULATION
 	help
 	Can we use information of configuration file?
 
+config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+	bool "Initialize Xtensa MMU inside the Linux kernel code"
+	default y
+	help
+	  Earlier version initialized the MMU in the exception vector
+	  before jumping to _startup in head.S and had an advantage that
+	  it was possible to place a software breakpoint at 'reset' and
+	  then enter your normal kernel breakpoints once the MMU was mapped
+	  to the kernel mappings (0XC000.0000).
+
+	  This unfortunately doesn't work for U-Boot and likely also won't
+	  work for using KEXEC to have a hot kernel ready for doing a
+	  KDUMP.
+
+	  So now the MMU is initialized in head.S but it's necessary to
+	  use hardware breakpoints (gdb 'hbreak' cmd) to break at _startup.
+	  xt-gdb can't place a Software Breakpoint in the 0xD region prior
+	  to mapping the MMU and after mapping even if the area of low memory
+	  was mapped gdb wouldn't remove the breakpoint on hitting it as the
+	  PC wouldn't match. Since Hardware Breakpoints are recommended for
+	  Linux configurations it seems reasonable to just assume they exist
+	  and leave this older mechanism for unfortunate souls that choose
+	  not to follow Tensilica's recommendation.
+
+	  Selecting this will cause U-Boot to set the KERNEL Load and Entry
+	  address at 0x0000.2000 instead of the standard mapped address of 0xD000.2000.
+
+	  If in doubt, say Y.
+
 endmenu
 
 config XTENSA_CALIBRATE_CCOUNT
diff --git a/arch/xtensa/boot/boot-elf/Makefile b/arch/xtensa/boot/boot-elf/Makefile
index 1fe01b7..89db089 100644
--- a/arch/xtensa/boot/boot-elf/Makefile
+++ b/arch/xtensa/boot/boot-elf/Makefile
@@ -12,6 +12,7 @@ endif
 
 export OBJCOPY_ARGS
 export CPPFLAGS_boot.lds += -P -C
+export KBUILD_AFLAGS += -mtext-section-literals
 
 boot-y		:= bootstrap.o
 
diff --git a/arch/xtensa/boot/boot-elf/boot.lds.S b/arch/xtensa/boot/boot-elf/boot.lds.S
index 7b646e0..f9a4871 100644
--- a/arch/xtensa/boot/boot-elf/boot.lds.S
+++ b/arch/xtensa/boot/boot-elf/boot.lds.S
@@ -1,41 +1,58 @@
-#include <variant/core.h>
+/*
+ *  linux/arch/xtensa/boot/boot-elf/boot.lds.S
+ *
+ *  Copyright (C) 2008 - 2009 by Tensilica Inc.
+ *
+ *  Chris Zankel <chris at zankel.net>
+ *  Marc Gauthier <marc at tensilica.com>
+ *  Pete Delaney <piet at tensilica.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  This demonstrates how to append code to the start of the kernel
+ *  image, and boot the kernel after possibly remapping the MMU.  The
+ *  new Xtensa V3 MMU which runs initially with identity mapping, i.e.:
+ *   			virtual == physical.
+ */
+#include <asm/vectors.h>
 OUTPUT_ARCH(xtensa)
 ENTRY(_ResetVector)
 
+/*
+ * This code runs at the Reset Vector (0XFE000000) and includes
+ * within it a section called 'image' made by the kernel
+ * makefiles that's linked to run at 0xD0000000 but starts running
+ * at 0x00001000; just after the exception vectors. This can be viewed
+ * with objdump by looking at the Program Headers and Sections with:
+ *
+ *   xt-objdump -wph Image.elf
+ *
+ * Program Header:
+ *     LOAD off    0x00001000 vaddr 0xd0001000 paddr 0x00001000 align 2**12
+ *          filesz 0x0019d29c memsz 0x0019d29c flags rw-
+ *     LOAD off    0x0019f000 vaddr 0xfe000000 paddr 0xfe000000 align 2**12
+ *          filesz 0x0000010e memsz 0x0000010e flags r-x
+ *
+ * Sections:
+ * Idx Name              Size      VMA       LMA       File off  Algn  Flags
+ *   0 .ResetVector.text 0000010e  fe000000  fe000000  0019f000  2**2  ...
+ *   1 .image            0019d29c  d0001000  00001000  00001000  2**0  ...
+ */
+
 SECTIONS
 {
-	.start 0xD0000000 : { *(.start) }
-
-	.text 0xD0000000:
-	{
-		__reloc_start = . ;
-		_text_start = . ;
-		*(.literal .text.literal .text)
-		_text_end = . ;
-	}
-
-	.rodata ALIGN(0x04):
-	{
-		*(.rodata)
-		*(.rodata1)
-	}
 
-	.data ALIGN(0x04):
+	.ResetVector.text XCHAL_RESET_VECTOR_VADDR :
 	{
-		*(.data)
-		*(.data1)
-		*(.sdata)
-		*(.sdata2)
-		*(.got.plt)
-		*(.got)
-		*(.dynamic)
+		*(.ResetVector.text)
 	}
 
-	__reloc_end = . ;
 
-	. = ALIGN(0x10);
-	__image_load = . ;
-	.image 0xd0001000:
+	/*      0xD0003000          0xD0003000          Without MMU */
+	/*      0xD0003000          0x00003000		With V3 MMU */
+	.image KERNELOFFSET: AT (LOAD_MEMORY_ADDRESS)
 	{
 		_image_start = .;
 		*(image)
@@ -53,14 +70,15 @@ SECTIONS
 		*(.bss)
 		__bss_end = .;
 	}
-	_end = .;
-	_param_start = .;
 
-	.ResetVector.text XCHAL_RESET_VECTOR_VADDR :
+	/*
+	 * This is a remapped copy of the Reset Vector Code.
+	 * It keeps gdb in sync with the PC after switching
+	 * to the temporary mapping used while setting up
+	 * the V2 MMU mappings for Linux.
+	 */
+	.ResetVector.remapped_text 0x46000000 (INFO):
 	{
-		*(.ResetVector.text)
+		*(.ResetVector.remapped_text)
 	}
-
-
-	PROVIDE (end = .);
 }
diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S
index 464298b..2a27660 100644
--- a/arch/xtensa/boot/boot-elf/bootstrap.S
+++ b/arch/xtensa/boot/boot-elf/bootstrap.S
@@ -1,29 +1,56 @@
+/*
+ * arch/xtensa/boot/boot-elf/bootstrap.S
+ *
+ * Low-level exception handling
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 - 2009 by Tensilica Inc.
+ *
+ * Chris Zankel <chris at zankel.net>
+ * Marc Gauthier <marc at tensilica.com>
+ * Piet Delaney <piet at tensilica.com>
+ */
 
 #include <asm/bootparam.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/cacheasm.h>
+#include <asm/initialize_mmu.h>
+#include <linux/linkage.h>
 
 
-/* ResetVector
- */
-	.section        .ResetVector.text, "ax"
+	.section	.ResetVector.text, "ax"
 	.global         _ResetVector
+	.global         reset	# Place 1st Breakpoint here
+
 _ResetVector:
-	_j reset
+	_j _SetupMMU
+
+	/*
+	 *  Even if the processor supports the non-PC-relative L32R option,
+	 *  it will always start up in PC-relative mode.  We take advantage of
+	 *  this, and use PC-relative mode at least until we're sure the .lit4
+	 *  section is in place (which is sometimes only after unpacking).
+	 */
+	.begin  no-absolute-literals
+	.literal_position
+
 	.align 4
 RomInitAddr:
-	.word 0xd0001000
+#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \
+	XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+	.word 0x00003000
+#else
+	.word 0xd0003000
+#endif
 RomBootParam:
 	.word _bootparam
-reset:
-	l32r    a0, RomInitAddr
-	l32r	a2, RomBootParam
-	movi	a3, 0
-	movi	a4, 0
-	jx      a0
-
-	.align 4
-	.section .bootstrap.data, "aw"
 
-	.globl _bootparam
+	.globl _bootparam	# See RomBootParam in bootstrap.S
 _bootparam:
 	.short	BP_TAG_FIRST
 	.short	4
@@ -31,3 +58,74 @@ _bootparam:
 	.short	BP_TAG_LAST
 	.short	0
 	.long	0
+
+	.align  4
+_SetupMMU:
+#if defined(CONFIG_INITIALIZE_RESET_REGISTERS)
+	/*
+	 * Initialized Registers that are reset to their initial values
+	 * Makes it possible to single step thru early MMU initialization
+	 * in the case of code branching to the ResetVector when in fact
+	 * the registers are not in their normal reset state.
+	 */
+	movi	a0, 0
+	wsr	a0, windowbase
+	rsync
+	movi	a0, 1
+	wsr	a0, windowstart
+	rsync
+	movi	a0, 0x1F
+	wsr	a0, ps
+	rsync
+#endif
+	Offset = _SetupMMU - _ResetVector
+
+#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+/* 2nd Copy Linked at 0x46000000 for ddd and xt-gdb */
+	initialize_mmu
+#endif
+
+	.end    no-absolute-literals
+
+				# Enable xt-gdb Breakpoints
+	rsil    a0, XCHAL_DEBUGLEVEL-1
+	rsync
+
+#
+# Place 1st Breakpoint Here. Until we get here the MMU is mapped
+# virtual == physical for V3 MMU, so it's not possible in this case to set
+# normal kernel breakpoints as the code wasn't mapped there yet; now it is.
+#
+reset:
+	l32r    a0, RomInitAddr
+	l32r	a2, RomBootParam
+	movi	a3, 0
+	movi	a4, 0
+
+	jx      a0		# Typically calls _startup; a2 = &BootParams
+
+	.align 4
+
+	.section	.ResetVector.remapped_text, "x"
+	.global         _RemappedResetVector
+
+	.org 0			# Need to do org before literals
+
+_RemappedResetVector:
+	.begin  no-absolute-literals
+	.literal_position
+
+
+	_j	_RemappedSetupMMU
+
+				# Position Remapped code at the same location
+       				# as the original code.
+	. = _RemappedResetVector + Offset
+
+_RemappedSetupMMU:
+#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+	initialize_mmu
+#endif
+
+	.end    no-absolute-literals
+
diff --git a/arch/xtensa/boot/boot-uboot/Makefile b/arch/xtensa/boot/boot-uboot/Makefile
index bfbf8af..904f660 100644
--- a/arch/xtensa/boot/boot-uboot/Makefile
+++ b/arch/xtensa/boot/boot-uboot/Makefile
@@ -4,7 +4,7 @@
 # for more details.
 #
 
-UIMAGE_LOADADDR = 0xd0001000
+UIMAGE_LOADADDR = 0xd0003000
 UIMAGE_COMPRESSION = gzip
 
 $(obj)/../uImage: vmlinux.bin.gz FORCE
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index e1f8ba4..de1a91f 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -23,6 +23,9 @@
 #ifndef _XTENSA_INITIALIZE_MMU_H
 #define _XTENSA_INITIALIZE_MMU_H
 
+#include <asm/pgtable.h>
+#include <asm/vectors.h>
+
 #ifdef __ASSEMBLY__
 
 #define XTENSA_HWVERSION_RC_2009_0 230000
@@ -48,6 +51,294 @@
 	 * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
 	 */
 
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+/*
+ * Have MMU v3
+ */
+
+#if !XCHAL_HAVE_VECBASE
+# error "MMU v3 requires reloc vectors"
+#endif
+
+/*  FIXME:  this code needs to be assembled -mtext-section-literals,
+ *	and contain an appropriate .literal_position that we jump over;
+ *	the standard reset vector usually does this already, so it's
+ *	not repeated here.
+ *
+ *  This code must execute *before* LITBASE gets initialized.
+ *
+ *  ASSUMPTIONS:
+ *
+ *  This code fragment is run only on an MMU v3.
+ *  TLBs are in their reset state.
+ *  ITLBCFG and DTLBCFG are zero (reset state).
+ *  RASID is 0x04030201 (reset state).
+ *  PS.RING is zero (reset state).
+ *  LITBASE is zero (reset state, PC-relative literals); required to be PIC.
+ *  This code is located in one of the following address ranges:
+ *
+ *
+ *	0xF0000000..0xFFFFFFFF	(will keep same address in MMU v2 layout;
+ *				 typically ROM)
+ *	0x00000000..0x07FFFFFF  (system RAM; this code is actually linked
+ *				 at 0xD0000000..0xD7FFFFFF [cached]
+ *				 or 0xD8000000..0xDFFFFFFF [uncached];
+ *				 does it have to be the latter?
+ *				 in any case, initially runs elsewhere
+ *				 than linked, so have to be careful)
+ *	(local instram/instrom)	(will move to ??? or not move?)
+ *
+ *
+ *  TLB setup proceeds along the following steps.  Legend:
+ *
+ *	VA = virtual address (two upper nibbles of it);
+ *	PA = physical address (two upper nibbles of it);
+ *	pc = physical range that contains this code;
+ *	LM = physical range that contains local memories.
+ *
+ *  After step 2, we jump to virtual address in 0x40000000..0x5fffffff
+ *  that corresponds to next instruction to execute in this code.
+ *  After step 4, we jump to intended (linked) address of this code.
+ *
+ *
+ *      Step 0     Step1     Step 2     Step3     Step 4     Step5
+ *   ============  =====  ============  =====  ============  =====
+ *     VA      PA     PA    VA      PA     PA    VA      PA     PA
+ *   ------    --     --  ------    --     --  ------    --     --
+ *   E0..FF -> E0  -> E0  E0..FF -> E0         F0..FF -> F0  -> F0
+ *   C0..DF -> C0  -> C0  C0..DF -> C0         E0..EF -> F0  -> F0
+ *   A0..BF -> A0  -> A0  A0..BF -> A0         D8..DF -> 00  -> 00
+ *   80..9F -> 80  -> 80  80..9F -> 80         D0..D7 -> 00  -> 00
+ *   60..7F -> 60  -> 60  60..7F -> 60         ??..?? -> LM  -> LM
+ *   40..5F -> 40         40..47 -> pc  -> pc  40..5F -> pc
+ *   20..3F -> 20  -> 20  20..3F -> 20
+ *   00..1F -> 00  -> 00  00..1F -> 00
+ *
+ * Initial way 6 mappings:
+ *	vaddr=0x00000000 asid=0x01  paddr=0x00000000  ca=3  ITLB way 6 (512 MB)
+ *	vaddr=0x20000000 asid=0x01  paddr=0x20000000  ca=3  ITLB way 6 (512 MB)
+ *	vaddr=0x40000000 asid=0x01  paddr=0x40000000  ca=3  ITLB way 6 (512 MB)
+ *	vaddr=0x60000000 asid=0x01  paddr=0x60000000  ca=3  ITLB way 6 (512 MB)
+ *	vaddr=0x80000000 asid=0x01  paddr=0x80000000  ca=3  ITLB way 6 (512 MB)
+ *	vaddr=0xa0000000 asid=0x01  paddr=0xa0000000  ca=3  ITLB way 6 (512 MB)
+ *	vaddr=0xc0000000 asid=0x01  paddr=0xc0000000  ca=3  ITLB way 6 (512 MB)
+ *	vaddr=0xe0000000 asid=0x01  paddr=0xe0000000  ca=4  ITLB way 6 (512 MB)
+ *
+ * Before we begin: double check our PC is in proper range.
+ * No need to do this of course, if you know it's an expected range;
+ * but you *do* need the first two instructions below (and first label),
+ * for later steps.
+ *
+ * Need to use a '_' prefix to prevent asm from doing a 'l32r' followed by a
+ * callx0. Added a '-no-transform' to assembler options to prevent any
+ * transformations like this.
+ *
+ * WARNING:
+ *	Code below in step 2b will use this address for a computed branch.
+ */
+
+	movi	a1, 0		/* Set $sp to NULL to minimize gdb confusion
+				 * trying to walk up the stack
+				 */
+
+	_call0	1f		/* get PC in a PIC manner (don't rely on
+				 * literal constants)
+				 */
+0:	j	2f		/* a0 = pc; NOTE: we get here AGAIN in Step 2b
+				 * after remapping to 0X46000000
+				 * REMIND: likely easier to understand if we
+				 * don't do this and just branch to the right
+				 * address in Step 2b.
+				 */
+
+	.align	4
+1:	movi	a2, 0x10000000
+	movi	a3, 0x18000000
+	add	a2, a2, a0	/* a2 = 0x10000000 + original_pc */
+	bltu	a2, a3, 1f	/* is PC >= 0xF0000000, or PC < 0x08000000 ?
+				 *
+				 * Panic!  bad PC, something wasn't linked or
+				 * loaded right.
+				 * REMIND: consider do something better than
+				 *         an infinite loop here?
+				 *         BREAK? SIMCALL to debugger?
+				 */
+9:	j	9b		/* Something's wrong, PC out of expected range
+				 */
+
+1:	/*  PC seems okay, proceed. */
+
+	/* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
+
+	movi	a2, 0x40000006	/* 512MB region at vaddr 0x40000000, way 6 */
+	idtlb	a2		/* kick it out... */
+	iitlb	a2
+	isync
+
+	/* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code.
+	 *         Pages TLB Cache Attribute Should be R/W Exec w/o Caching.
+	 */
+#define CA_BYPASS	(_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#define CA_WRITEBACK	(_PAGE_CA_WB     | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+
+	srli	a3, a0, 27	/* get 128MB area containing PC ... */
+	slli	a3, a3, 27
+	addi	a3, a3, CA_BYPASS	/* bypass-cache access RWX */
+	addi	a7, a2, -1	/* 128MB region at vaddr 0x40000000, way 5 */
+	wdtlb	a3, a7		/* setup mapping... */
+	witlb	a3, a7
+	isync
+
+	/* The TLB entry in way 5 should now be visible with dtshow gdb macro.
+	 * Showing way 5
+	 * vaddr=0x40000000 asid=0x01 paddr=0x00000000 ca=3 DTLB way 5 (128 MB)
+	 *
+	 * Step 2b: jump to self, using new mapping above
+	 *          we jump back to the beginning at 0: above
+	 */
+
+	slli	a4, a0, 5	/* clear upper 5 bits of PC (get 128MB
+				 * relative offset)
+				 */
+	srli	a4, a4, 5
+	addi	a5, a2, -6	/* a5 = 0x40000000 */
+	add	a4, a4, a5	/* address of above "j 2f" in 128MB page at
+				 * vaddr 0x40000000
+				 */
+	jx	a4		/* Note: jumps to 0x46000043; xt-gdb switches
+				 * to remapped text section. Like doing a j 0b
+				 */
+
+	/* Step 3: unmap everything other than current area.
+	 *	   We start at 0x60000000, wrap around, and end with 0x20000000
+	 * NOTE:
+	 *  You can't have any breakpoint set in the kernel during this period.
+	 *  xt-gdb won't be able to remove the breakpoints and you will lose
+	 *  control after going back to the V2 MMU mappings.
+	 */
+2:	movi	a4, 0x20000000
+	add	a5, a2, a4	/* start at 0x60000000 (+6 for way 6) */
+3:	idtlb	a5		/* invalidate entry... */
+	iitlb	a5
+	add	a5, a5, a4
+	bne	a5, a2, 3b	/* loop until we get to 0x40000000 */
+
+	/* Step 4: Setup MMU with the old V2 mappings.
+	 *   Step 4a:
+	 *     This changes the size of all of pre-initialized TLB
+	 *     entries in way 6 from 512MB to 256MB and changes the
+	 *     vaddrs by dividing them in half. Only Way 6 is affected
+	 *     because currently it's the only way with variable size pages.
+	 */
+	movi	a6, 0x01000000	/* way 6 page size = 256 MB (index 1) */
+	wsr	a6, ITLBCFG	/* apply way 6 page size (Inst) */
+	wsr	a6, DTLBCFG	/* apply way 6 page size (Data) */
+	isync
+
+	/*
+	 * TLB Way 6 is now set up for setting the old V2 mappings.
+	 * The TLB Currently should look like this:
+	 *
+	 * Showing way 5
+	 * vaddr=0x40000000 asid=0x01 paddr=0xf8000000 ca=3 ITLB way 5 (128 MB)
+	 * vaddr=0x08000000 asid=0x00 paddr=0x00000000 ca=0 ITLB way 5 (128 MB)
+	 * vaddr=0x10000000 asid=0x00 paddr=0x00000000 ca=0 ITLB way 5 (128 MB)
+	 * vaddr=0x18000000 asid=0x00 paddr=0x00000000 ca=0 ITLB way 5 (128 MB)
+	 *
+	 * Showing way 6
+	 * vaddr=0x00000000 asid=0x00 paddr=0x00000000 ca=3 ITLB way 6 (256 MB)
+	 * vaddr=0x10000000 asid=0x00 paddr=0x20000000 ca=3 ITLB way 6 (256 MB)
+	 * vaddr=0x20000000 asid=0x00 paddr=0x40000000 ca=3 ITLB way 6 (256 MB)
+	 * vaddr=0x30000000 asid=0x00 paddr=0x60000000 ca=3 ITLB way 6 (256 MB)
+	 * vaddr=0x40000000 asid=0x00 paddr=0x80000000 ca=3 ITLB way 6 (256 MB)
+	 * vaddr=0x50000000 asid=0x00 paddr=0xa0000000 ca=3 ITLB way 6 (256 MB)
+	 * vaddr=0x60000000 asid=0x00 paddr=0xc0000000 ca=3 ITLB way 6 (256 MB)
+	 * vaddr=0x70000000 asid=0x00 paddr=0xe0000000 ca=3 ITLB way 6 (256 MB)
+	 */
+
+	/* Step 4b:
+	 * Set up Way 5 TLB Entries:
+	 * vaddr=0xd0000000 asid=0x01 paddr=0x00000000 ca=7 ITLB way 5 (128 MB)
+	 * vaddr=0xd8000000 asid=0x01 paddr=0x00000000 ca=3 ITLB way 5 (128 MB)
+	 */
+
+	movi	a5, 0xd0000005	/* 128MB page at 0xd0000000 (way 5) */
+	movi	a4, CA_WRITEBACK/* paddr 0x00000000, writeback */
+	wdtlb	a4, a5
+	witlb	a4, a5
+
+	movi	a5, 0xd8000005	/* 128MB page at 0xd8000000 (way 5) */
+	movi	a4, CA_BYPASS	/* paddr 0x00000000, bypass */
+	wdtlb	a4, a5
+	witlb	a4, a5
+
+
+	/* Step 4c:
+	 * Set up Way 6 TLB Entries:
+	 * vaddr=0xe0000000 asid=0x01 paddr=0xf0000000 ca=7 ITLB way 6 (256 MB)
+	 * vaddr=0xf0000000 asid=0x01 paddr=0xf0000000 ca=3 ITLB way 6 (256 MB)
+	 */
+	movi	a5, 0xe0000006	/* 256MB page at 0xe0000000 (way 6) */
+	movi	a4, 0xf0000000 + CA_WRITEBACK /* paddr 0xf0000000, writeback */
+	wdtlb	a4, a5
+	witlb	a4, a5
+
+	movi	a5, 0xf0000006	/* 256MB page at 0xf0000000 (way 6) */
+	movi	a4, 0xf0000000 + CA_BYPASS /* paddr 0xf0000000, bypass */
+	wdtlb	a4, a5
+	witlb	a4, a5
+
+	/* TODO:  local memory mapping */
+
+	isync
+
+	/* Step 4d: jump to self, using MMU v2 mappings.
+	 * Well, just jump to where we've been linked to.
+	 */
+
+	movi	a4, 1f		/* using a constant -- absolute jump */
+	jx	a4
+
+1:
+	/* Assuming VECBASE points to system RAM,
+	 * bump it up to where system RAM can now be accessed (cached).
+	 * Typically changing from  0x00001000 to 0xD0000000.
+	 */
+	movi    a2, VECBASE_RESET_VADDR
+	wsr	a2, vecbase
+
+	/* Step 5: remove temporary mapping.
+	 *         a7 = 0x40000005 (Way 5)
+	 */
+	idtlb	a7
+	iitlb	a7
+	isync			/* If you lose control here while single
+				 * stepping it's likely could be because
+				 * you had a linux break point enabled.
+				 */
+
+	/* In case it wasn't done yet, initialize PTEVADDR.
+	 * Though in reality, its reset state probably should already be zero.
+	 */
+	movi	a0, 0
+	wsr	a0, ptevaddr
+	rsync
+
+	/* It's safe to enable kernel breakpoint now. */
+	nop			/* Done! */
+
+#else /* !(defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
+	 XCHAL_HAVE_SPANNING_WAY) */
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
+	  XCHAL_HAVE_SPANNING_WAY */
+
 	.endm
 
 #endif /*__ASSEMBLY__*/
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
new file mode 100644
index 0000000..c52b656
--- /dev/null
+++ b/arch/xtensa/include/asm/vectors.h
@@ -0,0 +1,125 @@
+/*
+ * arch/xtensa/include/asm/vectors.h
+ *
+ * Xtensa macros for MMU V3 Support. Deals with re-mapping the Virtual
+ * Memory Addresses from "Virtual == Physical" to their previous V2 MMU
+ * mappings (KSEG at 0xD0000000 and KIO at 0XF0000000).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica Inc.
+ *
+ * Pete Delaney <piet at tensilica.com>
+ * Marc Gauthier <marc at tensilica.com>
+ */
+
+#ifndef _XTENSA_VECTORS_H
+#define _XTENSA_VECTORS_H
+
+#include <variant/core.h>
+
+#if defined(CONFIG_MMU)
+
+/* Will Become VECBASE */
+#define VIRTUAL_MEMORY_ADDRESS		0xD0000000
+
+/* Image Virtual Start Address */
+#define KERNELOFFSET			0xD0003000
+
+#if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+  /* MMU v3  - XCHAL_HAVE_PTP_MMU  == 1 */
+  #define PHYSICAL_MEMORY_ADDRESS	0x00000000
+  #define LOAD_MEMORY_ADDRESS		0x00003000
+#else
+  /* MMU V2 -  XCHAL_HAVE_PTP_MMU  == 0 */
+  #define PHYSICAL_MEMORY_ADDRESS	0xD0000000
+  #define LOAD_MEMORY_ADDRESS		0xD0003000
+#endif
+
+#else /* !defined(CONFIG_MMU) */
+  /* MMU Not being used - Virtual == Physical */
+
+  /* VECBASE */
+  #define VIRTUAL_MEMORY_ADDRESS	0x00002000
+
+  /* Location of the start of the kernel text, _start */
+  #define KERNELOFFSET			0x00003000
+  #define PHYSICAL_MEMORY_ADDRESS	0x00000000
+
+  /* Loaded just above possibly live vectors */
+  #define LOAD_MEMORY_ADDRESS		0x00003000
+
+#endif /* CONFIG_MMU */
+
+#define XC_VADDR(offset)		(VIRTUAL_MEMORY_ADDRESS  + offset)
+#define XC_PADDR(offset)		(PHYSICAL_MEMORY_ADDRESS + offset)
+
+/* Used to set VECBASE register */
+#define VECBASE_RESET_VADDR		VIRTUAL_MEMORY_ADDRESS
+
+#define RESET_VECTOR_VECOFS		(XCHAL_RESET_VECTOR_VADDR - \
+						VECBASE_RESET_VADDR)
+#define RESET_VECTOR_VADDR		XC_VADDR(RESET_VECTOR_VECOFS)
+
+#define RESET_VECTOR1_VECOFS		(XCHAL_RESET_VECTOR1_VADDR - \
+						VECBASE_RESET_VADDR)
+#define RESET_VECTOR1_VADDR		XC_VADDR(RESET_VECTOR1_VECOFS)
+
+#if XCHAL_HAVE_VECBASE
+
+#define USER_VECTOR_VADDR		XC_VADDR(XCHAL_USER_VECOFS)
+#define KERNEL_VECTOR_VADDR		XC_VADDR(XCHAL_KERNEL_VECOFS)
+#define DOUBLEEXC_VECTOR_VADDR		XC_VADDR(XCHAL_DOUBLEEXC_VECOFS)
+#define WINDOW_VECTORS_VADDR		XC_VADDR(XCHAL_WINDOW_OF4_VECOFS)
+#define INTLEVEL2_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL2_VECOFS)
+#define INTLEVEL3_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL3_VECOFS)
+#define INTLEVEL4_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL4_VECOFS)
+#define INTLEVEL5_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL5_VECOFS)
+#define INTLEVEL6_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL6_VECOFS)
+
+#define DEBUG_VECTOR_VADDR		XC_VADDR(XCHAL_DEBUG_VECOFS)
+
+#undef  XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_NMI_VECTOR_VADDR		XC_VADDR(XCHAL_NMI_VECOFS)
+
+#undef  XCHAL_INTLEVEL7_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_VADDR	XC_VADDR(XCHAL_INTLEVEL7_VECOFS)
+
+/*
+ * These XCHAL_* #defines from variant/core.h
+ * are not valid to use with V3 MMU. Non-XCHAL
+ * constants are defined above and should be used.
+ */
+#undef  XCHAL_VECBASE_RESET_VADDR
+#undef  XCHAL_RESET_VECTOR0_VADDR
+#undef  XCHAL_USER_VECTOR_VADDR
+#undef  XCHAL_KERNEL_VECTOR_VADDR
+#undef  XCHAL_DOUBLEEXC_VECTOR_VADDR
+#undef  XCHAL_WINDOW_VECTORS_VADDR
+#undef  XCHAL_INTLEVEL2_VECTOR_VADDR
+#undef  XCHAL_INTLEVEL3_VECTOR_VADDR
+#undef  XCHAL_INTLEVEL4_VECTOR_VADDR
+#undef  XCHAL_INTLEVEL5_VECTOR_VADDR
+#undef  XCHAL_INTLEVEL6_VECTOR_VADDR
+#undef  XCHAL_DEBUG_VECTOR_VADDR
+#undef  XCHAL_NMI_VECTOR_VADDR
+#undef  XCHAL_INTLEVEL7_VECTOR_VADDR
+
+#else
+
+#define USER_VECTOR_VADDR		XCHAL_USER_VECTOR_VADDR
+#define KERNEL_VECTOR_VADDR		XCHAL_KERNEL_VECTOR_VADDR
+#define DOUBLEEXC_VECTOR_VADDR		XCHAL_DOUBLEEXC_VECTOR_VADDR
+#define WINDOW_VECTORS_VADDR		XCHAL_WINDOW_VECTORS_VADDR
+#define INTLEVEL2_VECTOR_VADDR		XCHAL_INTLEVEL2_VECTOR_VADDR
+#define INTLEVEL3_VECTOR_VADDR		XCHAL_INTLEVEL3_VECTOR_VADDR
+#define INTLEVEL4_VECTOR_VADDR		XCHAL_INTLEVEL4_VECTOR_VADDR
+#define INTLEVEL5_VECTOR_VADDR		XCHAL_INTLEVEL5_VECTOR_VADDR
+#define INTLEVEL6_VECTOR_VADDR		XCHAL_INTLEVEL6_VECTOR_VADDR
+#define DEBUG_VECTOR_VADDR		XCHAL_DEBUG_VECTOR_VADDR
+
+#endif
+
+#endif /* _XTENSA_VECTORS_H */
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index c3a59d9..c433a56 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -12,6 +12,8 @@ obj-$(CONFIG_KGDB) += xtensa-stub.o
 obj-$(CONFIG_PCI) += pci.o
 obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
 
+AFLAGS_head.o += -mtext-section-literals
+
 # In the Xtensa architecture, assembly generates literals which must always
 # precede the L32R instruction with a relative offset less than 256 kB.
 # Therefore, the .text and .literal section must be combined in parenthesis
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index df88f98..8d73202 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -48,17 +48,37 @@
 	 */
 
 	__HEAD
+	.begin	no-absolute-literals
+
 ENTRY(_start)
 
-	_j	2f
+	/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
+	wsr     a2, excsave1
+	_j	_SetupMMU
+
+	.align	4
+	.literal_position
+.Lstartup:
+	.word	_startup
+
 	.align	4
-1:	.word	_startup
-2:	l32r	a0, 1b
+	.global _SetupMMU
+_SetupMMU:
+	Offset = _SetupMMU - _start
+
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+	initialize_mmu
+#endif
+	.end	no-absolute-literals
+
+	l32r	a0, .Lstartup
 	jx	a0
 
+
 ENDPROC(_start)
 
-	.section .init.text, "ax"
+	__INIT
+	.literal_position
 
 ENTRY(_startup)
 
@@ -67,10 +87,6 @@ ENTRY(_startup)
 	movi	a0, LOCKLEVEL
 	wsr	a0, ps
 
-	/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
-
-	wsr	a2, excsave1
-
 	/* Start with a fresh windowbase and windowstart.  */
 
 	movi	a1, 1
@@ -156,8 +172,6 @@ ENTRY(_startup)
 
 	isync
 
-	initialize_mmu
-
 	/* Unpack data sections
 	 *
 	 * The linker script used to build the Linux kernel image
@@ -205,6 +219,10 @@ ENTRY(_startup)
 
 	___flush_dcache_all a2 a3
 #endif
+	memw
+	isync
+	___invalidate_icache_all a2 a3
+	isync
 
 	/* Setup stack and enable window exceptions (keep irqs disabled) */
 
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 82109b42..a7e1d08 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -50,6 +50,7 @@
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/thread_info.h>
+#include <asm/vectors.h>
 
 #define WINDOW_VECTORS_SIZE   0x180
 
@@ -220,7 +221,7 @@ ENTRY(_DoubleExceptionVector)
 
 	xsr	a0, depc		# get DEPC, save a0
 
-	movi	a3, XCHAL_WINDOW_VECTORS_VADDR
+	movi	a3, WINDOW_VECTORS_VADDR
 	_bltu	a0, a3, .Lfixup
 	addi	a3, a3, WINDOW_VECTORS_SIZE
 	_bgeu	a0, a3, .Lfixup
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 1469524..21acd11 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -18,6 +18,7 @@
 #include <asm/page.h>
 #include <asm/thread_info.h>
 
+#include <asm/vectors.h>
 #include <variant/core.h>
 #include <platform/hardware.h>
 OUTPUT_ARCH(xtensa)
@@ -30,7 +31,7 @@ jiffies = jiffies_64;
 #endif
 
 #ifndef KERNELOFFSET
-#define KERNELOFFSET 0xd0001000
+#define KERNELOFFSET 0xd0003000
 #endif
 
 /* Note: In the following macros, it would be nice to specify only the
@@ -185,16 +186,16 @@ SECTIONS
 
   SECTION_VECTOR (_WindowVectors_text,
 		  .WindowVectors.text,
-		  XCHAL_WINDOW_VECTORS_VADDR, 4,
+		  WINDOW_VECTORS_VADDR, 4,
 		  .dummy)
   SECTION_VECTOR (_DebugInterruptVector_literal,
 		  .DebugInterruptVector.literal,
-		  XCHAL_DEBUG_VECTOR_VADDR - 4,
+		  DEBUG_VECTOR_VADDR - 4,
 		  SIZEOF(.WindowVectors.text),
 		  .WindowVectors.text)
   SECTION_VECTOR (_DebugInterruptVector_text,
 		  .DebugInterruptVector.text,
-		  XCHAL_DEBUG_VECTOR_VADDR,
+		  DEBUG_VECTOR_VADDR,
 		  4,
 		  .DebugInterruptVector.literal)
 #undef LAST
@@ -202,7 +203,7 @@ SECTIONS
 #if XCHAL_EXCM_LEVEL >= 2
   SECTION_VECTOR (_Level2InterruptVector_text,
 		  .Level2InterruptVector.text,
-		  XCHAL_INTLEVEL2_VECTOR_VADDR,
+		  INTLEVEL2_VECTOR_VADDR,
 		  SIZEOF(LAST), LAST)
 # undef LAST
 # define LAST	.Level2InterruptVector.text
@@ -210,7 +211,7 @@ SECTIONS
 #if XCHAL_EXCM_LEVEL >= 3
   SECTION_VECTOR (_Level3InterruptVector_text,
 		  .Level3InterruptVector.text,
-		  XCHAL_INTLEVEL3_VECTOR_VADDR,
+		  INTLEVEL3_VECTOR_VADDR,
 		  SIZEOF(LAST), LAST)
 # undef LAST
 # define LAST	.Level3InterruptVector.text
@@ -218,7 +219,7 @@ SECTIONS
 #if XCHAL_EXCM_LEVEL >= 4
   SECTION_VECTOR (_Level4InterruptVector_text,
 		  .Level4InterruptVector.text,
-		  XCHAL_INTLEVEL4_VECTOR_VADDR,
+		  INTLEVEL4_VECTOR_VADDR,
 		  SIZEOF(LAST), LAST)
 # undef LAST
 # define LAST	.Level4InterruptVector.text
@@ -226,7 +227,7 @@ SECTIONS
 #if XCHAL_EXCM_LEVEL >= 5
   SECTION_VECTOR (_Level5InterruptVector_text,
 		  .Level5InterruptVector.text,
-		  XCHAL_INTLEVEL5_VECTOR_VADDR,
+		  INTLEVEL5_VECTOR_VADDR,
 		  SIZEOF(LAST), LAST)
 # undef LAST
 # define LAST	.Level5InterruptVector.text
@@ -234,39 +235,39 @@ SECTIONS
 #if XCHAL_EXCM_LEVEL >= 6
   SECTION_VECTOR (_Level6InterruptVector_text,
 		  .Level6InterruptVector.text,
-		  XCHAL_INTLEVEL6_VECTOR_VADDR,
+		  INTLEVEL6_VECTOR_VADDR,
 		  SIZEOF(LAST), LAST)
 # undef LAST
 # define LAST	.Level6InterruptVector.text
 #endif
   SECTION_VECTOR (_KernelExceptionVector_literal,
 		  .KernelExceptionVector.literal,
-		  XCHAL_KERNEL_VECTOR_VADDR - 4,
+		  KERNEL_VECTOR_VADDR - 4,
 		  SIZEOF(LAST), LAST)
 #undef LAST
   SECTION_VECTOR (_KernelExceptionVector_text,
 		  .KernelExceptionVector.text,
-		  XCHAL_KERNEL_VECTOR_VADDR,
+		  KERNEL_VECTOR_VADDR,
 		  4,
 		  .KernelExceptionVector.literal)
   SECTION_VECTOR (_UserExceptionVector_literal,
 		  .UserExceptionVector.literal,
-		  XCHAL_USER_VECTOR_VADDR - 4,
+		  USER_VECTOR_VADDR - 4,
 		  SIZEOF(.KernelExceptionVector.text),
 		  .KernelExceptionVector.text)
   SECTION_VECTOR (_UserExceptionVector_text,
 		  .UserExceptionVector.text,
-		  XCHAL_USER_VECTOR_VADDR,
+		  USER_VECTOR_VADDR,
 		  4,
 		  .UserExceptionVector.literal)
   SECTION_VECTOR (_DoubleExceptionVector_literal,
 		  .DoubleExceptionVector.literal,
-		  XCHAL_DOUBLEEXC_VECTOR_VADDR - 16,
+		  DOUBLEEXC_VECTOR_VADDR - 16,
 		  SIZEOF(.UserExceptionVector.text),
 		  .UserExceptionVector.text)
   SECTION_VECTOR (_DoubleExceptionVector_text,
 		  .DoubleExceptionVector.text,
-		  XCHAL_DOUBLEEXC_VECTOR_VADDR,
+		  DOUBLEEXC_VECTOR_VADDR,
 		  32,
 		  .DoubleExceptionVector.literal)
 
@@ -284,11 +285,26 @@ SECTIONS
   . = ALIGN(0x10);
   .bootstrap : { *(.bootstrap.literal .bootstrap.text .bootstrap.data) }
 
-  .ResetVector.text XCHAL_RESET_VECTOR_VADDR :
+  .ResetVector.text RESET_VECTOR_VADDR :
   {
     *(.ResetVector.text)
   }
 
+
+  /*
+   * This is a remapped copy of the Secondary Reset Vector Code.
+   * It keeps gdb in sync with the PC after switching
+   * to the temporary mapping used while setting up
+   * the V2 MMU mappings for Linux.
+   *
+   * Only debug information about this section is put in the kernel image.
+   */
+  .SecondaryResetVector.remapped_text 0x46000000 (INFO):
+  {
+	*(.SecondaryResetVector.remapped_text)
+  }
+
+
   .xt.lit : { *(.xt.lit) }
   .xt.prop : { *(.xt.prop) }
 
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 0f77f9d..dc16991 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -31,8 +31,8 @@ void __init init_mmu(void)
 	 * the best value to write.  Also, when changing PGSZID<w>
 	 * fields, the corresponding TLB must be flushed.
 	 */
-	set_itlbcfg_register(0);
-	set_dtlbcfg_register(0);
+	set_itlbcfg_register(0x01000000);
+	set_dtlbcfg_register(0x01000000);
 	flush_tlb_all();
 
 	/* Set rasid register to a known value. */
-- 
1.7.7.6



More information about the linux-xtensa mailing list