2008-01-30 12:32:27 +00:00
|
|
|
#
|
|
|
|
# Makefile for the linux kernel.
|
|
|
|
#
|
|
|
|
|
2008-06-02 06:26:23 +00:00
|
|
|
# Targets always built in this directory but not part of the obj-y link list;
# includes both head_$(BITS).o and head$(BITS).o (distinct files) plus the
# linker script vmlinux.lds.
extra-y := head_$(BITS).o head$(BITS).o head.o init_task.o vmlinux.lds
|
2008-01-30 12:32:27 +00:00
|
|
|
|
|
|
|
# vmlinux.lds is preprocessed; -U undefines the machine-name macro so it is
# not accidentally expanded inside the linker script.
CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
|
2008-02-14 07:38:49 +00:00
|
|
|
|
2008-10-06 23:06:12 +00:00
|
|
|
ifdef CONFIG_FUNCTION_TRACER
|
2008-07-09 13:42:09 +00:00
|
|
|
# Do not profile debug and lowlevel utilities
|
2008-07-17 11:26:50 +00:00
|
|
|
# CFLAGS_REMOVE_foo.o strips the listed flag (-pg, the profiling call
# insertion) when compiling that single object.
CFLAGS_REMOVE_tsc.o = -pg
|
2008-05-15 01:30:32 +00:00
|
|
|
CFLAGS_REMOVE_rtc.o = -pg
|
2008-07-23 20:28:58 +00:00
|
|
|
CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
|
2008-10-23 13:33:08 +00:00
|
|
|
# NOTE(review): ftrace.o implements the tracer itself; presumably excluded
# so the tracer does not instrument its own code.
CFLAGS_REMOVE_ftrace.o = -pg
|
2008-11-04 09:42:23 +00:00
|
|
|
CFLAGS_REMOVE_early_printk.o = -pg
|
2008-05-15 01:30:32 +00:00
|
|
|
endif
|
|
|
|
|
2008-02-14 07:38:49 +00:00
|
|
|
#
|
|
|
|
# vsyscalls (which work on the user stack) should have
|
|
|
|
# no stack-protector checks:
|
|
|
|
#
|
|
|
|
# cc-option yields -fno-stack-protector only if the compiler accepts it.
nostackp := $(call cc-option, -fno-stack-protector)
|
|
|
|
CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp)
|
|
|
|
CFLAGS_hpet.o := $(nostackp)
|
2008-07-11 13:09:15 +00:00
|
|
|
CFLAGS_tsc.o := $(nostackp)
|
2008-02-23 06:06:55 +00:00
|
|
|
CFLAGS_paravirt.o := $(nostackp)
|
2009-06-17 23:28:09 +00:00
|
|
|
# GCOV_PROFILE_foo.o := n disables gcov coverage instrumentation for the
# same set of objects.
GCOV_PROFILE_vsyscall_64.o := n
|
|
|
|
GCOV_PROFILE_hpet.o := n
|
2009-07-05 19:08:06 +00:00
|
|
|
GCOV_PROFILE_tsc.o := n
|
|
|
|
GCOV_PROFILE_paravirt.o := n
|
2008-01-30 12:32:27 +00:00
|
|
|
|
2008-11-25 02:24:11 +00:00
|
|
|
# Core objects: obj-y entries are always linked into the kernel;
# obj-$(CONFIG_FOO) entries are built only when CONFIG_FOO=y.
obj-y := process_$(BITS).o signal.o entry_$(BITS).o
|
2008-10-16 09:32:24 +00:00
|
|
|
obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
|
2009-08-20 14:47:19 +00:00
|
|
|
obj-y += time.o ioport.o ldt.o dumpstack.o
|
2009-08-19 12:48:38 +00:00
|
|
|
obj-y += setup.o x86_init.o i8259.o irqinit.o
|
2008-07-10 14:21:38 +00:00
|
|
|
obj-$(CONFIG_X86_VISWS) += visws_quirks.o
|
2009-02-17 19:35:16 +00:00
|
|
|
obj-$(CONFIG_X86_32) += probe_roms_32.o
|
2008-01-30 12:32:27 +00:00
|
|
|
obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
|
|
|
|
obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
|
2008-06-21 23:25:37 +00:00
|
|
|
obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o
|
2008-06-17 02:58:28 +00:00
|
|
|
obj-y += bootflag.o e820.o
|
2008-04-08 16:20:43 +00:00
|
|
|
obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o
|
2009-06-01 18:13:57 +00:00
|
|
|
obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
|
2008-07-01 18:43:34 +00:00
|
|
|
obj-y += tsc.o io_delay.o rtc.o
|
2008-01-30 12:32:27 +00:00
|
|
|
|
2008-03-21 14:23:19 +00:00
|
|
|
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
|
2008-03-10 22:28:04 +00:00
|
|
|
obj-y += process.o
|
2008-07-29 17:29:19 +00:00
|
|
|
obj-y += i387.o xsave.o
|
2008-01-30 12:32:27 +00:00
|
|
|
obj-y += ptrace.o
|
|
|
|
# tls.o is needed on native 32-bit and for 32-bit emulation on 64-bit.
obj-$(CONFIG_X86_32) += tls.o
|
|
|
|
obj-$(CONFIG_IA32_EMULATION) += tls.o
|
|
|
|
obj-y += step.o
|
x86, intel_txt: Intel TXT boot support
This patch adds kernel configuration and boot support for Intel Trusted
Execution Technology (Intel TXT).
Intel's technology for safer computing, Intel Trusted Execution
Technology (Intel TXT), defines platform-level enhancements that
provide the building blocks for creating trusted platforms.
Intel TXT was formerly known by the code name LaGrande Technology (LT).
Intel TXT in Brief:
o Provides dynamic root of trust for measurement (DRTM)
o Data protection in case of improper shutdown
o Measurement and verification of launched environment
Intel TXT is part of the vPro(TM) brand and is also available on some
non-vPro systems. It is currently available on desktop systems based on
the Q35, X38, Q45, and Q43 Express chipsets (e.g. Dell Optiplex 755, HP
dc7800, etc.) and mobile systems based on the GM45, PM45, and GS45
Express chipsets.
For more information, see http://www.intel.com/technology/security/.
This site also has a link to the Intel TXT MLE Developers Manual, which
has been updated for the new released platforms.
A much more complete description of how these patches support TXT, how to
configure a system for it, etc. is in the Documentation/intel_txt.txt file
in this patch.
This patch provides the TXT support routines for complete functionality,
documentation for TXT support and for the changes to the boot_params structure,
and boot detection of a TXT launch. Attempts to shutdown (reboot, Sx) the system
will result in platform resets; subsequent patches will support these shutdown modes
properly.
Documentation/intel_txt.txt | 210 +++++++++++++++++++++
Documentation/x86/zero-page.txt | 1
arch/x86/include/asm/bootparam.h | 3
arch/x86/include/asm/fixmap.h | 3
arch/x86/include/asm/tboot.h | 197 ++++++++++++++++++++
arch/x86/kernel/Makefile | 1
arch/x86/kernel/setup.c | 4
arch/x86/kernel/tboot.c | 379 +++++++++++++++++++++++++++++++++++++++
security/Kconfig | 30 +++
9 files changed, 827 insertions(+), 1 deletion(-)
Signed-off-by: Joseph Cihula <joseph.cihula@intel.com>
Signed-off-by: Shane Wang <shane.wang@intel.com>
Signed-off-by: Gang Wei <gang.wei@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
2009-07-01 02:30:59 +00:00
|
|
|
# Intel Trusted Execution Technology (TXT) boot support.
obj-$(CONFIG_INTEL_TXT) += tboot.o
|
2008-01-30 12:32:27 +00:00
|
|
|
obj-$(CONFIG_STACKTRACE) += stacktrace.o
|
|
|
|
# Descend into subdirectories with their own Makefiles.
obj-y += cpu/
|
|
|
|
obj-y += acpi/
|
2009-08-14 19:23:29 +00:00
|
|
|
obj-$(CONFIG_SFI) += sfi.o
|
2009-01-27 16:17:55 +00:00
|
|
|
obj-y += reboot.o
|
2008-01-30 12:32:27 +00:00
|
|
|
obj-$(CONFIG_MCA) += mca_32.o
|
|
|
|
obj-$(CONFIG_X86_MSR) += msr.o
|
|
|
|
obj-$(CONFIG_X86_CPUID) += cpuid.o
|
|
|
|
obj-$(CONFIG_PCI) += early-quirks.o
|
2008-02-04 15:47:55 +00:00
|
|
|
# apm.o is a composite object; apm-y names the parts it is built from.
apm-y := apm_32.o
|
|
|
|
obj-$(CONFIG_APM) += apm.o
|
2009-01-27 16:07:08 +00:00
|
|
|
obj-$(CONFIG_SMP) += smp.o
|
2009-02-17 17:09:24 +00:00
|
|
|
obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o
|
2009-01-27 03:56:47 +00:00
|
|
|
obj-$(CONFIG_SMP) += setup_percpu.o
|
2009-01-27 03:56:48 +00:00
|
|
|
obj-$(CONFIG_X86_64_SMP) += tsc_sync.o
|
2008-01-30 12:32:27 +00:00
|
|
|
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o
|
2008-04-04 19:43:18 +00:00
|
|
|
obj-$(CONFIG_X86_MPPARSE) += mpparse.o
|
2009-02-17 22:12:48 +00:00
|
|
|
obj-y += apic/
|
2008-01-30 12:32:27 +00:00
|
|
|
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
|
ftrace: dynamic enabling/disabling of function calls
This patch adds a feature to dynamically replace the ftrace code
with the jmps to allow a kernel with ftrace configured to run
as fast as it can without it configured.
The way this works, is on bootup (if ftrace is enabled), a ftrace
function is registered to record the instruction pointer of all
places that call the function.
Later, if there's still any code to patch, a kthread is awoken
(rate limited to at most once a second) that performs a stop_machine,
and replaces all the code that was called with a jmp over the call
to ftrace. It only replaces what was found the previous time. Typically
the system reaches equilibrium quickly after bootup and there's no code
patching needed at all.
e.g.
call ftrace /* 5 bytes */
is replaced with
jmp 3f /* jmp is 2 bytes and we jump 3 forward */
3:
When we want to enable ftrace for function tracing, the IP recording
is removed, and stop_machine is called again to replace all the locations
that were recorded back to the call of ftrace. When it is disabled,
we replace the code back to the jmp.
Allocation is done by the kthread. If the ftrace recording function is
called, and we don't have any record slots available, then we simply
skip that call. Once a second a new page (if needed) is allocated for
recording new ftrace function calls. A large batch is allocated at
boot up to get most of the calls there.
Because we do this via stop_machine, we don't have to worry about another
CPU executing a ftrace call as we modify it. But we do need to worry
about NMI's so all functions that might be called via nmi must be
annotated with notrace_nmi. When this code is configured in, the NMI code
will not call notrace.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-05-12 19:20:42 +00:00
|
|
|
# Several tracer options all pull in ftrace.o; kbuild de-duplicates
# repeated entries in the object list.
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
|
2009-03-13 16:02:17 +00:00
|
|
|
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
|
|
|
|
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
|
2008-01-30 12:32:27 +00:00
|
|
|
obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
|
|
|
|
obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
|
|
|
|
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
|
|
|
|
obj-$(CONFIG_KPROBES) += kprobes.o
|
2009-06-04 01:46:19 +00:00
|
|
|
obj-$(CONFIG_MODULES) += module.o
|
2008-01-30 12:32:27 +00:00
|
|
|
obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o
|
|
|
|
obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o
|
2008-04-17 18:05:37 +00:00
|
|
|
obj-$(CONFIG_KGDB) += kgdb.o
|
2008-01-30 12:32:27 +00:00
|
|
|
obj-$(CONFIG_VM86) += vm86_32.o
|
|
|
|
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
|
|
|
|
|
|
|
|
obj-$(CONFIG_HPET_TIMER) += hpet.o
|
2009-09-02 14:37:17 +00:00
|
|
|
obj-$(CONFIG_APB_TIMER) += apb_timer.o
|
2007-10-17 20:06:30 +00:00
|
|
|
|
2008-01-30 12:32:27 +00:00
|
|
|
obj-$(CONFIG_K8_NB) += k8.o
|
2008-01-30 12:34:09 +00:00
|
|
|
obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o
|
|
|
|
obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
|
2008-01-30 12:32:27 +00:00
|
|
|
|
|
|
|
obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o
|
2008-02-22 17:21:36 +00:00
|
|
|
obj-$(CONFIG_KVM_GUEST) += kvm.o
|
2008-02-15 19:52:48 +00:00
|
|
|
obj-$(CONFIG_KVM_CLOCK) += kvmclock.o
|
x86: Fix performance regression caused by paravirt_ops on native kernels
Xiaohui Xin and some other folks at Intel have been looking into what's
behind the performance hit of paravirt_ops when running native.
It appears that the hit is entirely due to the paravirtualized
spinlocks introduced by:
| commit 8efcbab674de2bee45a2e4cdf97de16b8e609ac8
| Date: Mon Jul 7 12:07:51 2008 -0700
|
| paravirt: introduce a "lock-byte" spinlock implementation
The extra call/return in the spinlock path is somehow
causing an increase in the cycles/instruction of somewhere around 2-7%
(seems to vary quite a lot from test to test). The working theory is
that the CPU's pipeline is getting upset about the
call->call->locked-op->return->return, and seems to be failing to
speculate (though I haven't seen anything definitive about the precise
reasons). This doesn't entirely make sense, because the performance
hit is also visible on unlock and other operations which don't involve
locked instructions. But spinlock operations clearly swamp all the
other pvops operations, even though I can't imagine that they're
nearly as common (there's only a .05% increase in instructions
executed).
If I disable just the pv-spinlock calls, my tests show that pvops is
identical to non-pvops performance on native (my measurements show that
it is actually about .1% faster, but Xiaohui shows a .05% slowdown).
Summary of results, averaging 10 runs of the "mmperf" test, using a
no-pvops build as baseline:
nopv Pv-nospin Pv-spin
CPU cycles 100.00% 99.89% 102.18%
instructions 100.00% 100.10% 100.15%
CPI 100.00% 99.79% 102.03%
cache ref 100.00% 100.84% 100.28%
cache miss 100.00% 90.47% 88.56%
cache miss rate 100.00% 89.72% 88.31%
branches 100.00% 99.93% 100.04%
branch miss 100.00% 103.66% 107.72%
branch miss rt 100.00% 103.73% 107.67%
wallclock 100.00% 99.90% 102.20%
The clear effect here is that the 2% increase in CPI is
directly reflected in the final wallclock time.
(The other interesting effect is that the more ops are
out of line calls via pvops, the lower the cache access
and miss rates. Not too surprising, but it suggests that
the non-pvops kernel is over-inlined. On the flipside,
the branch misses go up correspondingly...)
So, what's the fix?
Paravirt patching turns all the pvops calls into direct calls, so
_spin_lock etc do end up having direct calls. For example, the compiler
generated code for paravirtualized _spin_lock is:
<_spin_lock+0>: mov %gs:0xb4c8,%rax
<_spin_lock+9>: incl 0xffffffffffffe044(%rax)
<_spin_lock+15>: callq *0xffffffff805a5b30
<_spin_lock+22>: retq
The indirect call will get patched to:
<_spin_lock+0>: mov %gs:0xb4c8,%rax
<_spin_lock+9>: incl 0xffffffffffffe044(%rax)
<_spin_lock+15>: callq <__ticket_spin_lock>
<_spin_lock+20>: nop; nop /* or whatever 2-byte nop */
<_spin_lock+22>: retq
One possibility is to inline _spin_lock, etc, when building an
optimised kernel (ie, when there's no spinlock/preempt
instrumentation/debugging enabled). That will remove the outer
call/return pair, returning the instruction stream to a single
call/return, which will presumably execute the same as the non-pvops
case. The downsides are: 1) it will replicate the
preempt_disable/enable code at each lock/unlock callsite; this code is
fairly small, but not nothing; and 2) the spinlock definitions are
already a very heavily tangled mass of #ifdefs and other preprocessor
magic, and making any changes will be non-trivial.
The other obvious answer is to disable pv-spinlocks. Making them a
separate config option is fairly easy, and it would be trivial to
enable them only when Xen is enabled (as the only non-default user).
But it doesn't really address the common case of a distro build which
is going to have Xen support enabled, and leaves the open question of
whether the native performance cost of pv-spinlocks is worth the
performance improvement on a loaded Xen system (10% saving of overall
system CPU when guests block rather than spin). Still it is a
reasonable short-term workaround.
[ Impact: fix pvops performance regression when running native ]
Analysed-by: "Xin Xiaohui" <xiaohui.xin@intel.com>
Analysed-by: "Li Xin" <xin.li@intel.com>
Analysed-by: "Nakajima Jun" <jun.nakajima@intel.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Xen-devel <xen-devel@lists.xensource.com>
LKML-Reference: <4A0B62F7.5030802@goop.org>
[ fixed the help text ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-14 00:16:55 +00:00
|
|
|
obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o
|
|
|
|
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
|
2008-06-03 14:17:29 +00:00
|
|
|
obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
|
2008-01-30 12:33:18 +00:00
|
|
|
|
2008-05-07 10:39:56 +00:00
|
|
|
obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o
|
2008-01-30 12:32:27 +00:00
|
|
|
|
2008-02-04 15:47:55 +00:00
|
|
|
# scx200.o is a composite object built from scx200_32.o.
obj-$(CONFIG_SCx200) += scx200.o
|
|
|
|
scx200-y += scx200_32.o
|
2008-01-30 12:32:27 +00:00
|
|
|
|
2008-04-29 07:59:53 +00:00
|
|
|
obj-$(CONFIG_OLPC) += olpc.o
|
2009-08-29 12:54:20 +00:00
|
|
|
obj-$(CONFIG_X86_MRST) += mrst.o
|
2008-04-29 07:59:53 +00:00
|
|
|
|
2008-09-23 10:08:44 +00:00
|
|
|
# microcode.o is a composite object: always the core, plus the Intel
# and/or AMD loaders when the corresponding options are enabled.
microcode-y := microcode_core.o
|
|
|
|
microcode-$(CONFIG_MICROCODE_INTEL) += microcode_intel.o
|
|
|
|
microcode-$(CONFIG_MICROCODE_AMD) += microcode_amd.o
|
|
|
|
obj-$(CONFIG_MICROCODE) += microcode.o
|
|
|
|
|
2008-10-05 19:21:32 +00:00
|
|
|
obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
|
|
|
|
|
2009-01-23 10:56:16 +00:00
|
|
|
obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
|
2008-12-16 20:17:36 +00:00
|
|
|
|
2008-01-30 12:32:27 +00:00
|
|
|
###
|
|
|
|
# 64 bit specific files
|
|
|
|
# Parse-time conditional: these entries exist only for x86-64 builds.
ifeq ($(CONFIG_X86_64),y)
|
2009-03-04 18:59:18 +00:00
|
|
|
obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o
|
2009-02-17 17:09:24 +00:00
|
|
|
obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
|
|
|
|
obj-$(CONFIG_AUDIT) += audit_64.o
|
|
|
|
|
|
|
|
obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o
|
|
|
|
obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
|
|
|
|
obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o
|
|
|
|
|
|
|
|
obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
|
2009-03-24 06:14:29 +00:00
|
|
|
obj-y += vsmp_64.o
|
2008-01-30 12:32:27 +00:00
|
|
|
endif
|