2005-04-16 22:20:36 +00:00
|
|
|
/* Copyright 2002 Andi Kleen, SuSE Labs */
|
2006-09-26 08:52:32 +00:00
|
|
|
|
|
|
|
#include <linux/linkage.h>
|
|
|
|
#include <asm/dwarf2.h>
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* ISO C memset - set a memory block to a byte value.
|
|
|
|
*
|
|
|
|
* rdi destination
|
|
|
|
* rsi value (char)
|
|
|
|
* rdx count (bytes)
|
|
|
|
*
|
|
|
|
* rax original destination
|
|
|
|
*/
|
2009-12-18 16:16:03 +00:00
|
|
|
/*
 * Fast string-instruction variant of memset, kept in the alternative-
 * instructions replacement section.  The alternatives patching machinery
 * copies this sequence over the generic memset entry at boot on CPUs
 * that advertise X86_FEATURE_REP_GOOD (see the .altinstructions table
 * at the bottom of this file).
 *
 * rdi	destination
 * rsi	value (char)
 * rdx	count (bytes)
 *
 * rax	original destination (returned)
 *
 * Clobbers: rcx, rdx, r8, r9, flags.
 */
	.section .altinstr_replacement, "ax", @progbits
.Lmemset_c:
	movq %rdi,%r9			/* save dst for the return value */
	movl %edx,%r8d
	andl $7,%r8d			/* r8 = trailing byte count (len % 8) */
	movl %edx,%ecx
	shrl $3,%ecx			/* rcx = qword count (len / 8) */
	/* expand byte value */
	movzbl %sil,%esi
	movabs $0x0101010101010101,%rax
	mulq %rsi /* with rax, clobbers rdx */
	rep stosq			/* fill len/8 qwords */
	movl %r8d,%ecx
	rep stosb			/* fill the remaining 0..7 bytes */
	movq %r9,%rax			/* return original destination */
	ret
.Lmemset_e:
	.previous
|
2006-09-26 08:52:32 +00:00
|
|
|
|
|
|
|
/*
 * Generic memset for CPUs where rep-string instructions are slow.
 *
 * void *memset(void *dst, int c, size_t n)
 *
 * rdi	destination
 * rsi	value (char)
 * rdx	count (bytes)
 *
 * rax	original destination (returned)
 *
 * Strategy: expand the fill byte to a full qword, align the destination
 * to 8 bytes with one unaligned store if needed, then blast 64 bytes
 * per iteration, finishing with 8-byte and 1-byte tail loops.
 *
 * Clobbers: rcx, rdx, r8, r9, r10, r11, flags (kernel ABI: these are
 * all caller-saved here).
 */
ENTRY(memset)
ENTRY(__memset)
	CFI_STARTPROC
	movq %rdi,%r10			/* r10 = original dst, for return */
	movq %rdx,%r11			/* r11 = remaining byte count */

	/* expand byte value */
	movzbl %sil,%ecx
	movabs $0x0101010101010101,%rax
	mul %rcx /* with rax, clobbers rdx */
	/* rax now holds the fill byte replicated into all 8 lanes */

	/* align dst */
	movl %edi,%r9d
	andl $7,%r9d			/* r9 = dst misalignment (dst % 8) */
	jnz .Lbad_alignment		/* slow path fixes up alignment */
	CFI_REMEMBER_STATE
.Lafter_bad_alignment:

	movl %r11d,%ecx
	shrl $6,%ecx			/* ecx = number of 64-byte chunks */
	jz .Lhandle_tail		/* fewer than 64 bytes left */

	.p2align 4
.Lloop_64:
	/* eight 8-byte stores per iteration = 64 bytes */
	decl %ecx
	movq %rax,(%rdi)
	movq %rax,8(%rdi)
	movq %rax,16(%rdi)
	movq %rax,24(%rdi)
	movq %rax,32(%rdi)
	movq %rax,40(%rdi)
	movq %rax,48(%rdi)
	movq %rax,56(%rdi)
	leaq 64(%rdi),%rdi		/* lea: advance dst without touching flags */
	jnz .Lloop_64

	/* Handle tail in loops. The loops should be faster than hard
	   to predict jump tables. */
	.p2align 4
.Lhandle_tail:
	movl %r11d,%ecx
	andl $63&(~7),%ecx		/* ecx = tail bytes in whole qwords (len%64, rounded down to 8) */
	jz .Lhandle_7
	shrl $3,%ecx			/* convert to qword count */
	.p2align 4
.Lloop_8:
	decl %ecx
	movq %rax,(%rdi)
	leaq 8(%rdi),%rdi
	jnz .Lloop_8

.Lhandle_7:
	movl %r11d,%ecx
	andl $7,%ecx			/* ecx = final 0..7 stray bytes */
	jz .Lende
	.p2align 4
.Lloop_1:
	decl %ecx
	movb %al,(%rdi)
	leaq 1(%rdi),%rdi
	jnz .Lloop_1

.Lende:
	movq %r10,%rax			/* return original destination */
	ret

	CFI_RESTORE_STATE
.Lbad_alignment:
	/* dst is not 8-byte aligned; r9 holds dst % 8, r11 the full count */
	cmpq $7,%r11
	jbe .Lhandle_7			/* tiny fill: byte loop handles it all */
	movq %rax,(%rdi) /* unaligned store */
	/* advance dst to the next 8-byte boundary; the unaligned qword
	   store above already covered the bytes being skipped */
	movq $8,%r8
	subq %r9,%r8			/* r8 = 8 - (dst % 8) bytes consumed */
	addq %r8,%rdi
	subq %r8,%r11
	jmp .Lafter_bad_alignment
.Lfinal:
	CFI_ENDPROC
ENDPROC(memset)
ENDPROC(__memset)
|
2006-02-03 20:51:02 +00:00
|
|
|
|
|
|
|
	/* Some CPUs run faster using the string instructions.
	   It is also a lot simpler. Use this when possible */

#include <asm/cpufeature.h>

	/*
	 * Alternative-instruction descriptor: tells the boot-time patcher
	 * to overwrite the generic memset (memset .. .Lfinal) with the
	 * rep-string variant (.Lmemset_c .. .Lmemset_e) on CPUs that have
	 * X86_FEATURE_REP_GOOD.  Field order/sizes must match
	 * struct alt_instr in <asm/alternative.h>.
	 */
	.section .altinstructions,"a"
	.align 8
	.quad memset			/* original instruction address */
	.quad .Lmemset_c		/* replacement instruction address */
	.word X86_FEATURE_REP_GOOD	/* CPU feature gating the patch */
	.byte .Lfinal - memset		/* length of original */
	.byte .Lmemset_e - .Lmemset_c	/* length of replacement */
	.previous
|