; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s

define ptr @test_memcpy1_generic(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memcpy1_generic:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memcpy_element_unordered_atomic_1@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 4 %P, ptr align 4 %Q, i32 1024, i32 1)
  ret ptr %P
  ; 3rd arg (%edx) -- length
}

define ptr @test_memcpy2_generic(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memcpy2_generic:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memcpy_element_unordered_atomic_2@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 4 %P, ptr align 4 %Q, i32 1024, i32 2)
  ret ptr %P
  ; 3rd arg (%edx) -- length
}

define ptr @test_memcpy4_generic(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memcpy4_generic:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memcpy_element_unordered_atomic_4@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 4 %P, ptr align 4 %Q, i32 1024, i32 4)
  ret ptr %P
  ; 3rd arg (%edx) -- length
}

define ptr @test_memcpy8(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memcpy8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memcpy_element_unordered_atomic_8@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 8 %P, ptr align 8 %Q, i32 1024, i32 8)
  ret ptr %P
  ; 3rd arg (%edx) -- length
}

define ptr @test_memcpy16_generic(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memcpy16_generic:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memcpy_element_unordered_atomic_16@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 16 %P, ptr align 16 %Q, i32 1024, i32 16)
  ret ptr %P
  ; 3rd arg (%edx) -- length
}

define void @test_memcpy_args(ptr %Storage) {
; CHECK-LABEL: test_memcpy_args:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movq (%rdi), %rax
; CHECK-NEXT:    movq 8(%rdi), %rsi
; CHECK-NEXT:    movq %rax, %rdi
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memcpy_element_unordered_atomic_4@PLT
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  %Dst = load ptr, ptr %Storage
  %Src.addr = getelementptr ptr, ptr %Storage, i64 1
  %Src = load ptr, ptr %Src.addr
  ; 1st arg (%rdi)
  ; 2nd arg (%rsi)
  ; 3rd arg (%edx) -- length
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 4 %Dst, ptr align 4 %Src, i32 1024, i32 4)
  ret void
}
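;; For illustration only (not referenced by any CHECK line above): a minimal
;; hand-written declaration sketching the libcall signature implied by the
;; registers checked in test_memcpy_args -- dst in %rdi, src in %rsi, length
;; in %edx under the SysV x86-64 calling convention. The i32 length type is
;; an assumption mirroring this test's intrinsic signature; note the element
;; size (4) is encoded in the symbol name rather than passed as an argument.
declare void @__llvm_memcpy_element_unordered_atomic_4(ptr, ptr, i32)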
define ptr @test_memmove1_generic(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memmove1_generic:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memmove_element_unordered_atomic_1@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 4 %P, ptr align 4 %Q, i32 1024, i32 1)
  ret ptr %P
  ; 3rd arg (%edx) -- length
}

define ptr @test_memmove2_generic(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memmove2_generic:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memmove_element_unordered_atomic_2@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 4 %P, ptr align 4 %Q, i32 1024, i32 2)
  ret ptr %P
  ; 3rd arg (%edx) -- length
}

define ptr @test_memmove4_generic(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memmove4_generic:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memmove_element_unordered_atomic_4@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 4 %P, ptr align 4 %Q, i32 1024, i32 4)
  ret ptr %P
  ; 3rd arg (%edx) -- length
}

define ptr @test_memmove8_generic(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memmove8_generic:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memmove_element_unordered_atomic_8@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 8 %P, ptr align 8 %Q, i32 1024, i32 8)
  ret ptr %P
  ; 3rd arg (%edx) -- length
}

define ptr @test_memmove16_generic(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memmove16_generic:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memmove_element_unordered_atomic_16@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 16 %P, ptr align 16 %Q, i32 1024, i32 16)
  ret ptr %P
  ; 3rd arg (%edx) -- length
}

define void @test_memmove_args(ptr %Storage) {
; CHECK-LABEL: test_memmove_args:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movq (%rdi), %rax
; CHECK-NEXT:    movq 8(%rdi), %rsi
; CHECK-NEXT:    movq %rax, %rdi
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memmove_element_unordered_atomic_4@PLT
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  %Dst = load ptr, ptr %Storage
  %Src.addr = getelementptr ptr, ptr %Storage, i64 1
  %Src = load ptr, ptr %Src.addr
  ; 1st arg (%rdi)
  ; 2nd arg (%rsi)
  ; 3rd arg (%edx) -- length
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 4 %Dst, ptr align 4 %Src, i32 1024, i32 4)
  ret void
}
define ptr @test_memset1_generic(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset1_generic:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_1@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 1 %P, i8 %V, i32 1024, i32 1)
  ret ptr %P
  ; 3rd arg (%edx) -- length
}

define ptr @test_memset2_generic(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset2_generic:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_2@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 2 %P, i8 %V, i32 1024, i32 2)
  ret ptr %P
  ; 3rd arg (%edx) -- length
}

define ptr @test_memset4_generic(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset4_generic:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_4@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 4 %P, i8 %V, i32 1024, i32 4)
  ret ptr %P
  ; 3rd arg (%edx) -- length
}

define ptr @test_memset8_generic(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset8_generic:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_8@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 8 %P, i8 %V, i32 1024, i32 8)
  ret ptr %P
  ; 3rd arg (%edx) -- length
}

define ptr @test_memset16_generic(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset16_generic:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_16@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 16 %P, i8 %V, i32 1024, i32 16)
  ret ptr %P
  ; 3rd arg (%edx) -- length
}

define void @test_memset_args(ptr %Storage, ptr %V) {
; CHECK-LABEL: test_memset_args:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movq (%rdi), %rdi
; CHECK-NEXT:    movzbl (%rsi), %esi
; CHECK-NEXT:    movl $1024, %edx # imm = 0x400
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_4@PLT
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  %Dst = load ptr, ptr %Storage
  %Val = load i8, ptr %V
  ; 1st arg (%rdi)
  ; 2nd arg (%rsi)
  ; 3rd arg (%edx) -- length
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 4 %Dst, i8 %Val, i32 1024, i32 4)
  ret void
}

;; Next batch of tests are cases where we could profitably lower to
;; atomic loads and stores directly, just as we do for the non-atomic
;; (non-element.unordered.atomic) intrinsic variants.
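;; As a hypothetical sketch of what such a direct lowering could look like
;; (illustration only; not covered by the autogenerated assertions), an
;; 8-byte copy with element size 4 can be expanded into pairs of unordered
;; atomic loads and stores, one per element:
define void @sketch_expand_memcpy4_8(ptr %P, ptr %Q) {
  %e0 = load atomic i32, ptr %Q unordered, align 4
  store atomic i32 %e0, ptr %P unordered, align 4
  %q1 = getelementptr i32, ptr %Q, i64 1
  %p1 = getelementptr i32, ptr %P, i64 1
  %e1 = load atomic i32, ptr %q1 unordered, align 4
  store atomic i32 %e1, ptr %p1 unordered, align 4
  ret void
}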
define ptr @test_memcpy1_64(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memcpy1_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memcpy_element_unordered_atomic_1@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 1 %P, ptr align 1 %Q, i32 64, i32 1)
  ret ptr %P
}

; Ensure higher alignments (4/8/16) generate vector load/stores even with
; small element size; a sketch of the widening idea follows the align16 case.
define ptr @test_memcpy1_64_align4(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memcpy1_64_align4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memcpy_element_unordered_atomic_1@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 4 %P, ptr align 4 %Q, i32 64, i32 1)
  ret ptr %P
}

define ptr @test_memcpy1_64_align8(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memcpy1_64_align8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memcpy_element_unordered_atomic_1@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 8 %P, ptr align 8 %Q, i32 64, i32 1)
  ret ptr %P
}

define ptr @test_memcpy1_64_align16(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memcpy1_64_align16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memcpy_element_unordered_atomic_1@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 16 %P, ptr align 16 %Q, i32 64, i32 1)
  ret ptr %P
}
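;; A hypothetical sketch (illustration only; not covered by the autogenerated
;; assertions) of why higher alignment helps: with element size 1 but align 8,
;; eight elements can be moved by one wider unordered atomic access, which is
;; strictly stronger than the required per-byte atomicity:
define void @sketch_widened_memcpy1_8(ptr %P, ptr %Q) {
  %v = load atomic i64, ptr %Q unordered, align 8
  store atomic i64 %v, ptr %P unordered, align 8
  ret void
}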
; Make sure that different source & dest alignments are handled correctly.
define ptr @test_memcpy1_64_diff_aligns(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memcpy1_64_diff_aligns:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memcpy_element_unordered_atomic_1@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 1 %P, ptr align 16 %Q, i32 64, i32 1)
  ret ptr %P
}

define ptr @test_memcpy2_64(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memcpy2_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memcpy_element_unordered_atomic_2@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 2 %P, ptr align 2 %Q, i32 64, i32 2)
  ret ptr %P
}

define ptr @test_memcpy4_64(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memcpy4_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memcpy_element_unordered_atomic_4@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 4 %P, ptr align 4 %Q, i32 64, i32 4)
  ret ptr %P
}

define ptr @test_memcpy8_64(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memcpy8_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memcpy_element_unordered_atomic_8@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 8 %P, ptr align 8 %Q, i32 64, i32 8)
  ret ptr %P
}

define ptr @test_memcpy16_64(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memcpy16_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memcpy_element_unordered_atomic_16@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 16 %P, ptr align 16 %Q, i32 64, i32 16)
  ret ptr %P
}

; ==================================

define ptr @test_memmove1_64(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memmove1_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memmove_element_unordered_atomic_1@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 1 %P, ptr align 1 %Q, i32 64, i32 1)
  ret ptr %P
}

; Ensure align 16 generates vector load/stores even with small element size
define ptr @test_memmove1_64_align16(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memmove1_64_align16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memmove_element_unordered_atomic_1@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 16 %P, ptr align 16 %Q, i32 64, i32 1)
  ret ptr %P
}
; Make sure that different source & dest alignments are handled correctly.
define ptr @test_memmove1_64_diff_aligns(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memmove1_64_diff_aligns:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memmove_element_unordered_atomic_1@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 1 %P, ptr align 16 %Q, i32 64, i32 1)
  ret ptr %P
}

define ptr @test_memmove2_64(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memmove2_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memmove_element_unordered_atomic_2@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 2 %P, ptr align 2 %Q, i32 64, i32 2)
  ret ptr %P
}

define ptr @test_memmove4_64(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memmove4_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memmove_element_unordered_atomic_4@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 4 %P, ptr align 4 %Q, i32 64, i32 4)
  ret ptr %P
}

define ptr @test_memmove8_64(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memmove8_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memmove_element_unordered_atomic_8@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 8 %P, ptr align 8 %Q, i32 64, i32 8)
  ret ptr %P
}

define ptr @test_memmove16_64(ptr %P, ptr %Q) {
; CHECK-LABEL: test_memmove16_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memmove_element_unordered_atomic_16@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 16 %P, ptr align 16 %Q, i32 64, i32 16)
  ret ptr %P
}

; ==============================

define ptr @test_memset1_64(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset1_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_1@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 1 %P, i8 %V, i32 64, i32 1)
  ret ptr %P
}
define ptr @test_memset1_64_align16(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset1_64_align16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_1@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 16 %P, i8 %V, i32 64, i32 1)
  ret ptr %P
}

define ptr @test_memset2_64(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset2_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_2@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 2 %P, i8 %V, i32 64, i32 2)
  ret ptr %P
}

;; Use the memset4 case to explore alignment and sizing requirements in the
;; lowering; a sketch of the corresponding expansion follows the align64 case.
define ptr @test_memset4_64(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset4_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_4@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 4 %P, i8 %V, i32 64, i32 4)
  ret ptr %P
}

define ptr @test_memset4_64_align8(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset4_64_align8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_4@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 8 %P, i8 %V, i32 64, i32 4)
  ret ptr %P
}

define ptr @test_memset4_64_align16(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset4_64_align16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_4@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 16 %P, i8 %V, i32 64, i32 4)
  ret ptr %P
}

define ptr @test_memset4_64_align64(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset4_64_align64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_4@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 64 %P, i8 %V, i32 64, i32 4)
  ret ptr %P
}
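;; A hypothetical sketch (illustration only; not covered by the autogenerated
;; assertions) of a direct expansion for the 4-byte-element atomic memset:
;; splat the i8 value across an i32 and emit unordered atomic stores, shown
;; here for a single element:
define void @sketch_expand_memset4_4(ptr %P, i8 %V) {
  %z = zext i8 %V to i32
  %splat = mul i32 %z, 16843009 ; 0x01010101 replicates the byte to all lanes
  store atomic i32 %splat, ptr %P unordered, align 4
  ret void
}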
define ptr @test_memset4_4(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset4_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $4, %edx
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_4@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 4 %P, i8 %V, i32 4, i32 4)
  ret ptr %P
}

define ptr @test_memset4_8(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset4_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $8, %edx
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_4@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 4 %P, i8 %V, i32 8, i32 4)
  ret ptr %P
}

define ptr @test_memset4_8_align8(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset4_8_align8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $8, %edx
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_4@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 8 %P, i8 %V, i32 8, i32 4)
  ret ptr %P
}

define ptr @test_memset4_12(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset4_12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $12, %edx
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_4@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 4 %P, i8 %V, i32 12, i32 4)
  ret ptr %P
}

define ptr @test_memset4_16(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset4_16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $16, %edx
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_4@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 4 %P, i8 %V, i32 16, i32 4)
  ret ptr %P
}

define ptr @test_memset4_16_align16(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset4_16_align16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $16, %edx
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_4@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 16 %P, i8 %V, i32 16, i32 4)
  ret ptr %P
}

define ptr @test_memset4_60(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset4_60:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $60, %edx
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_4@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 4 %P, i8 %V, i32 60, i32 4)
  ret ptr %P
}
define ptr @test_memset8_64(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset8_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_8@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 8 %P, i8 %V, i32 64, i32 8)
  ret ptr %P
}

define ptr @test_memset16_64(ptr %P, i8 %V) {
; CHECK-LABEL: test_memset16_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_16@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 16 %P, i8 %V, i32 64, i32 16)
  ret ptr %P
}

define ptr @test_memset16_64_zero(ptr %P) {
; CHECK-LABEL: test_memset16_64_zero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbx, -16
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    xorl %esi, %esi
; CHECK-NEXT:    movl $64, %edx
; CHECK-NEXT:    callq __llvm_memset_element_unordered_atomic_16@PLT
; CHECK-NEXT:    movq %rbx, %rax
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 16 %P, i8 0, i32 64, i32 16)
  ret ptr %P
}

declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i32) nounwind
declare void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i32) nounwind
declare void @llvm.memset.element.unordered.atomic.p0.i32(ptr nocapture, i8, i32, i32) nounwind