; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-apple-darwin10.2 < %s | FileCheck %s -check-prefix=X64
; RUN: llc -mtriple=i686-apple-darwin10.2 -fixup-byte-word-insts=1 < %s | FileCheck %s -check-prefixes=X86,X86-BWON
; RUN: llc -mtriple=i686-apple-darwin10.2 -fixup-byte-word-insts=0 < %s | FileCheck %s -check-prefixes=X86,X86-BWOFF
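; The BWON/BWOFF prefixes cover both settings of -fixup-byte-word-insts: with
; the fixup enabled, byte/word loads from the stack are widened to
; movzbl/movzwl; with it disabled, plain movb/movw loads are expected.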

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"

; rdar://7860110

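; Merge %a1 into the low byte of the i32 at %a0: the load/and/or/store
; sequence should narrow to a single byte store (movb) at offset 0.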
define void @test1(ptr nocapture %a0, i8 zeroext %a1) nounwind ssp {
; X64-LABEL: test1:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    movb %sil, (%rdi)
; X64-NEXT:    retq
;
; X86-BWON-LABEL: test1:
; X86-BWON:       ## %bb.0: ## %entry
; X86-BWON-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-BWON-NEXT:    movb %al, (%ecx)
; X86-BWON-NEXT:    retl
;
; X86-BWOFF-LABEL: test1:
; X86-BWOFF:       ## %bb.0: ## %entry
; X86-BWOFF-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-BWOFF-NEXT:    movb %al, (%ecx)
; X86-BWOFF-NEXT:    retl
entry:
  %A = load i32, ptr %a0, align 4
  %B = and i32 %A, -256     ; 0xFFFFFF00
  %C = zext i8 %a1 to i32
  %D = or i32 %C, %B
  store i32 %D, ptr %a0, align 4
  ret void
}

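; Same as test1, but %a1 is shifted into bits 8-15, so the narrowed byte
; store lands at offset 1.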
define void @test2(ptr nocapture %a0, i8 zeroext %a1) nounwind ssp {
; X64-LABEL: test2:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    movb %sil, 1(%rdi)
; X64-NEXT:    retq
;
; X86-BWON-LABEL: test2:
; X86-BWON:       ## %bb.0: ## %entry
; X86-BWON-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-BWON-NEXT:    movb %al, 1(%ecx)
; X86-BWON-NEXT:    retl
;
; X86-BWOFF-LABEL: test2:
; X86-BWOFF:       ## %bb.0: ## %entry
; X86-BWOFF-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-BWOFF-NEXT:    movb %al, 1(%ecx)
; X86-BWOFF-NEXT:    retl
entry:
  %A = load i32, ptr %a0, align 4
  %B = and i32 %A, -65281    ; 0xFFFF00FF
  %C = zext i8 %a1 to i32
  %CS = shl i32 %C, 8
  %D = or i32 %B, %CS
  store i32 %D, ptr %a0, align 4
  ret void
}

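; Insert a 16-bit value into the low half of the i32 at %a0; expect a single
; word store (movw) at offset 0.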
define void @test3(ptr nocapture %a0, i16 zeroext %a1) nounwind ssp {
; X64-LABEL: test3:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    movw %si, (%rdi)
; X64-NEXT:    retq
;
; X86-BWON-LABEL: test3:
; X86-BWON:       ## %bb.0: ## %entry
; X86-BWON-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-BWON-NEXT:    movw %ax, (%ecx)
; X86-BWON-NEXT:    retl
;
; X86-BWOFF-LABEL: test3:
; X86-BWOFF:       ## %bb.0: ## %entry
; X86-BWOFF-NEXT:    movw {{[0-9]+}}(%esp), %ax
; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-BWOFF-NEXT:    movw %ax, (%ecx)
; X86-BWOFF-NEXT:    retl
entry:
  %A = load i32, ptr %a0, align 4
  %B = and i32 %A, -65536    ; 0xFFFF0000
  %C = zext i16 %a1 to i32
  %D = or i32 %B, %C
  store i32 %D, ptr %a0, align 4
  ret void
}

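; Insert a 16-bit value into the high half of the i32 at %a0; expect a single
; word store at offset 2.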
define void @test4(ptr nocapture %a0, i16 zeroext %a1) nounwind ssp {
; X64-LABEL: test4:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    movw %si, 2(%rdi)
; X64-NEXT:    retq
;
; X86-BWON-LABEL: test4:
; X86-BWON:       ## %bb.0: ## %entry
; X86-BWON-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-BWON-NEXT:    movw %ax, 2(%ecx)
; X86-BWON-NEXT:    retl
;
; X86-BWOFF-LABEL: test4:
; X86-BWOFF:       ## %bb.0: ## %entry
; X86-BWOFF-NEXT:    movw {{[0-9]+}}(%esp), %ax
; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-BWOFF-NEXT:    movw %ax, 2(%ecx)
; X86-BWOFF-NEXT:    retl
entry:
  %A = load i32, ptr %a0, align 4
  %B = and i32 %A, 65535    ; 0x0000FFFF
  %C = zext i16 %a1 to i32
  %CS = shl i32 %C, 16
  %D = or i32 %B, %CS
  store i32 %D, ptr %a0, align 4
  ret void
}

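; Same pattern on an i64: %a1 replaces bits 16-31, so the store still narrows
; to a word store at offset 2.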
define void @test5(ptr nocapture %a0, i16 zeroext %a1) nounwind ssp {
; X64-LABEL: test5:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    movw %si, 2(%rdi)
; X64-NEXT:    retq
;
; X86-BWON-LABEL: test5:
; X86-BWON:       ## %bb.0: ## %entry
; X86-BWON-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-BWON-NEXT:    movw %ax, 2(%ecx)
; X86-BWON-NEXT:    retl
;
; X86-BWOFF-LABEL: test5:
; X86-BWOFF:       ## %bb.0: ## %entry
; X86-BWOFF-NEXT:    movw {{[0-9]+}}(%esp), %ax
; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-BWOFF-NEXT:    movw %ax, 2(%ecx)
; X86-BWOFF-NEXT:    retl
entry:
  %A = load i64, ptr %a0, align 4
  %B = and i64 %A, -4294901761    ; 0xFFFFFFFF0000FFFF
  %C = zext i16 %a1 to i64
  %CS = shl i64 %C, 16
  %D = or i64 %B, %CS
  store i64 %D, ptr %a0, align 4
  ret void
}

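; Insert a byte into bits 40-47 of an i64; expect a byte store at offset 5.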
define void @test6(ptr nocapture %a0, i8 zeroext %a1) nounwind ssp {
; X64-LABEL: test6:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    movb %sil, 5(%rdi)
; X64-NEXT:    retq
;
; X86-BWON-LABEL: test6:
; X86-BWON:       ## %bb.0: ## %entry
; X86-BWON-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-BWON-NEXT:    movb %al, 5(%ecx)
; X86-BWON-NEXT:    retl
;
; X86-BWOFF-LABEL: test6:
; X86-BWOFF:       ## %bb.0: ## %entry
; X86-BWOFF-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-BWOFF-NEXT:    movb %al, 5(%ecx)
; X86-BWOFF-NEXT:    retl
entry:
  %A = load i64, ptr %a0, align 4
  %B = and i64 %A, -280375465082881    ; 0xFFFF00FFFFFFFFFF
  %C = zext i8 %a1 to i64
  %CS = shl i64 %C, 40
  %D = or i64 %B, %CS
  store i64 %D, ptr %a0, align 4
  ret void
}

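; As test6, but with an unrelated load from %P2 whose value is returned: the
; byte store should still be formed and the loaded value end up in %eax.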
define i32 @test7(ptr nocapture %a0, i8 zeroext %a1, ptr %P2) nounwind {
; X64-LABEL: test7:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    movl (%rdx), %eax
; X64-NEXT:    movb %sil, 5(%rdi)
; X64-NEXT:    retq
;
; X86-BWON-LABEL: test7:
; X86-BWON:       ## %bb.0: ## %entry
; X86-BWON-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BWON-NEXT:    movl (%eax), %eax
; X86-BWON-NEXT:    movb %cl, 5(%edx)
; X86-BWON-NEXT:    retl
;
; X86-BWOFF-LABEL: test7:
; X86-BWOFF:       ## %bb.0: ## %entry
; X86-BWOFF-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BWOFF-NEXT:    movl (%eax), %eax
; X86-BWOFF-NEXT:    movb %cl, 5(%edx)
; X86-BWOFF-NEXT:    retl
entry:
  %OtherLoad = load i32, ptr %P2
  %A = load i64, ptr %a0, align 4
  %B = and i64 %A, -280375465082881    ; 0xFFFF00FFFFFFFFFF
  %C = zext i8 %a1 to i64
  %CS = shl i64 %C, 40
  %D = or i64 %B, %CS
  store i64 %D, ptr %a0, align 4
  ret i32 %OtherLoad
}

; PR7833

@g_16 = internal global i32 -1

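; The store of 0 to @g_16 is dead (overwritten by the or'd value), so the
; whole sequence should fold to a single byte-sized 'orb $1' to memory.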
define void @test8() nounwind {
; X64-LABEL: test8:
; X64:       ## %bb.0:
; X64-NEXT:    orb $1, _g_16(%rip)
; X64-NEXT:    retq
;
; X86-LABEL: test8:
; X86:       ## %bb.0:
; X86-NEXT:    orb $1, _g_16
; X86-NEXT:    retl
  %tmp = load i32, ptr @g_16
  store i32 0, ptr @g_16
  %or = or i32 %tmp, 1
  store i32 %or, ptr @g_16
  ret void
}

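; Same as test8 without the intervening dead store: load/or 1/store still
; narrows to a byte-sized 'orb' on the low byte of @g_16.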
define void @test9() nounwind {
; X64-LABEL: test9:
; X64:       ## %bb.0:
; X64-NEXT:    orb $1, _g_16(%rip)
; X64-NEXT:    retq
;
; X86-LABEL: test9:
; X86:       ## %bb.0:
; X86-NEXT:    orb $1, _g_16
; X86-NEXT:    retl
  %tmp = load i32, ptr @g_16
  %or = or i32 %tmp, 1
  store i32 %or, ptr @g_16
  ret void
}

; rdar://8494845 + PR8244
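; Return bits 8-15 of the sign-extended byte loaded from %P: the load must
; stay sign-extending (movsbl), and x86-32 can take the result from %ah.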
define i8 @test10(ptr %P) nounwind ssp {
; X64-LABEL: test10:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    movsbl (%rdi), %eax
; X64-NEXT:    shrl $8, %eax
; X64-NEXT:    ## kill: def $al killed $al killed $eax
; X64-NEXT:    retq
;
; X86-LABEL: test10:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movsbl (%eax), %eax
; X86-NEXT:    movb %ah, %al
; X86-NEXT:    retl
entry:
  %tmp = load i8, ptr %P, align 1
  %conv = sext i8 %tmp to i32
  %shr3 = lshr i32 %conv, 8
  %conv2 = trunc i32 %shr3 to i8
  ret i8 %conv2
}