# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -O0 -mtriple arm64-- -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s
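# (If the MIR below changes, these assertions can be regenerated by re-running that script
# on this file; it re-executes the RUN line above, so a built llc must be available.)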
---
name:            build_vec_f16
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0

    ; Check that s16 operands are assigned fpr, as we don't have 16-bit gpr registers.
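    ; The G_TRUNC result itself stays on gpr; regbankselect inserts gpr-to-fpr copies for
    ; each use so that every G_BUILD_VECTOR operand ends up on fpr.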
    ; CHECK-LABEL: name: build_vec_f16
    ; CHECK: liveins: $w0
    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
    ; CHECK: [[TRUNC:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY]](s32)
    ; CHECK: [[COPY1:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
    ; CHECK: [[COPY2:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
    ; CHECK: [[COPY3:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
    ; CHECK: [[COPY4:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
    ; CHECK: [[COPY5:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
    ; CHECK: [[COPY6:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
    ; CHECK: [[COPY7:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
    ; CHECK: [[COPY8:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:fpr(<8 x s16>) = G_BUILD_VECTOR [[COPY1]](s16), [[COPY2]](s16), [[COPY3]](s16), [[COPY4]](s16), [[COPY5]](s16), [[COPY6]](s16), [[COPY7]](s16), [[COPY8]](s16)
    ; CHECK: $q0 = COPY [[BUILD_VECTOR]](<8 x s16>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(s32) = COPY $w0
    %1:_(s16) = G_TRUNC %0(s32)
    %2:_(<8 x s16>) = G_BUILD_VECTOR %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16)
    $q0 = COPY %2(<8 x s16>)
    RET_ReallyLR implicit $q0

...
---
name:            g_constant_operands_on_gpr
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1:
    ; Check that the G_CONSTANT operands are assigned gpr even though they are narrower than 32 bits.
    ; They are all constants, so the G_BUILD_VECTOR can be selected via a constant-pool load if needed,
    ; and this form is more amenable to selection by patterns (without cross-bank copies).
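    ; As an illustrative sketch (not part of the test), IR along these lines would typically be
    ; translated by the IRTranslator into a G_BUILD_VECTOR of G_CONSTANTs much like the body below;
    ; the exact virtual-register reuse depends on the translator:
    ;
    ;   define <16 x i8> @g_constant_operands_on_gpr() {
    ;     ret <16 x i8> <i8 4, i8 10, i8 3, i8 11, i8 15, i8 4, i8 10, i8 44,
    ;                    i8 22, i8 15, i8 4, i8 19, i8 3, i8 11, i8 15, i8 55>
    ;   }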
    ; CHECK-LABEL: name: g_constant_operands_on_gpr
    ; CHECK: [[C:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 4
    ; CHECK: [[C1:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 10
    ; CHECK: [[C2:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 3
    ; CHECK: [[C3:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 11
    ; CHECK: [[C4:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 15
    ; CHECK: [[C5:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 44
    ; CHECK: [[C6:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 22
    ; CHECK: [[C7:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 19
    ; CHECK: [[C8:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 55
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:fpr(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[C1]](s8), [[C2]](s8), [[C3]](s8), [[C4]](s8), [[C]](s8), [[C1]](s8), [[C5]](s8), [[C6]](s8), [[C4]](s8), [[C]](s8), [[C7]](s8), [[C2]](s8), [[C3]](s8), [[C4]](s8), [[C8]](s8)
    ; CHECK: $q0 = COPY [[BUILD_VECTOR]](<16 x s8>)
    ; CHECK: RET_ReallyLR implicit $q0
    %1:_(s8) = G_CONSTANT i8 4
    %2:_(s8) = G_CONSTANT i8 10
    %3:_(s8) = G_CONSTANT i8 3
    %4:_(s8) = G_CONSTANT i8 11
    %5:_(s8) = G_CONSTANT i8 15
    %6:_(s8) = G_CONSTANT i8 44
    %7:_(s8) = G_CONSTANT i8 22
    %8:_(s8) = G_CONSTANT i8 19
    %9:_(s8) = G_CONSTANT i8 55
    %0:_(<16 x s8>) = G_BUILD_VECTOR %1(s8), %2(s8), %3(s8), %4(s8), %5(s8), %1(s8), %2(s8), %6(s8), %7(s8), %5(s8), %1(s8), %8(s8), %3(s8), %4(s8), %5(s8), %9(s8)
    $q0 = COPY %0(<16 x s8>)
    RET_ReallyLR implicit $q0

...
---
name:            fed_by_fp_load
alignment:       4
legalized:       true
tracksRegLiveness: true
liveins:
  - { reg: '$x0' }
  - { reg: '$x1' }
  - { reg: '$x2' }
  - { reg: '$s0' }
frameInfo:
  maxAlignment:    1
body:             |
  bb.1:
    liveins: $s0, $x0, $x1, $x2

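    ; Check that the scalar G_LOADs feeding the G_BUILD_VECTOR are assigned fpr, so the
    ; vector can be built without gpr-to-fpr copies.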
    ; CHECK-LABEL: name: fed_by_fp_load
    ; CHECK: liveins: $s0, $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
    ; CHECK: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 328
    ; CHECK: [[PTR_ADD:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
    ; CHECK: [[C1:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 344
    ; CHECK: [[PTR_ADD1:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
    ; CHECK: [[LOAD:%[0-9]+]]:fpr(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32))
    ; CHECK: [[LOAD1:%[0-9]+]]:fpr(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32))
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:fpr(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
    ; CHECK: $d0 = COPY [[BUILD_VECTOR]](<2 x s32>)
    ; CHECK: RET_ReallyLR implicit $d0
    %0:_(p0) = COPY $x0
    %4:_(s64) = G_CONSTANT i64 328
    %5:_(p0) = G_PTR_ADD %0, %4(s64)
    %6:_(s64) = G_CONSTANT i64 344
    %7:_(p0) = G_PTR_ADD %0, %6(s64)
    %15:_(s32) = G_LOAD %5(p0) :: (load (s32))
    %20:_(s32) = G_LOAD %7(p0) :: (load (s32))
    %21:_(<2 x s32>) = G_BUILD_VECTOR %15(s32), %20(s32)
    $d0 = COPY %21(<2 x s32>)
    RET_ReallyLR implicit $d0

...