# Compiler projects using llvm
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64-unknown-unknown -run-pass=instruction-select -global-isel-abort=1 -verify-machineinstrs %s -o - | FileCheck %s
#
# Verify folding operations into G_ICMP.
#
# E.g. cmn/adds folding:
#
# x = G_SUB 0, y
# G_ICMP intpred(something_safe) z, x
#
# Folds to:
# adds z, y
#
# Where "something_safe" is ne or eq.
#
# ands/tst folding:
#
# z = G_AND x, y
# G_ICMP z, 0
#
# Folds to:
#
# tst x, y
#
# When we have signed comparisons.
#
# Tests whose names start with cmn_ should use ADDS for the G_ICMP. Tests whose
# names start with no_cmn should use SUBS. Similarly, tests whose names start
# with tst should use ANDS for the G_ICMP.
#

...
---
name:            cmn_s32_rhs
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1

    ; CHECK-LABEL: name: cmn_s32_rhs
    ; CHECK: liveins: $w0, $w1
    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr
    ; CHECK: [[ADDSWrr:%[0-9]+]]:gpr32 = ADDSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY2]], $wzr, 0, implicit $nzcv
    ; CHECK: $w0 = COPY [[CSINCWr]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %2:gpr(s32) = G_CONSTANT i32 0
    %6:gpr(s32) = G_CONSTANT i32 1
    %3:gpr(s32) = G_SUB %2, %1
    %7:gpr(s32) = G_ICMP intpred(ne), %0(s32), %3
    %5:gpr(s32) = G_SELECT %7, %6, %2
    $w0 = COPY %5(s32)
    RET_ReallyLR implicit $w0

...
---
# CMN fold, subtraction on the LHS: same as cmn_s32_rhs but with (0 - %0)
# as the first G_ICMP operand. The fold still applies for intpred(ne), and
# ADDSWrr is selected.
name:            cmn_s32_lhs
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1

    ; CHECK-LABEL: name: cmn_s32_lhs
    ; CHECK: liveins: $w0, $w1
    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr
    ; CHECK: [[ADDSWrr:%[0-9]+]]:gpr32 = ADDSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY2]], $wzr, 0, implicit $nzcv
    ; CHECK: $w0 = COPY [[CSINCWr]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %2:gpr(s32) = G_CONSTANT i32 0
    %6:gpr(s32) = G_CONSTANT i32 1
    %3:gpr(s32) = G_SUB %2, %0
    %7:gpr(s32) = G_ICMP intpred(ne), %3(s32), %1
    %5:gpr(s32) = G_SELECT %7, %6, %2
    $w0 = COPY %5(s32)
    RET_ReallyLR implicit $w0

...
---
# Negative test: the predicate is intpred(slt), which is not in the safe set
# (ne/eq) for the CMN fold, so the G_SUB must be materialized and the compare
# stays as a separate SUBSWrr.
name:            no_cmn_s32_rhs
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1

    ; CHECK-LABEL: name: no_cmn_s32_rhs
    ; CHECK: liveins: $w0, $w1
    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr
    ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY2]], [[COPY1]], implicit-def $nzcv
    ; CHECK: [[SUBSWrr1:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[SUBSWrr]], implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY2]], $wzr, 10, implicit $nzcv
    ; CHECK: $w0 = COPY [[CSINCWr]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %2:gpr(s32) = G_CONSTANT i32 0
    %6:gpr(s32) = G_CONSTANT i32 1
    %3:gpr(s32) = G_SUB %2, %1
    %7:gpr(s32) = G_ICMP intpred(slt), %0(s32), %3
    %5:gpr(s32) = G_SELECT %7, %6, %2
    $w0 = COPY %5(s32)
    RET_ReallyLR implicit $w0

...
---
# Negative test: intpred(slt) with the G_SUB on the LHS of the compare.
# No CMN fold; both the negation and the compare select as SUBSWrr.
name:            no_cmn_s32_lhs
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1

    ; CHECK-LABEL: name: no_cmn_s32_lhs
    ; CHECK: liveins: $w0, $w1
    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr
    ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY2]], [[COPY]], implicit-def $nzcv
    ; CHECK: [[SUBSWrr1:%[0-9]+]]:gpr32 = SUBSWrr [[SUBSWrr]], [[COPY1]], implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY2]], $wzr, 10, implicit $nzcv
    ; CHECK: $w0 = COPY [[CSINCWr]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %2:gpr(s32) = G_CONSTANT i32 0
    %6:gpr(s32) = G_CONSTANT i32 1
    %3:gpr(s32) = G_SUB %2, %0
    %7:gpr(s32) = G_ICMP intpred(slt), %3(s32), %1
    %5:gpr(s32) = G_SELECT %7, %6, %2
    $w0 = COPY %5(s32)
    RET_ReallyLR implicit $w0

...
---
# 64-bit analogue of cmn_s32_rhs: (0 - %1) on the RHS of an intpred(ne)
# compare folds to ADDSXrr.
name:            cmn_s64_rhs
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0, $x1

    ; CHECK-LABEL: name: cmn_s64_rhs
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr
    ; CHECK: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
    ; CHECK: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY2]], $xzr, 0, implicit $nzcv
    ; CHECK: $x0 = COPY [[CSINCXr]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %2:gpr(s64) = G_CONSTANT i64 0
    %6:gpr(s64) = G_CONSTANT i64 1
    %3:gpr(s64) = G_SUB %2, %1
    %7:gpr(s32) = G_ICMP intpred(ne), %0(s64), %3
    %5:gpr(s64) = G_SELECT %7, %6, %2
    $x0 = COPY %5(s64)
    RET_ReallyLR implicit $x0

...
---
# 64-bit analogue of cmn_s32_lhs: (0 - %0) on the LHS of an intpred(ne)
# compare folds to ADDSXrr.
name:            cmn_s64_lhs
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0, $x1

    ; CHECK-LABEL: name: cmn_s64_lhs
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr
    ; CHECK: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
    ; CHECK: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY2]], $xzr, 0, implicit $nzcv
    ; CHECK: $x0 = COPY [[CSINCXr]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %2:gpr(s64) = G_CONSTANT i64 0
    %6:gpr(s64) = G_CONSTANT i64 1
    %3:gpr(s64) = G_SUB %2, %0
    %7:gpr(s32) = G_ICMP intpred(ne), %3(s64), %1
    %5:gpr(s64) = G_SELECT %7, %6, %2
    $x0 = COPY %5(s64)
    RET_ReallyLR implicit $x0

...
---
# 64-bit negative test: intpred(slt) prevents the CMN fold; the negation and
# the compare both select as SUBSXrr.
name:            no_cmn_s64_rhs
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0, $x1

    ; CHECK-LABEL: name: no_cmn_s64_rhs
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr
    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY2]], [[COPY1]], implicit-def $nzcv
    ; CHECK: [[SUBSXrr1:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[SUBSXrr]], implicit-def $nzcv
    ; CHECK: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY2]], $xzr, 10, implicit $nzcv
    ; CHECK: $x0 = COPY [[CSINCXr]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %2:gpr(s64) = G_CONSTANT i64 0
    %6:gpr(s64) = G_CONSTANT i64 1
    %3:gpr(s64) = G_SUB %2, %1
    %7:gpr(s32) = G_ICMP intpred(slt), %0(s64), %3
    %5:gpr(s64) = G_SELECT %7, %6, %2
    $x0 = COPY %5(s64)
    RET_ReallyLR implicit $x0

...
---
# 64-bit negative test: intpred(slt) with the G_SUB on the LHS. No CMN fold;
# SUBSXrr is used for both the negation and the compare.
name:            no_cmn_s64_lhs
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0, $x1

    ; CHECK-LABEL: name: no_cmn_s64_lhs
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr
    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY2]], [[COPY]], implicit-def $nzcv
    ; CHECK: [[SUBSXrr1:%[0-9]+]]:gpr64 = SUBSXrr [[SUBSXrr]], [[COPY1]], implicit-def $nzcv
    ; CHECK: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY2]], $xzr, 10, implicit $nzcv
    ; CHECK: $x0 = COPY [[CSINCXr]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %2:gpr(s64) = G_CONSTANT i64 0
    %6:gpr(s64) = G_CONSTANT i64 1
    %3:gpr(s64) = G_SUB %2, %0
    %7:gpr(s32) = G_ICMP intpred(slt), %3(s64), %1
    %5:gpr(s64) = G_SELECT %7, %6, %2
    $x0 = COPY %5(s64)
    RET_ReallyLR implicit $x0

...
---
# TST fold: an intpred(eq) compare of a G_AND result against zero selects as
# a flag-setting ANDSWrr (i.e. a 32-bit tst), with the AND result unused.
name:            tst_s32
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1
    ; CHECK-LABEL: name: tst_s32
    ; CHECK: liveins: $w0, $w1
    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr
    ; CHECK: [[ANDSWrr:%[0-9]+]]:gpr32 = ANDSWrr [[COPY1]], [[COPY]], implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY1]], $wzr, 1, implicit $nzcv
    ; CHECK: $w0 = COPY [[CSINCWr]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %2:gpr(s32) = G_CONSTANT i32 0
    %6:gpr(s32) = G_CONSTANT i32 1
    %3:gpr(s32) = G_AND %2, %1
    %8:gpr(s32) = G_CONSTANT i32 0
    %7:gpr(s32) = G_ICMP intpred(eq), %3(s32), %8
    %5:gpr(s32) = G_SELECT %7, %6, %2
    $w0 = COPY %5(s32)
    RET_ReallyLR implicit $w0

...
---
# 64-bit analogue of tst_s32: intpred(eq) compare of G_AND against zero
# selects as ANDSXrr.
name:            tst_s64
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0, $x1
    ; CHECK-LABEL: name: tst_s64
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $xzr
    ; CHECK: [[ANDSXrr:%[0-9]+]]:gpr64 = ANDSXrr [[COPY1]], [[COPY]], implicit-def $nzcv
    ; CHECK: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY1]], $xzr, 1, implicit $nzcv
    ; CHECK: $x0 = COPY [[CSINCXr]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %2:gpr(s64) = G_CONSTANT i64 0
    %6:gpr(s64) = G_CONSTANT i64 1
    %3:gpr(s64) = G_AND %2, %1
    %8:gpr(s64) = G_CONSTANT i64 0
    %7:gpr(s32) = G_ICMP intpred(eq), %3(s64), %8
    %5:gpr(s64) = G_SELECT %7, %6, %2
    $x0 = COPY %5(s64)
    RET_ReallyLR implicit $x0

...
---
# Negative test: intpred(ugt) is not a predicate the TST fold handles, so the
# AND stays a plain ANDWrr and an explicit SUBSWri performs the compare.
name:            no_tst_unsigned_compare
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1
    ; CHECK-LABEL: name: no_tst_unsigned_compare
    ; CHECK: liveins: $w0, $w1
    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr
    ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32common = ANDWrr [[COPY1]], [[COPY]]
    ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[ANDWrr]], 0, 0, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY1]], $wzr, 9, implicit $nzcv
    ; CHECK: $w0 = COPY [[CSINCWr]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %2:gpr(s32) = G_CONSTANT i32 0
    %6:gpr(s32) = G_CONSTANT i32 1
    %3:gpr(s32) = G_AND %2, %1
    %8:gpr(s32) = G_CONSTANT i32 0
    %7:gpr(s32) = G_ICMP intpred(ugt), %3(s32), %8
    %5:gpr(s32) = G_SELECT %7, %6, %2
    $w0 = COPY %5(s32)
    RET_ReallyLR implicit $w0

...
---
# Negative test: the compare RHS is 42, not zero, so the TST fold does not
# apply; a plain ANDWrr plus SUBSWri against 42 is selected instead.
name:            no_tst_nonzero
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1
    ; CHECK-LABEL: name: no_tst_nonzero
    ; CHECK: liveins: $w0, $w1
    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr
    ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32common = ANDWrr [[COPY1]], [[COPY]]
    ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[ANDWrr]], 42, 0, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY1]], $wzr, 9, implicit $nzcv
    ; CHECK: $w0 = COPY [[CSINCWr]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %2:gpr(s32) = G_CONSTANT i32 0
    %6:gpr(s32) = G_CONSTANT i32 1
    %3:gpr(s32) = G_AND %2, %1
    %8:gpr(s32) = G_CONSTANT i32 42
    %7:gpr(s32) = G_ICMP intpred(ugt), %3(s32), %8
    %5:gpr(s32) = G_SELECT %7, %6, %2
    $w0 = COPY %5(s32)
    RET_ReallyLR implicit $w0

...
---
# TST with an immediate: an AND mask that is encodable as an AArch64 logical
# immediate (here i32 3) is pulled into the flag-setting compare, selecting
# ANDSWri (the 1 in the CHECK line is the encoded logical-immediate field).
name:            imm_tst
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1
    ; CHECK-LABEL: name: imm_tst
    ; CHECK: liveins: $w0, $w1
    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY]], 1, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
    ; CHECK: $w0 = COPY [[CSINCWr]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %2:gpr(s32) = G_CONSTANT i32 0
    %3:gpr(s32) = G_CONSTANT i32 1

    ; This can be represented as a logical immediate, so we can pull it into
    ; the ANDS. We should get ANDSWri.
    %4:gpr(s32) = G_CONSTANT i32 3

    %5:gpr(s32) = G_AND %1, %4
    %6:gpr(s32) = G_ICMP intpred(eq), %5(s32), %2
    $w0 = COPY %6(s32)
    RET_ReallyLR implicit $w0


...
---
# Negative immediate test: i32 -1 cannot be encoded as a logical immediate,
# so the constant is materialized with MOVi32imm and the register form
# ANDSWrr is selected instead of ANDSWri.
name:            no_imm_tst_not_logical_imm
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1
    ; CHECK-LABEL: name: no_imm_tst_not_logical_imm
    ; CHECK: liveins: $w0, $w1
    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm -1
    ; CHECK: [[ANDSWrr:%[0-9]+]]:gpr32 = ANDSWrr [[COPY]], [[MOVi32imm]], implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
    ; CHECK: $w0 = COPY [[CSINCWr]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %2:gpr(s32) = G_CONSTANT i32 0
    %3:gpr(s32) = G_CONSTANT i32 1

    ; This immediate can't be represented as a logical immediate. We shouldn't
    ; select ANDSWri.
    %4:gpr(s32) = G_CONSTANT i32 -1

    %5:gpr(s32) = G_AND %1, %4
    %6:gpr(s32) = G_ICMP intpred(eq), %5(s32), %2
    $w0 = COPY %6(s32)
    RET_ReallyLR implicit $w0

...
---
# Regression test: both compare operands are COPYs straight from physical
# registers. The fold's def-walking must not crash when it reaches a physreg;
# a plain SUBSXrr compare is expected.
name:            test_physreg_copy
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0, $x1
    ; CHECK-LABEL: name: test_physreg_copy
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
    ; CHECK: $w0 = COPY [[CSINCWr]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = COPY $x1
    ; When we find the defs of the LHS and RHS of the compare, we walk over
    ; copies. Make sure that we don't crash when we hit a copy from a physical
    ; register.
    %7:gpr(s32) = G_ICMP intpred(eq), %0, %1
    $w0 = COPY %7(s32)
    RET_ReallyLR implicit $x0

...
---
# Shifted-register TST: the G_SHL feeding the G_AND is folded into the
# flag-setting compare, selecting ANDSXrs with shift operand 16 (lsl #16).
name:            tst_fold_shift_s64
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0, $x1
    ; We should fold the G_SHL into the ANDS to get ANDSXrs.
    ;
    ; CHECK-LABEL: name: tst_fold_shift_s64
    ; CHECK: liveins: $x0, $x1
    ; CHECK: %copy:gpr64 = COPY $x1
    ; CHECK: %zero:gpr64 = COPY $xzr
    ; CHECK: [[ANDSXrs:%[0-9]+]]:gpr64 = ANDSXrs %zero, %copy, 16, implicit-def $nzcv
    ; CHECK: %select:gpr64 = CSINCXr %zero, $xzr, 1, implicit $nzcv
    ; CHECK: $x0 = COPY %select
    ; CHECK: RET_ReallyLR implicit $x0
    %copy:gpr(s64) = COPY $x1
    %zero:gpr(s64) = G_CONSTANT i64 0
    %one:gpr(s64) = G_CONSTANT i64 1
    %cst:gpr(s64) = G_CONSTANT i64 16
    %shift:gpr(s64) = G_SHL %copy(s64), %cst(s64)
    %and:gpr(s64) = G_AND %zero, %shift
    %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero
    %select:gpr(s64) = G_SELECT %cmp, %one, %zero
    $x0 = COPY %select(s64)
    RET_ReallyLR implicit $x0

...
---
# 32-bit analogue of tst_fold_shift_s64: the G_SHL folds into the compare,
# selecting ANDSWrs with shift operand 16 (lsl #16).
name:            tst_fold_shift_s32
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1
    ; We should fold the G_SHL into the ANDS to get ANDSWrs.
    ;
    ; CHECK-LABEL: name: tst_fold_shift_s32
    ; CHECK: liveins: $w0, $w1
    ; CHECK: %copy:gpr32 = COPY $w1
    ; CHECK: %zero:gpr32 = COPY $wzr
    ; CHECK: [[ANDSWrs:%[0-9]+]]:gpr32 = ANDSWrs %zero, %copy, 16, implicit-def $nzcv
    ; CHECK: %select:gpr32 = CSINCWr %zero, $wzr, 1, implicit $nzcv
    ; CHECK: $w0 = COPY %select
    ; CHECK: RET_ReallyLR implicit $w0
    %copy:gpr(s32) = COPY $w1
    %zero:gpr(s32) = G_CONSTANT i32 0
    %one:gpr(s32) = G_CONSTANT i32 1
    %cst:gpr(s32) = G_CONSTANT i32 16
    %shift:gpr(s32) = G_SHL %copy(s32), %cst(s32)
    %and:gpr(s32) = G_AND %zero, %shift
    %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s32), %zero
    %select:gpr(s32) = G_SELECT %cmp, %one, %zero
    $w0 = COPY %select(s32)
    RET_ReallyLR implicit $w0

...
---
# CMN with a negative immediate: comparing against (0 - (-1)) is selected as
# SUBSWri %reg0, 1 — the subtraction of -1 becomes a subtract of +1.
name:            cmn_s32_neg_imm
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1

    ; CHECK-LABEL: name: cmn_s32_neg_imm
    ; CHECK: liveins: $w0, $w1
    ; CHECK: %reg0:gpr32sp = COPY $w0
    ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri %reg0, 1, 0, implicit-def $nzcv
    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
    ; CHECK: $w0 = COPY %cmp
    ; CHECK: RET_ReallyLR implicit $w0
    %reg0:gpr(s32) = COPY $w0
    %negative_one:gpr(s32) = G_CONSTANT i32 -1
    %zero:gpr(s32) = G_CONSTANT i32 0
    %sub:gpr(s32) = G_SUB %zero, %negative_one
    %cmp:gpr(s32) = G_ICMP intpred(ne), %reg0(s32), %sub
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0

...
---
# CMN with an extended register: the G_SEXT + G_SHL feeding the compare RHS
# folds into the extended-register form ADDSXrx (the 50 in the CHECK line is
# the encoded arith-extend operand for the sext-by-2 pattern).
name:            cmn_arith_extended_shl
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $x0, $x1
    ; We should be able to fold away the extend + shift and select ADDSXrx.

    ; CHECK-LABEL: name: cmn_arith_extended_shl
    ; CHECK: liveins: $w0, $x0, $x1
    ; CHECK: %reg0:gpr64sp = COPY $x0
    ; CHECK: %reg1:gpr32 = COPY $w0
    ; CHECK: [[ADDSXrx:%[0-9]+]]:gpr64 = ADDSXrx %reg0, %reg1, 50, implicit-def $nzcv
    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
    ; CHECK: $w0 = COPY %cmp
    ; CHECK: RET_ReallyLR implicit $w0
    %reg0:gpr(s64) = COPY $x0
    %zero:gpr(s64) = G_CONSTANT i64 0
    %sub:gpr(s64) = G_SUB %zero, %reg0

    %reg1:gpr(s32) = COPY $w0
    %ext:gpr(s64) = G_SEXT %reg1(s32)
    %cst:gpr(s64) = G_CONSTANT i64 2
    %shift:gpr(s64) = G_SHL %ext, %cst(s64)

    %cmp:gpr(s32) = G_ICMP intpred(ne), %sub(s64), %shift
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0