# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -O0 -mtriple=aarch64 -run-pass=legalizer -global-isel-abort=1 -verify-machineinstrs %s -o - | FileCheck %s
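# Descriptive note (not autogenerated): this file exercises legalization of
# G_VECREDUCE_XOR for <N x s1> vectors and for s8/s16/s32/s64 element vectors
# of various widths, checking the scalarized XOR trees produced by the legalizer.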

---
name:            test_redxor_v1i1
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$w0' }
body:             |
  bb.1:
    liveins: $w0

    ; CHECK-LABEL: name: test_redxor_v1i1
    ; CHECK: liveins: $w0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %1:_(s32) = COPY $w0
    %0:_(s1) = G_TRUNC %1(s32)
    %2:_(s1) = G_VECREDUCE_XOR %0(s1)
    %4:_(s32) = G_ZEXT %2(s1)
    $w0 = COPY %4(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_redxor_v2i1
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$d0' }
body:             |
  bb.1:
    liveins: $d0

    ; CHECK-LABEL: name: test_redxor_v2i1
    ; CHECK: liveins: $d0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]]
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[XOR]], [[C]]
    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %1:_(<2 x s32>) = COPY $d0
    %0:_(<2 x s1>) = G_TRUNC %1(<2 x s32>)
    %2:_(s1) = G_VECREDUCE_XOR %0(<2 x s1>)
    %4:_(s32) = G_ZEXT %2(s1)
    $w0 = COPY %4(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_redxor_v4i1
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$d0' }
body:             |
  bb.1:
    liveins: $d0

    ; CHECK-LABEL: name: test_redxor_v4i1
    ; CHECK: liveins: $d0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[XOR]], [[XOR1]]
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[XOR2]], [[C]]
    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %1:_(<4 x s16>) = COPY $d0
    %0:_(<4 x s1>) = G_TRUNC %1(<4 x s16>)
    %2:_(s1) = G_VECREDUCE_XOR %0(<4 x s1>)
    %4:_(s32) = G_ZEXT %2(s1)
    $w0 = COPY %4(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_redxor_v8i1
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$d0' }
body:             |
  bb.1:
    liveins: $d0

    ; CHECK-LABEL: name: test_redxor_v8i1
    ; CHECK: liveins: $d0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[COPY]](<8 x s8>)
    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT4]], [[ANYEXT5]]
    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
    ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT6]], [[ANYEXT7]]
    ; CHECK-NEXT: [[XOR4:%[0-9]+]]:_(s32) = G_XOR [[XOR]], [[XOR1]]
    ; CHECK-NEXT: [[XOR5:%[0-9]+]]:_(s32) = G_XOR [[XOR2]], [[XOR3]]
    ; CHECK-NEXT: [[XOR6:%[0-9]+]]:_(s32) = G_XOR [[XOR4]], [[XOR5]]
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[XOR6]], [[C]]
    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %1:_(<8 x s8>) = COPY $d0
    %0:_(<8 x s1>) = G_TRUNC %1(<8 x s8>)
    %2:_(s1) = G_VECREDUCE_XOR %0(<8 x s1>)
    %4:_(s32) = G_ZEXT %2(s1)
    $w0 = COPY %4(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_redxor_v16i1
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$q0' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.1:
    liveins: $q0

    ; CHECK-LABEL: name: test_redxor_v16i1
    ; CHECK: liveins: $q0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8), [[UV10:%[0-9]+]]:_(s8), [[UV11:%[0-9]+]]:_(s8), [[UV12:%[0-9]+]]:_(s8), [[UV13:%[0-9]+]]:_(s8), [[UV14:%[0-9]+]]:_(s8), [[UV15:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[COPY]](<16 x s8>)
    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT4]], [[ANYEXT5]]
    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
    ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT6]], [[ANYEXT7]]
    ; CHECK-NEXT: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s8)
    ; CHECK-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s8)
    ; CHECK-NEXT: [[XOR4:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT8]], [[ANYEXT9]]
    ; CHECK-NEXT: [[ANYEXT10:%[0-9]+]]:_(s32) = G_ANYEXT [[UV10]](s8)
    ; CHECK-NEXT: [[ANYEXT11:%[0-9]+]]:_(s32) = G_ANYEXT [[UV11]](s8)
    ; CHECK-NEXT: [[XOR5:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT10]], [[ANYEXT11]]
    ; CHECK-NEXT: [[ANYEXT12:%[0-9]+]]:_(s32) = G_ANYEXT [[UV12]](s8)
    ; CHECK-NEXT: [[ANYEXT13:%[0-9]+]]:_(s32) = G_ANYEXT [[UV13]](s8)
    ; CHECK-NEXT: [[XOR6:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT12]], [[ANYEXT13]]
    ; CHECK-NEXT: [[ANYEXT14:%[0-9]+]]:_(s32) = G_ANYEXT [[UV14]](s8)
    ; CHECK-NEXT: [[ANYEXT15:%[0-9]+]]:_(s32) = G_ANYEXT [[UV15]](s8)
    ; CHECK-NEXT: [[XOR7:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT14]], [[ANYEXT15]]
    ; CHECK-NEXT: [[XOR8:%[0-9]+]]:_(s32) = G_XOR [[XOR]], [[XOR1]]
    ; CHECK-NEXT: [[XOR9:%[0-9]+]]:_(s32) = G_XOR [[XOR2]], [[XOR3]]
    ; CHECK-NEXT: [[XOR10:%[0-9]+]]:_(s32) = G_XOR [[XOR4]], [[XOR5]]
    ; CHECK-NEXT: [[XOR11:%[0-9]+]]:_(s32) = G_XOR [[XOR6]], [[XOR7]]
    ; CHECK-NEXT: [[XOR12:%[0-9]+]]:_(s32) = G_XOR [[XOR8]], [[XOR9]]
    ; CHECK-NEXT: [[XOR13:%[0-9]+]]:_(s32) = G_XOR [[XOR10]], [[XOR11]]
    ; CHECK-NEXT: [[XOR14:%[0-9]+]]:_(s32) = G_XOR [[XOR12]], [[XOR13]]
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[XOR14]], [[C]]
    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %1:_(<16 x s8>) = COPY $q0
    %0:_(<16 x s1>) = G_TRUNC %1(<16 x s8>)
    %2:_(s1) = G_VECREDUCE_XOR %0(<16 x s1>)
    %4:_(s32) = G_ZEXT %2(s1)
    $w0 = COPY %4(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_redxor_v1i8
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$d0' }
body:             |
  bb.1:
    liveins: $d0

    ; CHECK-LABEL: name: test_redxor_v1i8
    ; CHECK: liveins: $d0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(s64) = G_BITCAST [[COPY]](<8 x s8>)
    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[BITCAST]](s64)
    ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %1:_(<8 x s8>) = COPY $d0
    %11:_(s64) = G_BITCAST %1(<8 x s8>)
    %0:_(s8) = G_TRUNC %11(s64)
    %9:_(s8) = G_VECREDUCE_XOR %0(s8)
    %10:_(s32) = G_ANYEXT %9(s8)
    $w0 = COPY %10(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_redxor_v3i8
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$w0' }
  - { reg: '$w1' }
  - { reg: '$w2' }
body:             |
  bb.1:
    liveins: $w0, $w1, $w2

    ; CHECK-LABEL: name: test_redxor_v3i8
    ; CHECK: liveins: $w0, $w1, $w2
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[XOR]], [[COPY2]]
    ; CHECK-NEXT: $w0 = COPY [[XOR1]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %1:_(s32) = COPY $w0
    %2:_(s32) = COPY $w1
    %3:_(s32) = COPY $w2
    %4:_(<3 x s32>) = G_BUILD_VECTOR %1(s32), %2(s32), %3(s32)
    %0:_(<3 x s8>) = G_TRUNC %4(<3 x s32>)
    %5:_(s8) = G_VECREDUCE_XOR %0(<3 x s8>)
    %6:_(s32) = G_ANYEXT %5(s8)
    $w0 = COPY %6(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_redxor_v4i8
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$d0' }
body:             |
  bb.1:
    liveins: $d0

    ; CHECK-LABEL: name: test_redxor_v4i8
    ; CHECK: liveins: $d0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[XOR]], [[XOR1]]
    ; CHECK-NEXT: $w0 = COPY [[XOR2]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %1:_(<4 x s16>) = COPY $d0
    %0:_(<4 x s8>) = G_TRUNC %1(<4 x s16>)
    %2:_(s8) = G_VECREDUCE_XOR %0(<4 x s8>)
    %3:_(s32) = G_ANYEXT %2(s8)
    $w0 = COPY %3(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_redxor_v8i8
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$d0' }
body:             |
  bb.1:
    liveins: $d0

    ; CHECK-LABEL: name: test_redxor_v8i8
    ; CHECK: liveins: $d0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[COPY]](<8 x s8>)
    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT4]], [[ANYEXT5]]
    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
    ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT6]], [[ANYEXT7]]
    ; CHECK-NEXT: [[XOR4:%[0-9]+]]:_(s32) = G_XOR [[XOR]], [[XOR1]]
    ; CHECK-NEXT: [[XOR5:%[0-9]+]]:_(s32) = G_XOR [[XOR2]], [[XOR3]]
    ; CHECK-NEXT: [[XOR6:%[0-9]+]]:_(s32) = G_XOR [[XOR4]], [[XOR5]]
    ; CHECK-NEXT: $w0 = COPY [[XOR6]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %0:_(<8 x s8>) = COPY $d0
    %1:_(s8) = G_VECREDUCE_XOR %0(<8 x s8>)
    %2:_(s32) = G_ANYEXT %1(s8)
    $w0 = COPY %2(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_redxor_v16i8
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$q0' }
body:             |
  bb.1:
    liveins: $q0

    ; CHECK-LABEL: name: test_redxor_v16i8
    ; CHECK: liveins: $q0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<8 x s8>), [[UV1:%[0-9]+]]:_(<8 x s8>) = G_UNMERGE_VALUES [[COPY]](<16 x s8>)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<8 x s8>) = G_XOR [[UV]], [[UV1]]
    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[XOR]](<8 x s8>)
    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT4]], [[ANYEXT5]]
    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s8)
    ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s8)
    ; CHECK-NEXT: [[XOR4:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT6]], [[ANYEXT7]]
    ; CHECK-NEXT: [[XOR5:%[0-9]+]]:_(s32) = G_XOR [[XOR1]], [[XOR2]]
    ; CHECK-NEXT: [[XOR6:%[0-9]+]]:_(s32) = G_XOR [[XOR3]], [[XOR4]]
    ; CHECK-NEXT: [[XOR7:%[0-9]+]]:_(s32) = G_XOR [[XOR5]], [[XOR6]]
    ; CHECK-NEXT: $w0 = COPY [[XOR7]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %0:_(<16 x s8>) = COPY $q0
    %1:_(s8) = G_VECREDUCE_XOR %0(<16 x s8>)
    %2:_(s32) = G_ANYEXT %1(s8)
    $w0 = COPY %2(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_redxor_v32i8
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$q0' }
  - { reg: '$q1' }
body:             |
  bb.1:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: test_redxor_v32i8
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<16 x s8>) = COPY $q1
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<16 x s8>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<8 x s8>), [[UV1:%[0-9]+]]:_(<8 x s8>) = G_UNMERGE_VALUES [[XOR]](<16 x s8>)
    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(<8 x s8>) = G_XOR [[UV]], [[UV1]]
    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[XOR1]](<8 x s8>)
    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
    ; CHECK-NEXT: [[XOR4:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT4]], [[ANYEXT5]]
    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s8)
    ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s8)
    ; CHECK-NEXT: [[XOR5:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT6]], [[ANYEXT7]]
    ; CHECK-NEXT: [[XOR6:%[0-9]+]]:_(s32) = G_XOR [[XOR2]], [[XOR3]]
    ; CHECK-NEXT: [[XOR7:%[0-9]+]]:_(s32) = G_XOR [[XOR4]], [[XOR5]]
    ; CHECK-NEXT: [[XOR8:%[0-9]+]]:_(s32) = G_XOR [[XOR6]], [[XOR7]]
    ; CHECK-NEXT: $w0 = COPY [[XOR8]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %1:_(<16 x s8>) = COPY $q0
    %2:_(<16 x s8>) = COPY $q1
    %0:_(<32 x s8>) = G_CONCAT_VECTORS %1(<16 x s8>), %2(<16 x s8>)
    %3:_(s8) = G_VECREDUCE_XOR %0(<32 x s8>)
    %4:_(s32) = G_ANYEXT %3(s8)
    $w0 = COPY %4(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_redxor_v4i16
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$d0' }
body:             |
  bb.1:
    liveins: $d0

    ; CHECK-LABEL: name: test_redxor_v4i16
    ; CHECK: liveins: $d0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[XOR]], [[XOR1]]
    ; CHECK-NEXT: $w0 = COPY [[XOR2]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %0:_(<4 x s16>) = COPY $d0
    %1:_(s16) = G_VECREDUCE_XOR %0(<4 x s16>)
    %2:_(s32) = G_ANYEXT %1(s16)
    $w0 = COPY %2(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_redxor_v8i16
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$q0' }
body:             |
  bb.1:
    liveins: $q0

    ; CHECK-LABEL: name: test_redxor_v8i16
    ; CHECK: liveins: $q0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[COPY]](<8 x s16>)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<4 x s16>) = G_XOR [[UV]], [[UV1]]
    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[XOR]](<4 x s16>)
    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s16)
    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s16)
    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[XOR1]], [[XOR2]]
    ; CHECK-NEXT: $w0 = COPY [[XOR3]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %0:_(<8 x s16>) = COPY $q0
    %1:_(s16) = G_VECREDUCE_XOR %0(<8 x s16>)
    %2:_(s32) = G_ANYEXT %1(s16)
    $w0 = COPY %2(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_redxor_v16i16
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$q0' }
  - { reg: '$q1' }
body:             |
  bb.1:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: test_redxor_v16i16
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $q1
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<8 x s16>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[XOR]](<8 x s16>)
    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(<4 x s16>) = G_XOR [[UV]], [[UV1]]
    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[XOR1]](<4 x s16>)
    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s16)
    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s16)
    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
    ; CHECK-NEXT: [[XOR4:%[0-9]+]]:_(s32) = G_XOR [[XOR2]], [[XOR3]]
    ; CHECK-NEXT: $w0 = COPY [[XOR4]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %1:_(<8 x s16>) = COPY $q0
    %2:_(<8 x s16>) = COPY $q1
    %0:_(<16 x s16>) = G_CONCAT_VECTORS %1(<8 x s16>), %2(<8 x s16>)
    %3:_(s16) = G_VECREDUCE_XOR %0(<16 x s16>)
    %4:_(s32) = G_ANYEXT %3(s16)
    $w0 = COPY %4(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_redxor_v2i32
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$d0' }
body:             |
  bb.1:
    liveins: $d0

    ; CHECK-LABEL: name: test_redxor_v2i32
    ; CHECK: liveins: $d0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]]
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[XOR]](s32)
    ; CHECK-NEXT: $w0 = COPY [[COPY1]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %0:_(<2 x s32>) = COPY $d0
    %1:_(s32) = G_VECREDUCE_XOR %0(<2 x s32>)
    $w0 = COPY %1(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_redxor_v4i32
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$q0' }
body:             |
  bb.1:
    liveins: $q0

    ; CHECK-LABEL: name: test_redxor_v4i32
    ; CHECK: liveins: $q0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s32>) = G_XOR [[UV]], [[UV1]]
    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](<2 x s32>)
    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[UV2]], [[UV3]]
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[XOR1]](s32)
    ; CHECK-NEXT: $w0 = COPY [[COPY1]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(s32) = G_VECREDUCE_XOR %0(<4 x s32>)
    $w0 = COPY %1(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_redxor_v8i32
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$q0' }
  - { reg: '$q1' }
body:             |
  bb.1:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: test_redxor_v8i32
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<4 x s32>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[XOR]](<4 x s32>)
    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(<2 x s32>) = G_XOR [[UV]], [[UV1]]
    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR1]](<2 x s32>)
    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[UV2]], [[UV3]]
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[XOR2]](s32)
    ; CHECK-NEXT: $w0 = COPY [[COPY2]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %1:_(<4 x s32>) = COPY $q0
    %2:_(<4 x s32>) = COPY $q1
    %0:_(<8 x s32>) = G_CONCAT_VECTORS %1(<4 x s32>), %2(<4 x s32>)
    %3:_(s32) = G_VECREDUCE_XOR %0(<8 x s32>)
    $w0 = COPY %3(s32)
    RET_ReallyLR implicit $w0

...
---
name:            test_redxor_v2i64
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$q0' }
body:             |
  bb.1:
    liveins: $q0

    ; CHECK-LABEL: name: test_redxor_v2i64
    ; CHECK: liveins: $q0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[UV]], [[UV1]]
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[XOR]](s64)
    ; CHECK-NEXT: $x0 = COPY [[COPY1]](s64)
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %0:_(<2 x s64>) = COPY $q0
    %1:_(s64) = G_VECREDUCE_XOR %0(<2 x s64>)
    $x0 = COPY %1(s64)
    RET_ReallyLR implicit $x0

...
---
name:            test_redxor_v4i64
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$q0' }
  - { reg: '$q1' }
body:             |
  bb.1:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: test_redxor_v4i64
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[XOR]](<2 x s64>)
    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[UV]], [[UV1]]
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[XOR1]](s64)
    ; CHECK-NEXT: $x0 = COPY [[COPY2]](s64)
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %1:_(<2 x s64>) = COPY $q0
    %2:_(<2 x s64>) = COPY $q1
    %0:_(<4 x s64>) = G_CONCAT_VECTORS %1(<2 x s64>), %2(<2 x s64>)
    %3:_(s64) = G_VECREDUCE_XOR %0(<4 x s64>)
    $x0 = COPY %3(s64)
    RET_ReallyLR implicit $x0

...