# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc %s -verify-machineinstrs -O0 -run-pass=legalizer -mtriple aarch64-unknown-unknown -o - | FileCheck %s
...
---
name:            v2s64
alignment:       4
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: v2s64
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(sgt), [[COPY]](<2 x s64>), [[BUILD_VECTOR]]
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[ICMP]], [[BUILD_VECTOR1]](<2 x s64>)
    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR1]](<2 x s64>)
    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
    ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C2]](s64), [[C2]](s64)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR2]]
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[COPY1]], [[ASHR]]
    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[COPY]], [[XOR]]
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
    ; CHECK-NEXT: $q0 = COPY [[OR]](<2 x s64>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %0:_(<2 x s64>) = COPY $q0
    %1:_(<2 x s64>) = COPY $q1
    %3:_(s64) = G_CONSTANT i64 0
    %2:_(<2 x s64>) = G_BUILD_VECTOR %3(s64), %3(s64)
    %4:_(<2 x s1>) = G_ICMP intpred(sgt), %0(<2 x s64>), %2
    %5:_(<2 x s64>) = G_SELECT %4(<2 x s1>), %1, %0
    $q0 = COPY %5(<2 x s64>)
    RET_ReallyLR implicit $q0

...
---
name:            v2s32
alignment:       4
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $d0, $d1

    ; CHECK-LABEL: name: v2s32
    ; CHECK: liveins: $d0, $d1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<2 x s32>) = G_ICMP intpred(sgt), [[COPY]](<2 x s32>), [[BUILD_VECTOR]]
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32)
    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(<2 x s32>) = G_SHL [[ICMP]], [[BUILD_VECTOR1]](<2 x s32>)
    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(<2 x s32>) = G_ASHR [[SHL]], [[BUILD_VECTOR1]](<2 x s32>)
    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C2]](s32), [[C2]](s32)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s32>) = G_XOR [[ASHR]], [[BUILD_VECTOR2]]
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[COPY1]], [[ASHR]]
    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<2 x s32>) = G_AND [[COPY]], [[XOR]]
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s32>) = G_OR [[AND]], [[AND1]]
    ; CHECK-NEXT: $d0 = COPY [[OR]](<2 x s32>)
    ; CHECK-NEXT: RET_ReallyLR implicit $d0
    %0:_(<2 x s32>) = COPY $d0
    %1:_(<2 x s32>) = COPY $d1
    %3:_(s32) = G_CONSTANT i32 0
    %2:_(<2 x s32>) = G_BUILD_VECTOR %3(s32), %3(s32)
    %4:_(<2 x s1>) = G_ICMP intpred(sgt), %0(<2 x s32>), %2
    %5:_(<2 x s32>) = G_SELECT %4(<2 x s1>), %1, %0
    $d0 = COPY %5(<2 x s32>)
    RET_ReallyLR implicit $d0

...
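# The same compare/shift/mask lowering applies to byte vectors; only the
# sign-extension shift amount changes (7 for s8 elements, versus 63/31 above).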
---
name:            v16s8
alignment:       4
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: v16s8
    ; CHECK: liveins: $q0, $q1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<16 x s8>) = COPY $q1
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8)
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<16 x s8>) = G_ICMP intpred(sgt), [[COPY]](<16 x s8>), [[BUILD_VECTOR]]
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 7
    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8)
    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(<16 x s8>) = G_SHL [[ICMP]], [[BUILD_VECTOR1]](<16 x s8>)
    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(<16 x s8>) = G_ASHR [[SHL]], [[BUILD_VECTOR1]](<16 x s8>)
    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
    ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<16 x s8>) = G_XOR [[ASHR]], [[BUILD_VECTOR2]]
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<16 x s8>) = G_AND [[COPY1]], [[ASHR]]
    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<16 x s8>) = G_AND [[COPY]], [[XOR]]
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<16 x s8>) = G_OR [[AND]], [[AND1]]
    ; CHECK-NEXT: $q0 = COPY [[OR]](<16 x s8>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %0:_(<16 x s8>) = COPY $q0
    %1:_(<16 x s8>) = COPY $q1
    %3:_(s8) = G_CONSTANT i8 0
    %2:_(<16 x s8>) = G_BUILD_VECTOR %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8)
    %4:_(<16 x s1>) = G_ICMP intpred(sgt), %0(<16 x s8>), %2
    %5:_(<16 x s8>) = G_SELECT %4(<16 x s1>), %1, %0
    $q0 = COPY %5(<16 x s8>)
    RET_ReallyLR implicit $q0

...
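# A scalar s1 condition with vector operands is widened, sign-extended to
# 0/-1, splat into a vector via G_INSERT_VECTOR_ELT + G_SHUFFLE_VECTOR, and
# then applied as a bitmask with the same and/xor/or sequence.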
---
name:            scalar_mask
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$w0' }
  - { reg: '$q0' }
body:             |
  bb.1:
    liveins: $q0, $w0

    ; CHECK-LABEL: name: scalar_mask
    ; CHECK: liveins: $q0, $w0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4100
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY]](s32), [[C]]
    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[AND]], 1
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SEXT_INREG]](s32)
    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF
    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY2]](s32), [[C3]](s64)
    ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[IVEC]](<4 x s32>), [[DEF]], shufflemask(0, 0, 0, 0)
    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C4]](s32), [[C4]](s32), [[C4]](s32), [[C4]](s32)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<4 x s32>) = G_XOR [[SHUF]], [[BUILD_VECTOR1]]
    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<4 x s32>) = G_AND [[COPY1]], [[SHUF]]
    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(<4 x s32>) = G_AND [[BUILD_VECTOR]], [[XOR]]
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<4 x s32>) = G_OR [[AND1]], [[AND2]]
    ; CHECK-NEXT: $q0 = COPY [[OR]](<4 x s32>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %0:_(s32) = COPY $w0
    %1:_(<4 x s32>) = COPY $q0
    %2:_(s32) = G_CONSTANT i32 4100
    %6:_(s32) = G_FCONSTANT float 0.000000e+00
    %5:_(<4 x s32>) = G_BUILD_VECTOR %6(s32), %6(s32), %6(s32), %6(s32)
    %3:_(s1) = G_ICMP intpred(eq), %0(s32), %2
    %4:_(<4 x s32>) = G_SELECT %3(s1), %1, %5
    $q0 = COPY %4(<4 x s32>)
    RET_ReallyLR implicit $q0

...
---
name:            s88
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1, $x0

    ; CHECK-LABEL: name: s88
    ; CHECK: liveins: $w0, $w1, $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %a:_(s32) = COPY $w0
    ; CHECK-NEXT: %b:_(s32) = COPY $w1
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(sgt), %a(s32), %b
    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C]]
    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND]](s32), [[DEF]], [[DEF]]
    ; CHECK-NEXT: $x0 = COPY [[SELECT]](s64)
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %a:_(s32) = COPY $w0
    %b:_(s32) = COPY $w1
    %cmp:_(s1) = G_ICMP intpred(sgt), %a(s32), %b
    %sel_a:_(s88) = G_IMPLICIT_DEF
    %sel_b:_(s88) = G_IMPLICIT_DEF
    %select:_(s88) = G_SELECT %cmp(s1), %sel_a, %sel_b
    %trunc:_(s64) = G_TRUNC %select
    $x0 = COPY %trunc
    RET_ReallyLR implicit $x0

...
# The select condition has already been zero extended to s32, and
# needs a sext_inreg to get a vector boolean.
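# The G_SEXT_INREG turns the 0/1 condition into an all-zeros/all-ones s32
# before it is broadcast into the <4 x s32> mask.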
---
name:            scalar_mask_already_promoted_select_s32_v4s32
alignment:       4
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $q0, $w0

    ; CHECK-LABEL: name: scalar_mask_already_promoted_select_s32_v4s32
    ; CHECK: liveins: $q0, $w0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4100
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY]](s32), [[C]]
    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ICMP]], 1
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SEXT_INREG]](s32)
    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF
    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY2]](s32), [[C2]](s64)
    ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[IVEC]](<4 x s32>), [[DEF]], shufflemask(0, 0, 0, 0)
    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C3]](s32), [[C3]](s32), [[C3]](s32), [[C3]](s32)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<4 x s32>) = G_XOR [[SHUF]], [[BUILD_VECTOR1]]
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s32>) = G_AND [[COPY1]], [[SHUF]]
    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<4 x s32>) = G_AND [[BUILD_VECTOR]], [[XOR]]
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<4 x s32>) = G_OR [[AND]], [[AND1]]
    ; CHECK-NEXT: $q0 = COPY [[OR]](<4 x s32>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %0:_(s32) = COPY $w0
    %1:_(<4 x s32>) = COPY $q0
    %2:_(s32) = G_CONSTANT i32 4100
    %6:_(s32) = G_FCONSTANT float 0.000000e+00
    %5:_(<4 x s32>) = G_BUILD_VECTOR %6(s32), %6(s32), %6(s32), %6(s32)
    %3:_(s32) = G_ICMP intpred(eq), %0(s32), %2
    %4:_(<4 x s32>) = G_SELECT %3(s32), %1, %5
    $q0 = COPY %4(<4 x s32>)
    RET_ReallyLR implicit $q0

...
# The scalar select condition was zero extended to s32, to a different
# type from the vector width. It needs to be sign extended inreg, and
# then sign extended to the full element width.
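# That is: G_SEXT_INREG on the s32 condition, then G_SEXT to s64 so the
# splat matches the <2 x s64> element type.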
---
name:            scalar_mask_select_s32_v4s64
alignment:       4
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $q0, $w0

    ; CHECK-LABEL: name: scalar_mask_select_s32_v4s64
    ; CHECK: liveins: $q0, $w0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4100
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY]](s32), [[C]]
    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ICMP]], 1
    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SEXT_INREG]](s32)
    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[SEXT]](s64), [[C2]](s64)
    ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<2 x s64>) = G_SHUFFLE_VECTOR [[IVEC]](<2 x s64>), [[DEF]], shufflemask(0, 0)
    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C3]](s64), [[C3]](s64)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[SHUF]], [[BUILD_VECTOR1]]
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[COPY1]], [[SHUF]]
    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[BUILD_VECTOR]], [[XOR]]
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
    ; CHECK-NEXT: $q0 = COPY [[OR]](<2 x s64>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %0:_(s32) = COPY $w0
    %1:_(<2 x s64>) = COPY $q0
    %2:_(s32) = G_CONSTANT i32 4100
    %6:_(s64) = G_FCONSTANT double 0.000000e+00
    %5:_(<2 x s64>) = G_BUILD_VECTOR %6, %6
    %3:_(s32) = G_ICMP intpred(eq), %0(s32), %2
    %4:_(<2 x s64>) = G_SELECT %3(s32), %1, %5
    $q0 = COPY %4
    RET_ReallyLR implicit $q0

...
# Check degenerate case where the selected element size is the same as
# the condition bitwidth.
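# Since the elements are s1, the condition needs no sign extension; it is
# broadcast as-is and the inverted mask is built from i1 true.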
---
name:            select_v4s1_s1
alignment:       4
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $q0, $q1, $q2, $w0

    ; CHECK-LABEL: name: select_v4s1_s1
    ; CHECK: liveins: $q0, $q1, $q2, $w0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %w0:_(s32) = COPY $w0
    ; CHECK-NEXT: %q0:_(<4 x s32>) = COPY $q0
    ; CHECK-NEXT: %q1:_(<4 x s32>) = COPY $q1
    ; CHECK-NEXT: %q2:_(<4 x s32>) = COPY $q2
    ; CHECK-NEXT: %vec_cond0:_(<4 x s1>) = G_ICMP intpred(eq), %q0(<4 x s32>), %q1
    ; CHECK-NEXT: %vec_cond1:_(<4 x s1>) = G_ICMP intpred(eq), %q0(<4 x s32>), %q2
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4100
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
    ; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(eq), %w0(s32), [[C]]
    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT %cmp(s1)
    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ZEXT]](s32)
    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<4 x s1>) = G_IMPLICIT_DEF
    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C2]](s64)
    ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<4 x s1>) = G_SHUFFLE_VECTOR [[IVEC]](<4 x s1>), [[DEF]], shufflemask(0, 0, 0, 0)
    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s1>) = G_BUILD_VECTOR [[C3]](s1), [[C3]](s1), [[C3]](s1), [[C3]](s1)
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<4 x s1>) = G_XOR [[SHUF]], [[BUILD_VECTOR1]]
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s1>) = G_AND %vec_cond0, [[SHUF]]
    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<4 x s1>) = G_AND %vec_cond1, [[XOR]]
    ; CHECK-NEXT: %select:_(<4 x s1>) = G_OR [[AND]], [[AND1]]
    ; CHECK-NEXT: %zext_select:_(<4 x s32>) = G_ZEXT %select(<4 x s1>)
    ; CHECK-NEXT: $q0 = COPY %zext_select(<4 x s32>)
    ; CHECK-NEXT: RET_ReallyLR implicit $q0
    %w0:_(s32) = COPY $w0
    %q0:_(<4 x s32>) = COPY $q0
    %q1:_(<4 x s32>) = COPY $q1
    %q2:_(<4 x s32>) = COPY $q2
    %vec_cond0:_(<4 x s1>) = G_ICMP intpred(eq), %q0, %q1
    %vec_cond1:_(<4 x s1>) = G_ICMP intpred(eq), %q0, %q2
    %2:_(s32) = G_CONSTANT i32 4100
    %6:_(s32) = G_FCONSTANT float 0.000000e+00
    %5:_(<4 x s32>) = G_BUILD_VECTOR %6(s32), %6(s32), %6(s32), %6(s32)
    %cmp:_(s1) = G_ICMP intpred(eq), %w0, %2
    %select:_(<4 x s1>) = G_SELECT %cmp, %vec_cond0, %vec_cond1
    %zext_select:_(<4 x s32>) = G_ZEXT %select
    $q0 = COPY %zext_select
    RET_ReallyLR implicit $q0
...