; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE4
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2

; Given:
;   icmp eq/ne (urem %x, C), 0
; Iff C is not a power of two (such cases should not reach here anyway),
; and %x may have at most one bit set, omit the 'urem':
;   icmp eq/ne %x, 0
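
; For illustration only (a sketch reusing the values from the p0 test below),
; the expected simplification is from:
;   %t0 = and i32 %x, 128
;   %t1 = urem i32 %t0, 6
;   %t2 = icmp eq i32 %t1, 0
; to:
;   %t0 = and i32 %x, 128
;   %t2 = icmp eq i32 %t0, 0
; This is sound because %t0 is either zero or a single power of two, and a
; divisor that is not a power of two can never evenly divide a power of two,
; so the 'urem' result is zero exactly when %t0 itself is zero.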

;------------------------------------------------------------------------------;
; Basic scalar tests
;------------------------------------------------------------------------------;

define i1 @p0_scalar_urem_by_const(i32 %x, i32 %y) {
; CHECK-LABEL: p0_scalar_urem_by_const:
; CHECK:       # %bb.0:
; CHECK-NEXT:    testb $-128, %dil
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    retq
  %t0 = and i32 %x, 128 ; clearly a power-of-two or zero
  %t1 = urem i32 %t0, 6 ; '6' is clearly not a power of two
  %t2 = icmp eq i32 %t1, 0
  ret i1 %t2
}

define i1 @p1_scalar_urem_by_nonconst(i32 %x, i32 %y) {
; CHECK-LABEL: p1_scalar_urem_by_nonconst:
; CHECK:       # %bb.0:
; CHECK-NEXT:    testb $-128, %dil
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    retq
  %t0 = and i32 %x, 128 ; clearly a power-of-two or zero
  %t1 = or i32 %y, 6 ; two bits set, clearly not a power of two
  %t2 = urem i32 %t0, %t1
  %t3 = icmp eq i32 %t2, 0
  ret i1 %t3
}

define i1 @p2_scalar_shifted_urem_by_const(i32 %x, i32 %y) {
; CHECK-LABEL: p2_scalar_shifted_urem_by_const:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %esi, %ecx
; CHECK-NEXT:    andl $1, %edi
; CHECK-NEXT:    # kill: def $cl killed $cl killed $ecx
; CHECK-NEXT:    shll %cl, %edi
; CHECK-NEXT:    imull $-1431655765, %edi, %eax # imm = 0xAAAAAAAB
; CHECK-NEXT:    cmpl $1431655766, %eax # imm = 0x55555556
; CHECK-NEXT:    setb %al
; CHECK-NEXT:    retq
  %t0 = and i32 %x, 1 ; clearly a power-of-two or zero
  %t1 = shl i32 %t0, %y ; will still be a power-of-two or zero with any %y
  %t2 = urem i32 %t1, 3 ; '3' is clearly not a power of two
  %t3 = icmp eq i32 %t2, 0
  ret i1 %t3
}

define i1 @p3_scalar_shifted2_urem_by_const(i32 %x, i32 %y) {
; CHECK-LABEL: p3_scalar_shifted2_urem_by_const:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %esi, %ecx
; CHECK-NEXT:    andl $2, %edi
; CHECK-NEXT:    # kill: def $cl killed $cl killed $ecx
; CHECK-NEXT:    shll %cl, %edi
; CHECK-NEXT:    imull $-1431655765, %edi, %eax # imm = 0xAAAAAAAB
; CHECK-NEXT:    cmpl $1431655766, %eax # imm = 0x55555556
; CHECK-NEXT:    setb %al
; CHECK-NEXT:    retq
  %t0 = and i32 %x, 2 ; clearly a power-of-two or zero
  %t1 = shl i32 %t0, %y ; will still be a power-of-two or zero with any %y
  %t2 = urem i32 %t1, 3 ; '3' is clearly not a power of two
  %t3 = icmp eq i32 %t2, 0
  ret i1 %t3
}
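
; Note: in the two shifted tests above, the 'urem' by 3 survives (see the
; imull/cmpl/setb CHECK lines) and is lowered to the usual
; multiply-by-modular-inverse divisibility check: since
; 3 * 0xAAAAAAAB == 1 (mod 2^32), a value is divisible by 3 iff its product
; with 0xAAAAAAAB (mod 2^32) is below 0x55555556 == floor((2^32 - 1) / 3) + 1,
; which is what 'imull $-1431655765' / 'cmpl $1431655766' / 'setb' computes.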

;------------------------------------------------------------------------------;
; Basic vector tests
;------------------------------------------------------------------------------;

define <4 x i1> @p4_vector_urem_by_const__splat(<4 x i32> %x, <4 x i32> %y) {
; SSE2-LABEL: p4_vector_urem_by_const__splat:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[0,2,2,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
; SSE2-NEXT:    movdqa %xmm3, %xmm0
; SSE2-NEXT:    psrld $1, %xmm0
; SSE2-NEXT:    pslld $31, %xmm3
; SSE2-NEXT:    por %xmm0, %xmm3
; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE2-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE2-NEXT:    pxor %xmm3, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: p4_vector_urem_by_const__splat:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    psrld $1, %xmm0
; SSE4-NEXT:    movdqa {{.*#+}} xmm1 = [715827882,715827882,715827882,715827882]
; SSE4-NEXT:    pminud %xmm0, %xmm1
; SSE4-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX2-LABEL: p4_vector_urem_by_const__splat:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [128,128,128,128]
; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; AVX2-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpsrld $1, %xmm0, %xmm0
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [715827882,715827882,715827882,715827882]
; AVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %t0 = and <4 x i32> %x, <i32 128, i32 128, i32 128, i32 128> ; clearly a power-of-two or zero
  %t1 = urem <4 x i32> %t0, <i32 6, i32 6, i32 6, i32 6> ; '6' is clearly not a power of two
  %t2 = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i1> %t2
}

define <4 x i1> @p5_vector_urem_by_const__nonsplat(<4 x i32> %x, <4 x i32> %y) {
; SSE2-LABEL: p5_vector_urem_by_const__nonsplat:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,3435973837,2863311531,954437177]
; SSE2-NEXT:    pmuludq %xmm0, %xmm1
; SSE2-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT:    por %xmm2, %xmm1
; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: p5_vector_urem_by_const__nonsplat:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE4-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE4-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE4-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE4-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE4-NEXT:    por %xmm2, %xmm0
; SSE4-NEXT:    movdqa {{.*#+}} xmm1 = [1431655765,858993459,715827882,477218588]
; SSE4-NEXT:    pminud %xmm0, %xmm1
; SSE4-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX2-LABEL: p5_vector_urem_by_const__nonsplat:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX2-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %t0 = and <4 x i32> %x, <i32 128, i32 2, i32 4, i32 8>
  %t1 = urem <4 x i32> %t0, <i32 3, i32 5, i32 6, i32 9>
  %t2 = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i1> %t2
}

define <4 x i1> @p6_vector_urem_by_const__nonsplat_undef0(<4 x i32> %x, <4 x i32> %y) {
; SSE2-LABEL: p6_vector_urem_by_const__nonsplat_undef0:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[0,2,2,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
; SSE2-NEXT:    movdqa %xmm3, %xmm0
; SSE2-NEXT:    psrld $1, %xmm0
; SSE2-NEXT:    pslld $31, %xmm3
; SSE2-NEXT:    por %xmm0, %xmm3
; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE2-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE2-NEXT:    pxor %xmm3, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: p6_vector_urem_by_const__nonsplat_undef0:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    movdqa %xmm0, %xmm1
; SSE4-NEXT:    psrld $1, %xmm1
; SSE4-NEXT:    pslld $31, %xmm0
; SSE4-NEXT:    por %xmm1, %xmm0
; SSE4-NEXT:    movdqa {{.*#+}} xmm1 = [715827882,715827882,715827882,715827882]
; SSE4-NEXT:    pminud %xmm0, %xmm1
; SSE4-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX2-LABEL: p6_vector_urem_by_const__nonsplat_undef0:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [128,128,128,128]
; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; AVX2-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpsrld $1, %xmm0, %xmm0
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [715827882,715827882,715827882,715827882]
; AVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %t0 = and <4 x i32> %x, <i32 128, i32 128, i32 undef, i32 128>
  %t1 = urem <4 x i32> %t0, <i32 6, i32 6, i32 6, i32 6> ; '6' is clearly not a power of two
  %t2 = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i1> %t2
}

define <4 x i1> @p7_vector_urem_by_const__nonsplat_undef2(<4 x i32> %x, <4 x i32> %y) {
; SSE2-LABEL: p7_vector_urem_by_const__nonsplat_undef2:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    pmuludq %xmm1, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT:    psrld $2, %xmm2
; SSE2-NEXT:    pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT:    psubd %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: p7_vector_urem_by_const__nonsplat_undef2:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE4-NEXT:    movdqa {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
; SSE4-NEXT:    pmuludq %xmm2, %xmm1
; SSE4-NEXT:    pmuludq %xmm0, %xmm2
; SSE4-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE4-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE4-NEXT:    psrld $2, %xmm2
; SSE4-NEXT:    pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE4-NEXT:    psubd %xmm2, %xmm0
; SSE4-NEXT:    pxor %xmm1, %xmm1
; SSE4-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX2-LABEL: p7_vector_urem_by_const__nonsplat_undef2:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [128,128,128,128]
; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
; AVX2-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpmuludq %xmm2, %xmm0, %xmm2
; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
; AVX2-NEXT:    vpsrld $2, %xmm1, %xmm1
; AVX2-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %t0 = and <4 x i32> %x, <i32 128, i32 128, i32 128, i32 128> ; clearly a power-of-two or zero
  %t1 = urem <4 x i32> %t0, <i32 6, i32 6, i32 6, i32 6> ; '6' is clearly not a power of two
  %t2 = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
  ret <4 x i1> %t2
}

define <4 x i1> @p8_vector_urem_by_const__nonsplat_undef3(<4 x i32> %x, <4 x i32> %y) {
; SSE2-LABEL: p8_vector_urem_by_const__nonsplat_undef3:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    pmuludq %xmm1, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT:    psrld $2, %xmm2
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [6,6,6,6]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT:    psubd %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: p8_vector_urem_by_const__nonsplat_undef3:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE4-NEXT:    movdqa {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
; SSE4-NEXT:    pmuludq %xmm2, %xmm1
; SSE4-NEXT:    pmuludq %xmm0, %xmm2
; SSE4-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE4-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE4-NEXT:    psrld $2, %xmm2
; SSE4-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE4-NEXT:    psubd %xmm2, %xmm0
; SSE4-NEXT:    pxor %xmm1, %xmm1
; SSE4-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX2-LABEL: p8_vector_urem_by_const__nonsplat_undef3:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [128,128,128,128]
; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
; AVX2-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpmuludq %xmm2, %xmm0, %xmm2
; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
; AVX2-NEXT:    vpsrld $2, %xmm1, %xmm1
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [6,6,6,6]
; AVX2-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %t0 = and <4 x i32> %x, <i32 128, i32 128, i32 undef, i32 128>
  %t1 = urem <4 x i32> %t0, <i32 6, i32 6, i32 6, i32 6> ; '6' is clearly not a power of two
  %t2 = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
  ret <4 x i1> %t2
}

;------------------------------------------------------------------------------;
; Basic negative tests
;------------------------------------------------------------------------------;

define i1 @n0_urem_of_maybe_not_power_of_two(i32 %x, i32 %y) {
; CHECK-LABEL: n0_urem_of_maybe_not_power_of_two:
; CHECK:       # %bb.0:
; CHECK-NEXT:    andl $3, %edi
; CHECK-NEXT:    imull $-1431655765, %edi, %eax # imm = 0xAAAAAAAB
; CHECK-NEXT:    cmpl $1431655766, %eax # imm = 0x55555556
; CHECK-NEXT:    setb %al
; CHECK-NEXT:    retq
  %t0 = and i32 %x, 3 ; up to two bits set, so not necessarily a power of two or zero
  %t1 = urem i32 %t0, 3
  %t2 = icmp eq i32 %t1, 0
  ret i1 %t2
}

define i1 @n1_urem_by_maybe_power_of_two(i32 %x, i32 %y) {
; CHECK-LABEL: n1_urem_by_maybe_power_of_two:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    andl $128, %eax
; CHECK-NEXT:    orl $1, %esi
; CHECK-NEXT:    xorl %edx, %edx
; CHECK-NEXT:    divl %esi
; CHECK-NEXT:    testl %edx, %edx
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    retq
  %t0 = and i32 %x, 128 ; clearly a power-of-two or zero
  %t1 = or i32 %y, 1 ; one low bit set, may be a power of two
  %t2 = urem i32 %t0, %t1
  %t3 = icmp eq i32 %t2, 0
  ret i1 %t3
}