; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,KNL ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX512,SKX define i1 @allones_v16i8_sign(<16 x i8> %arg) { ; SSE-LABEL: allones_v16i8_sign: ; SSE: # %bb.0: ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: cmpw $-1, %ax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX-LABEL: allones_v16i8_sign: ; AVX: # %bb.0: ; AVX-NEXT: vpmovmskb %xmm0, %eax ; AVX-NEXT: cmpw $-1, %ax ; AVX-NEXT: sete %al ; AVX-NEXT: retq %tmp = icmp slt <16 x i8> %arg, zeroinitializer %tmp1 = bitcast <16 x i1> %tmp to i16 %tmp2 = icmp eq i16 %tmp1, -1 ret i1 %tmp2 } define i1 @allzeros_v16i8_sign(<16 x i8> %arg) { ; SSE-LABEL: allzeros_v16i8_sign: ; SSE: # %bb.0: ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX-LABEL: allzeros_v16i8_sign: ; AVX: # %bb.0: ; AVX-NEXT: vpmovmskb %xmm0, %eax ; AVX-NEXT: testl %eax, %eax ; AVX-NEXT: sete %al ; AVX-NEXT: retq %tmp = icmp slt <16 x i8> %arg, zeroinitializer %tmp1 = bitcast <16 x i1> %tmp to i16 %tmp2 = icmp eq i16 %tmp1, 0 ret i1 %tmp2 } define i1 @allones_v32i8_sign(<32 x i8> %arg) { ; SSE-LABEL: allones_v32i8_sign: ; SSE: # %bb.0: ; SSE-NEXT: pand %xmm1, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: cmpw $-1, %ax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allones_v32i8_sign: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: cmpw $-1, %ax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allones_v32i8_sign: ; AVX2: # %bb.0: ; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: cmpl $-1, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: allones_v32i8_sign: ; AVX512: # %bb.0: ; AVX512-NEXT: vpmovmskb %ymm0, %eax ; AVX512-NEXT: cmpl $-1, %eax ; AVX512-NEXT: sete %al ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %tmp = icmp slt <32 x i8> %arg, zeroinitializer %tmp1 = bitcast <32 x i1> %tmp to i32 %tmp2 = icmp eq i32 %tmp1, -1 ret i1 %tmp2 } define i1 @allzeros_v32i8_sign(<32 x i8> %arg) { ; SSE-LABEL: allzeros_v32i8_sign: ; SSE: # %bb.0: ; SSE-NEXT: por %xmm1, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allzeros_v32i8_sign: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: testl %eax, %eax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allzeros_v32i8_sign: ; AVX2: # %bb.0: ; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: testl %eax, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: allzeros_v32i8_sign: ; AVX512: # %bb.0: ; AVX512-NEXT: vpmovmskb %ymm0, %eax ; AVX512-NEXT: testl %eax, %eax ; AVX512-NEXT: sete %al ; AVX512-NEXT: vzeroupper ; 
AVX512-NEXT: retq %tmp = icmp slt <32 x i8> %arg, zeroinitializer %tmp1 = bitcast <32 x i1> %tmp to i32 %tmp2 = icmp eq i32 %tmp1, 0 ret i1 %tmp2 } define i1 @allones_v64i8_sign(<64 x i8> %arg) { ; SSE-LABEL: allones_v64i8_sign: ; SSE: # %bb.0: ; SSE-NEXT: pand %xmm2, %xmm0 ; SSE-NEXT: pand %xmm1, %xmm0 ; SSE-NEXT: pand %xmm3, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: cmpw $-1, %ax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allones_v64i8_sign: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 ; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpand %xmm0, %xmm2, %xmm0 ; AVX1-NEXT: vpand %xmm0, %xmm3, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: cmpw $-1, %ax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allones_v64i8_sign: ; AVX2: # %bb.0: ; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm0 ; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: cmpl $-1, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allones_v64i8_sign: ; KNL: # %bb.0: ; KNL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; KNL-NEXT: vpand %ymm0, %ymm1, %ymm0 ; KNL-NEXT: vpmovmskb %ymm0, %eax ; KNL-NEXT: cmpl $-1, %eax ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allones_v64i8_sign: ; SKX: # %bb.0: ; SKX-NEXT: vpmovb2m %zmm0, %k0 ; SKX-NEXT: kortestq %k0, %k0 ; SKX-NEXT: setb %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = icmp slt <64 x i8> %arg, zeroinitializer %tmp1 = bitcast <64 x i1> %tmp to i64 %tmp2 = icmp eq i64 %tmp1, -1 ret i1 %tmp2 } define i1 @allzeros_v64i8_sign(<64 x i8> %arg) { ; SSE-LABEL: allzeros_v64i8_sign: ; SSE: # %bb.0: ; SSE-NEXT: por %xmm3, %xmm1 ; SSE-NEXT: por %xmm2, %xmm1 ; SSE-NEXT: por %xmm0, %xmm1 ; SSE-NEXT: pmovmskb %xmm1, %eax ; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allzeros_v64i8_sign: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 ; AVX1-NEXT: vpor %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: testl %eax, %eax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allzeros_v64i8_sign: ; AVX2: # %bb.0: ; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: testl %eax, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allzeros_v64i8_sign: ; KNL: # %bb.0: ; KNL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; KNL-NEXT: vpor %ymm1, %ymm0, %ymm0 ; KNL-NEXT: vpmovmskb %ymm0, %eax ; KNL-NEXT: testl %eax, %eax ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v64i8_sign: ; SKX: # %bb.0: ; SKX-NEXT: vpmovb2m %zmm0, %k0 ; SKX-NEXT: kortestq %k0, %k0 ; SKX-NEXT: sete %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = icmp slt <64 x i8> %arg, zeroinitializer %tmp1 = bitcast <64 x i1> %tmp to i64 %tmp2 = icmp eq i64 %tmp1, 0 ret i1 %tmp2 } define i1 @allones_v8i16_sign(<8 x i16> %arg) { ; SSE-LABEL: allones_v8i16_sign: ; SSE: # %bb.0: ; SSE-NEXT: packsswb %xmm0, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: cmpb $-1, %al ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: allones_v8i16_sign: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax ; AVX1OR2-NEXT: cmpb $-1, %al ; AVX1OR2-NEXT: sete %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: allones_v8i16_sign: ; KNL: # %bb.0: ; KNL-NEXT: 
vpxor %xmm1, %xmm1, %xmm1 ; KNL-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0 ; KNL-NEXT: vpmovsxwq %xmm0, %zmm0 ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: cmpb $-1, %al ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allones_v8i16_sign: ; SKX: # %bb.0: ; SKX-NEXT: vpmovw2m %xmm0, %k0 ; SKX-NEXT: kortestb %k0, %k0 ; SKX-NEXT: setb %al ; SKX-NEXT: retq %tmp = icmp slt <8 x i16> %arg, zeroinitializer %tmp1 = bitcast <8 x i1> %tmp to i8 %tmp2 = icmp eq i8 %tmp1, -1 ret i1 %tmp2 } define i1 @allzeros_v8i16_sign(<8 x i16> %arg) { ; SSE-LABEL: allzeros_v8i16_sign: ; SSE: # %bb.0: ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: testl $43690, %eax # imm = 0xAAAA ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: allzeros_v8i16_sign: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax ; AVX1OR2-NEXT: testl $43690, %eax # imm = 0xAAAA ; AVX1OR2-NEXT: sete %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: allzeros_v8i16_sign: ; KNL: # %bb.0: ; KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; KNL-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0 ; KNL-NEXT: vpmovsxwq %xmm0, %zmm0 ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb %al, %al ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v8i16_sign: ; SKX: # %bb.0: ; SKX-NEXT: vpmovw2m %xmm0, %k0 ; SKX-NEXT: kortestb %k0, %k0 ; SKX-NEXT: sete %al ; SKX-NEXT: retq %tmp = icmp slt <8 x i16> %arg, zeroinitializer %tmp1 = bitcast <8 x i1> %tmp to i8 %tmp2 = icmp eq i8 %tmp1, 0 ret i1 %tmp2 } define i1 @allones_v16i16_sign(<16 x i16> %arg) { ; SSE-LABEL: allones_v16i16_sign: ; SSE: # %bb.0: ; SSE-NEXT: packsswb %xmm1, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: cmpw $-1, %ax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allones_v16i16_sign: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: cmpw $-1, %ax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allones_v16i16_sign: ; AVX2: # %bb.0: ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpmovmskb %xmm0, %eax ; AVX2-NEXT: cmpw $-1, %ax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allones_v16i16_sign: ; KNL: # %bb.0: ; KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; KNL-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0 ; KNL-NEXT: vpmovsxwd %ymm0, %zmm0 ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 ; KNL-NEXT: kortestw %k0, %k0 ; KNL-NEXT: setb %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allones_v16i16_sign: ; SKX: # %bb.0: ; SKX-NEXT: vpmovw2m %ymm0, %k0 ; SKX-NEXT: kortestw %k0, %k0 ; SKX-NEXT: setb %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = icmp slt <16 x i16> %arg, zeroinitializer %tmp1 = bitcast <16 x i1> %tmp to i16 %tmp2 = icmp eq i16 %tmp1, -1 ret i1 %tmp2 } define i1 @allzeros_v16i16_sign(<16 x i16> %arg) { ; SSE-LABEL: allzeros_v16i16_sign: ; SSE: # %bb.0: ; SSE-NEXT: packsswb %xmm1, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allzeros_v16i16_sign: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: testl %eax, %eax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allzeros_v16i16_sign: ; AVX2: # %bb.0: ; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: testl $-1431655766, %eax # imm = 
0xAAAAAAAA ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allzeros_v16i16_sign: ; KNL: # %bb.0: ; KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; KNL-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0 ; KNL-NEXT: vpmovsxwd %ymm0, %zmm0 ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 ; KNL-NEXT: kortestw %k0, %k0 ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v16i16_sign: ; SKX: # %bb.0: ; SKX-NEXT: vpmovw2m %ymm0, %k0 ; SKX-NEXT: kortestw %k0, %k0 ; SKX-NEXT: sete %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = icmp slt <16 x i16> %arg, zeroinitializer %tmp1 = bitcast <16 x i1> %tmp to i16 %tmp2 = icmp eq i16 %tmp1, 0 ret i1 %tmp2 } define i1 @allones_v32i16_sign(<32 x i16> %arg) { ; SSE-LABEL: allones_v32i16_sign: ; SSE: # %bb.0: ; SSE-NEXT: packsswb %xmm1, %xmm0 ; SSE-NEXT: packsswb %xmm3, %xmm2 ; SSE-NEXT: pand %xmm0, %xmm2 ; SSE-NEXT: pmovmskb %xmm2, %eax ; SSE-NEXT: cmpw $-1, %ax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allones_v32i16_sign: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vpacksswb %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: cmpw $-1, %ax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allones_v32i16_sign: ; AVX2: # %bb.0: ; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: cmpl $-1, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allones_v32i16_sign: ; KNL: # %bb.0: ; KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; KNL-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm2 ; KNL-NEXT: vpmovsxwd %ymm2, %zmm2 ; KNL-NEXT: vptestmd %zmm2, %zmm2, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: vextracti64x4 $1, %zmm0, %ymm0 ; KNL-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0 ; KNL-NEXT: vpmovsxwd %ymm0, %zmm0 ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %ecx ; KNL-NEXT: andl %eax, %ecx ; KNL-NEXT: cmpl $65535, %ecx # imm = 0xFFFF ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allones_v32i16_sign: ; SKX: # %bb.0: ; SKX-NEXT: vpmovw2m %zmm0, %k0 ; SKX-NEXT: kortestd %k0, %k0 ; SKX-NEXT: setb %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = icmp slt <32 x i16> %arg, zeroinitializer %tmp1 = bitcast <32 x i1> %tmp to i32 %tmp2 = icmp eq i32 %tmp1, -1 ret i1 %tmp2 } define i1 @allzeros_v32i16_sign(<32 x i16> %arg) { ; SSE-LABEL: allzeros_v32i16_sign: ; SSE: # %bb.0: ; SSE-NEXT: packsswb %xmm3, %xmm2 ; SSE-NEXT: packsswb %xmm1, %xmm0 ; SSE-NEXT: por %xmm2, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allzeros_v32i16_sign: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vpacksswb %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: testl %eax, %eax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allzeros_v32i16_sign: ; AVX2: # %bb.0: ; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: testl %eax, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allzeros_v32i16_sign: ; KNL: # %bb.0: ; KNL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; KNL-NEXT: vpcmpgtw %ymm1, %ymm2, %ymm1 ; 
KNL-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm0 ; KNL-NEXT: vpor %ymm1, %ymm0, %ymm0 ; KNL-NEXT: vpmovsxwd %ymm0, %zmm0 ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 ; KNL-NEXT: kortestw %k0, %k0 ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v32i16_sign: ; SKX: # %bb.0: ; SKX-NEXT: vpmovw2m %zmm0, %k0 ; SKX-NEXT: kortestd %k0, %k0 ; SKX-NEXT: sete %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = icmp slt <32 x i16> %arg, zeroinitializer %tmp1 = bitcast <32 x i1> %tmp to i32 %tmp2 = icmp eq i32 %tmp1, 0 ret i1 %tmp2 } define i1 @allones_v4i32_sign(<4 x i32> %arg) { ; SSE-LABEL: allones_v4i32_sign: ; SSE: # %bb.0: ; SSE-NEXT: movmskps %xmm0, %eax ; SSE-NEXT: cmpb $15, %al ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX-LABEL: allones_v4i32_sign: ; AVX: # %bb.0: ; AVX-NEXT: vmovmskps %xmm0, %eax ; AVX-NEXT: cmpb $15, %al ; AVX-NEXT: sete %al ; AVX-NEXT: retq %tmp = icmp slt <4 x i32> %arg, zeroinitializer %tmp1 = bitcast <4 x i1> %tmp to i4 %tmp2 = icmp eq i4 %tmp1, -1 ret i1 %tmp2 } define i1 @allzeros_v4i32_sign(<4 x i32> %arg) { ; SSE-LABEL: allzeros_v4i32_sign: ; SSE: # %bb.0: ; SSE-NEXT: movmskps %xmm0, %eax ; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX-LABEL: allzeros_v4i32_sign: ; AVX: # %bb.0: ; AVX-NEXT: vmovmskps %xmm0, %eax ; AVX-NEXT: testl %eax, %eax ; AVX-NEXT: sete %al ; AVX-NEXT: retq %tmp = icmp slt <4 x i32> %arg, zeroinitializer %tmp1 = bitcast <4 x i1> %tmp to i4 %tmp2 = icmp eq i4 %tmp1, 0 ret i1 %tmp2 } define i1 @allones_v8i32_sign(<8 x i32> %arg) { ; SSE-LABEL: allones_v8i32_sign: ; SSE: # %bb.0: ; SSE-NEXT: packssdw %xmm1, %xmm0 ; SSE-NEXT: packsswb %xmm0, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: cmpb $-1, %al ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX-LABEL: allones_v8i32_sign: ; AVX: # %bb.0: ; AVX-NEXT: vmovmskps %ymm0, %eax ; AVX-NEXT: cmpb $-1, %al ; AVX-NEXT: sete %al ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq %tmp = icmp slt <8 x i32> %arg, zeroinitializer %tmp1 = bitcast <8 x i1> %tmp to i8 %tmp2 = icmp eq i8 %tmp1, -1 ret i1 %tmp2 } define i1 @allzeros_v8i32_sign(<8 x i32> %arg) { ; SSE-LABEL: allzeros_v8i32_sign: ; SSE: # %bb.0: ; SSE-NEXT: packssdw %xmm1, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: testl $43690, %eax # imm = 0xAAAA ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX-LABEL: allzeros_v8i32_sign: ; AVX: # %bb.0: ; AVX-NEXT: vmovmskps %ymm0, %eax ; AVX-NEXT: testl %eax, %eax ; AVX-NEXT: sete %al ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq %tmp = icmp slt <8 x i32> %arg, zeroinitializer %tmp1 = bitcast <8 x i1> %tmp to i8 %tmp2 = icmp eq i8 %tmp1, 0 ret i1 %tmp2 } define i1 @allones_v16i32_sign(<16 x i32> %arg) { ; SSE-LABEL: allones_v16i32_sign: ; SSE: # %bb.0: ; SSE-NEXT: packssdw %xmm3, %xmm2 ; SSE-NEXT: packssdw %xmm1, %xmm0 ; SSE-NEXT: packsswb %xmm2, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: cmpw $-1, %ax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allones_v16i32_sign: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: cmpw $-1, %ax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allones_v16i32_sign: ; AVX2: # %bb.0: ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX2-NEXT: vpcmpgtd %ymm1, %ymm2, %ymm1 ; AVX2-NEXT: vpcmpgtd %ymm0, %ymm2, %ymm0 ; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpmovmskb 
%ymm0, %eax ; AVX2-NEXT: cmpl $-1, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allones_v16i32_sign: ; KNL: # %bb.0: ; KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; KNL-NEXT: vpcmpgtd %zmm0, %zmm1, %k0 ; KNL-NEXT: kortestw %k0, %k0 ; KNL-NEXT: setb %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allones_v16i32_sign: ; SKX: # %bb.0: ; SKX-NEXT: vpmovd2m %zmm0, %k0 ; SKX-NEXT: kortestw %k0, %k0 ; SKX-NEXT: setb %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = icmp slt <16 x i32> %arg, zeroinitializer %tmp1 = bitcast <16 x i1> %tmp to i16 %tmp2 = icmp eq i16 %tmp1, -1 ret i1 %tmp2 } define i1 @allzeros_v16i32_sign(<16 x i32> %arg) { ; SSE-LABEL: allzeros_v16i32_sign: ; SSE: # %bb.0: ; SSE-NEXT: packssdw %xmm3, %xmm2 ; SSE-NEXT: packssdw %xmm1, %xmm0 ; SSE-NEXT: packsswb %xmm2, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allzeros_v16i32_sign: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: testl %eax, %eax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allzeros_v16i32_sign: ; AVX2: # %bb.0: ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX2-NEXT: vpcmpgtd %ymm1, %ymm2, %ymm1 ; AVX2-NEXT: vpcmpgtd %ymm0, %ymm2, %ymm0 ; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: testl %eax, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allzeros_v16i32_sign: ; KNL: # %bb.0: ; KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; KNL-NEXT: vpcmpgtd %zmm0, %zmm1, %k0 ; KNL-NEXT: kortestw %k0, %k0 ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v16i32_sign: ; SKX: # %bb.0: ; SKX-NEXT: vpmovd2m %zmm0, %k0 ; SKX-NEXT: kortestw %k0, %k0 ; SKX-NEXT: sete %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = icmp slt <16 x i32> %arg, zeroinitializer %tmp1 = bitcast <16 x i1> %tmp to i16 %tmp2 = icmp eq i16 %tmp1, 0 ret i1 %tmp2 } define i1 @allones_v4i64_sign(<4 x i64> %arg) { ; SSE-LABEL: allones_v4i64_sign: ; SSE: # %bb.0: ; SSE-NEXT: packssdw %xmm1, %xmm0 ; SSE-NEXT: movmskps %xmm0, %eax ; SSE-NEXT: cmpb $15, %al ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX-LABEL: allones_v4i64_sign: ; AVX: # %bb.0: ; AVX-NEXT: vmovmskpd %ymm0, %eax ; AVX-NEXT: cmpb $15, %al ; AVX-NEXT: sete %al ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq %tmp = icmp slt <4 x i64> %arg, zeroinitializer %tmp1 = bitcast <4 x i1> %tmp to i4 %tmp2 = icmp eq i4 %tmp1, -1 ret i1 %tmp2 } define i1 @allzeros_v4i64_sign(<4 x i64> %arg) { ; SSE-LABEL: allzeros_v4i64_sign: ; SSE: # %bb.0: ; SSE-NEXT: packssdw %xmm1, %xmm0 ; SSE-NEXT: movmskps %xmm0, %eax ; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX-LABEL: allzeros_v4i64_sign: ; AVX: # %bb.0: ; AVX-NEXT: vmovmskpd %ymm0, %eax ; AVX-NEXT: testl %eax, %eax ; AVX-NEXT: sete %al ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq %tmp = icmp slt <4 x i64> %arg, zeroinitializer %tmp1 = bitcast <4 x i1> %tmp to i4 %tmp2 = icmp eq i4 %tmp1, 0 ret i1 %tmp2 } define i1 @allones_v8i64_sign(<8 x i64> %arg) { ; SSE-LABEL: allones_v8i64_sign: ; SSE: # %bb.0: ; SSE-NEXT: packssdw %xmm3, %xmm2 ; SSE-NEXT: packssdw %xmm1, %xmm0 ; SSE-NEXT: packssdw %xmm2, %xmm0 ; SSE-NEXT: packsswb %xmm0, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: 
cmpb $-1, %al ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allones_v8i64_sign: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vmovmskps %xmm0, %eax ; AVX1-NEXT: cmpl $15, %eax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allones_v8i64_sign: ; AVX2: # %bb.0: ; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vmovmskps %ymm0, %eax ; AVX2-NEXT: cmpb $-1, %al ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allones_v8i64_sign: ; KNL: # %bb.0: ; KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; KNL-NEXT: vpcmpgtq %zmm0, %zmm1, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: cmpb $-1, %al ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allones_v8i64_sign: ; SKX: # %bb.0: ; SKX-NEXT: vpmovq2m %zmm0, %k0 ; SKX-NEXT: kortestb %k0, %k0 ; SKX-NEXT: setb %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = icmp slt <8 x i64> %arg, zeroinitializer %tmp1 = bitcast <8 x i1> %tmp to i8 %tmp2 = icmp eq i8 %tmp1, -1 ret i1 %tmp2 } define i1 @allzeros_v8i64_sign(<8 x i64> %arg) { ; SSE-LABEL: allzeros_v8i64_sign: ; SSE: # %bb.0: ; SSE-NEXT: packssdw %xmm3, %xmm2 ; SSE-NEXT: packssdw %xmm1, %xmm0 ; SSE-NEXT: packssdw %xmm2, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: testl $43690, %eax # imm = 0xAAAA ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allzeros_v8i64_sign: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vmovmskps %xmm0, %eax ; AVX1-NEXT: testl %eax, %eax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allzeros_v8i64_sign: ; AVX2: # %bb.0: ; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vmovmskps %ymm0, %eax ; AVX2-NEXT: testl %eax, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allzeros_v8i64_sign: ; KNL: # %bb.0: ; KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; KNL-NEXT: vpcmpgtq %zmm0, %zmm1, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb %al, %al ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v8i64_sign: ; SKX: # %bb.0: ; SKX-NEXT: vpmovq2m %zmm0, %k0 ; SKX-NEXT: kortestb %k0, %k0 ; SKX-NEXT: sete %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = icmp slt <8 x i64> %arg, zeroinitializer %tmp1 = bitcast <8 x i1> %tmp to i8 %tmp2 = icmp eq i8 %tmp1, 0 ret i1 %tmp2 } define i1 @allones_v16i8_and1(<16 x i8> %arg) { ; SSE-LABEL: allones_v16i8_and1: ; SSE: # %bb.0: ; SSE-NEXT: psllw $7, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: cmpw $-1, %ax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: allones_v16i8_and1: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vpsllw $7, %xmm0, %xmm0 ; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax ; AVX1OR2-NEXT: cmpw $-1, %ax ; AVX1OR2-NEXT: sete %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: allones_v16i8_and1: ; KNL: # %bb.0: ; KNL-NEXT: vpsllw $7, %xmm0, %xmm0 ; KNL-NEXT: vpmovmskb %xmm0, %eax ; KNL-NEXT: cmpw $-1, %ax ; KNL-NEXT: sete %al ; KNL-NEXT: retq ; ; SKX-LABEL: allones_v16i8_and1: ; SKX: # %bb.0: ; SKX-NEXT: vptestmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k0 ; SKX-NEXT: kortestw %k0, %k0 ; SKX-NEXT: setb %al ; SKX-NEXT: retq %tmp = and <16 x i8> %arg, <i8 1, i8 1, i8 1, i8 1, i8 1, 
i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> %tmp1 = icmp ne <16 x i8> %tmp, zeroinitializer %tmp2 = bitcast <16 x i1> %tmp1 to i16 %tmp3 = icmp eq i16 %tmp2, -1 ret i1 %tmp3 } define i1 @allzeros_v16i8_not(<16 x i8> %a0) { ; SSE2-LABEL: allzeros_v16i8_not: ; SSE2: # %bb.0: ; SSE2-NEXT: pxor %xmm1, %xmm1 ; SSE2-NEXT: pcmpeqb %xmm0, %xmm1 ; SSE2-NEXT: pmovmskb %xmm1, %eax ; SSE2-NEXT: cmpw $-1, %ax ; SSE2-NEXT: setne %al ; SSE2-NEXT: retq ; ; SSE41-LABEL: allzeros_v16i8_not: ; SSE41: # %bb.0: ; SSE41-NEXT: ptest %xmm0, %xmm0 ; SSE41-NEXT: setne %al ; SSE41-NEXT: retq ; ; AVX1OR2-LABEL: allzeros_v16i8_not: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vptest %xmm0, %xmm0 ; AVX1OR2-NEXT: setne %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: allzeros_v16i8_not: ; KNL: # %bb.0: ; KNL-NEXT: vptest %xmm0, %xmm0 ; KNL-NEXT: setne %al ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v16i8_not: ; SKX: # %bb.0: ; SKX-NEXT: vptestnmb %xmm0, %xmm0, %k0 ; SKX-NEXT: kortestw %k0, %k0 ; SKX-NEXT: setae %al ; SKX-NEXT: retq %1 = icmp eq <16 x i8> %a0, zeroinitializer %2 = bitcast <16 x i1> %1 to i16 %3 = icmp ne i16 %2, -1 ret i1 %3 } define i1 @allzeros_v2i64_not(<2 x i64> %a0) { ; SSE2-LABEL: allzeros_v2i64_not: ; SSE2: # %bb.0: ; SSE2-NEXT: pxor %xmm1, %xmm1 ; SSE2-NEXT: pcmpeqd %xmm0, %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2] ; SSE2-NEXT: pand %xmm1, %xmm0 ; SSE2-NEXT: movmskpd %xmm0, %eax ; SSE2-NEXT: cmpb $3, %al ; SSE2-NEXT: setne %al ; SSE2-NEXT: retq ; ; SSE41-LABEL: allzeros_v2i64_not: ; SSE41: # %bb.0: ; SSE41-NEXT: ptest %xmm0, %xmm0 ; SSE41-NEXT: setne %al ; SSE41-NEXT: retq ; ; AVX1OR2-LABEL: allzeros_v2i64_not: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vptest %xmm0, %xmm0 ; AVX1OR2-NEXT: setne %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: allzeros_v2i64_not: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb $3, %al ; KNL-NEXT: setne %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v2i64_not: ; SKX: # %bb.0: ; SKX-NEXT: vptestnmq %xmm0, %xmm0, %k0 ; SKX-NEXT: kmovd %k0, %eax ; SKX-NEXT: cmpb $3, %al ; SKX-NEXT: setne %al ; SKX-NEXT: retq %1 = icmp eq <2 x i64> %a0, zeroinitializer %2 = bitcast <2 x i1> %1 to i2 %3 = icmp ne i2 %2, -1 ret i1 %3 } define i1 @allzeros_v8i32_not(<8 x i32> %a0) { ; SSE-LABEL: allzeros_v8i32_not: ; SSE: # %bb.0: ; SSE-NEXT: pxor %xmm2, %xmm2 ; SSE-NEXT: pcmpeqd %xmm2, %xmm1 ; SSE-NEXT: pcmpeqd %xmm2, %xmm0 ; SSE-NEXT: packssdw %xmm1, %xmm0 ; SSE-NEXT: packsswb %xmm0, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: cmpb $-1, %al ; SSE-NEXT: setne %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allzeros_v8i32_not: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vptest %xmm0, %xmm0 ; AVX1-NEXT: setne %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allzeros_v8i32_not: ; AVX2: # %bb.0: ; AVX2-NEXT: vptest %ymm0, %ymm0 ; AVX2-NEXT: setne %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allzeros_v8i32_not: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL-NEXT: vptestnmd %zmm0, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: cmpb $-1, %al ; KNL-NEXT: setne %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v8i32_not: ; SKX: # %bb.0: ; SKX-NEXT: vptestnmd %ymm0, %ymm0, %k0 ; SKX-NEXT: kortestb %k0, %k0 ; SKX-NEXT: setae %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %1 = icmp eq <8 x i32> %a0, zeroinitializer %2 = bitcast <8 x i1> %1 
to i8 %3 = icmp ne i8 %2, -1 ret i1 %3 } define i1 @allzeros_v8i64_not(<8 x i64> %a0) { ; SSE2-LABEL: allzeros_v8i64_not: ; SSE2: # %bb.0: ; SSE2-NEXT: pxor %xmm4, %xmm4 ; SSE2-NEXT: pcmpeqd %xmm4, %xmm3 ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,0,3,2] ; SSE2-NEXT: pand %xmm3, %xmm5 ; SSE2-NEXT: pcmpeqd %xmm4, %xmm2 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,0,3,2] ; SSE2-NEXT: pand %xmm2, %xmm3 ; SSE2-NEXT: packssdw %xmm5, %xmm3 ; SSE2-NEXT: pcmpeqd %xmm4, %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,0,3,2] ; SSE2-NEXT: pand %xmm1, %xmm2 ; SSE2-NEXT: pcmpeqd %xmm4, %xmm0 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2] ; SSE2-NEXT: pand %xmm0, %xmm1 ; SSE2-NEXT: packssdw %xmm2, %xmm1 ; SSE2-NEXT: packssdw %xmm3, %xmm1 ; SSE2-NEXT: packsswb %xmm1, %xmm1 ; SSE2-NEXT: pmovmskb %xmm1, %eax ; SSE2-NEXT: cmpb $-1, %al ; SSE2-NEXT: setne %al ; SSE2-NEXT: retq ; ; SSE41-LABEL: allzeros_v8i64_not: ; SSE41: # %bb.0: ; SSE41-NEXT: pxor %xmm4, %xmm4 ; SSE41-NEXT: pcmpeqq %xmm4, %xmm3 ; SSE41-NEXT: pcmpeqq %xmm4, %xmm2 ; SSE41-NEXT: packssdw %xmm3, %xmm2 ; SSE41-NEXT: pcmpeqq %xmm4, %xmm1 ; SSE41-NEXT: pcmpeqq %xmm4, %xmm0 ; SSE41-NEXT: packssdw %xmm1, %xmm0 ; SSE41-NEXT: packssdw %xmm2, %xmm0 ; SSE41-NEXT: packsswb %xmm0, %xmm0 ; SSE41-NEXT: pmovmskb %xmm0, %eax ; SSE41-NEXT: cmpb $-1, %al ; SSE41-NEXT: setne %al ; SSE41-NEXT: retq ; ; AVX1-LABEL: allzeros_v8i64_not: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX1-NEXT: vpcmpeqq %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpcmpeqq %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpcmpeqq %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpcmpeqq %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vmovmskps %xmm0, %eax ; AVX1-NEXT: cmpl $15, %eax ; AVX1-NEXT: setne %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allzeros_v8i64_not: ; AVX2: # %bb.0: ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX2-NEXT: vpcmpeqq %ymm2, %ymm1, %ymm1 ; AVX2-NEXT: vpcmpeqq %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vmovmskps %ymm0, %eax ; AVX2-NEXT: cmpb $-1, %al ; AVX2-NEXT: setne %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allzeros_v8i64_not: ; KNL: # %bb.0: ; KNL-NEXT: vptestnmq %zmm0, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: cmpb $-1, %al ; KNL-NEXT: setne %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v8i64_not: ; SKX: # %bb.0: ; SKX-NEXT: vptestnmq %zmm0, %zmm0, %k0 ; SKX-NEXT: kortestb %k0, %k0 ; SKX-NEXT: setae %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %1 = icmp eq <8 x i64> %a0, zeroinitializer %2 = bitcast <8 x i1> %1 to i8 %3 = icmp ne i8 %2, -1 ret i1 %3 } define i1 @allzeros_v16i8_and1(<16 x i8> %arg) { ; SSE-LABEL: allzeros_v16i8_and1: ; SSE: # %bb.0: ; SSE-NEXT: psllw $7, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: allzeros_v16i8_and1: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vpsllw $7, %xmm0, %xmm0 ; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax ; AVX1OR2-NEXT: testl %eax, %eax ; AVX1OR2-NEXT: sete %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: allzeros_v16i8_and1: ; KNL: # %bb.0: ; KNL-NEXT: vpsllw $7, %xmm0, %xmm0 ; KNL-NEXT: vpmovmskb %xmm0, %eax ; KNL-NEXT: testl %eax, %eax ; KNL-NEXT: sete %al ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v16i8_and1: ; SKX: # %bb.0: ; SKX-NEXT: vptestmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k0 ; SKX-NEXT: 
kortestw %k0, %k0 ; SKX-NEXT: sete %al ; SKX-NEXT: retq %tmp = and <16 x i8> %arg, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> %tmp1 = icmp ne <16 x i8> %tmp, zeroinitializer %tmp2 = bitcast <16 x i1> %tmp1 to i16 %tmp3 = icmp eq i16 %tmp2, 0 ret i1 %tmp3 } define i1 @allones_v32i8_and1(<32 x i8> %arg) { ; SSE-LABEL: allones_v32i8_and1: ; SSE: # %bb.0: ; SSE-NEXT: pand %xmm1, %xmm0 ; SSE-NEXT: psllw $7, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: cmpw $-1, %ax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allones_v32i8_and1: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: cmpw $-1, %ax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allones_v32i8_and1: ; AVX2: # %bb.0: ; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0 ; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: cmpl $-1, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allones_v32i8_and1: ; KNL: # %bb.0: ; KNL-NEXT: vpsllw $7, %ymm0, %ymm0 ; KNL-NEXT: vpmovmskb %ymm0, %eax ; KNL-NEXT: cmpl $-1, %eax ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allones_v32i8_and1: ; SKX: # %bb.0: ; SKX-NEXT: vptestmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %k0 ; SKX-NEXT: kortestd %k0, %k0 ; SKX-NEXT: setb %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = and <32 x i8> %arg, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> %tmp1 = icmp ne <32 x i8> %tmp, zeroinitializer %tmp2 = bitcast <32 x i1> %tmp1 to i32 %tmp3 = icmp eq i32 %tmp2, -1 ret i1 %tmp3 } define i1 @allzeros_v32i8_and1(<32 x i8> %arg) { ; SSE-LABEL: allzeros_v32i8_and1: ; SSE: # %bb.0: ; SSE-NEXT: por %xmm1, %xmm0 ; SSE-NEXT: psllw $7, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allzeros_v32i8_and1: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: testl %eax, %eax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allzeros_v32i8_and1: ; AVX2: # %bb.0: ; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0 ; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: testl %eax, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allzeros_v32i8_and1: ; KNL: # %bb.0: ; KNL-NEXT: vpsllw $7, %ymm0, %ymm0 ; KNL-NEXT: vpmovmskb %ymm0, %eax ; KNL-NEXT: testl %eax, %eax ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v32i8_and1: ; SKX: # %bb.0: ; SKX-NEXT: vptestmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %k0 ; SKX-NEXT: kortestd %k0, %k0 ; SKX-NEXT: sete %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = and <32 x i8> %arg, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> %tmp1 = icmp ne <32 x i8> %tmp, zeroinitializer %tmp2 = bitcast <32 x i1> %tmp1 to i32 %tmp3 = icmp eq i32 %tmp2, 0 ret i1 %tmp3 } define i1 @allones_v64i8_and1(<64 x i8> %arg) { ; SSE-LABEL: allones_v64i8_and1: ; SSE: # %bb.0: ; SSE-NEXT: pand %xmm2, %xmm0 ; SSE-NEXT: pand %xmm1, %xmm0 ; 
SSE-NEXT: pand %xmm3, %xmm0 ; SSE-NEXT: psllw $7, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: cmpw $-1, %ax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allones_v64i8_and1: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 ; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpand %xmm0, %xmm2, %xmm0 ; AVX1-NEXT: vpand %xmm0, %xmm3, %xmm0 ; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: cmpw $-1, %ax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allones_v64i8_and1: ; AVX2: # %bb.0: ; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm0 ; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0 ; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: cmpl $-1, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allones_v64i8_and1: ; KNL: # %bb.0: ; KNL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; KNL-NEXT: vpand %ymm0, %ymm1, %ymm0 ; KNL-NEXT: vpsllw $7, %ymm0, %ymm0 ; KNL-NEXT: vpmovmskb %ymm0, %eax ; KNL-NEXT: cmpl $-1, %eax ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allones_v64i8_and1: ; SKX: # %bb.0: ; SKX-NEXT: vptestmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %k0 ; SKX-NEXT: kortestq %k0, %k0 ; SKX-NEXT: setb %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = and <64 x i8> %arg, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> %tmp1 = icmp ne <64 x i8> %tmp, zeroinitializer %tmp2 = bitcast <64 x i1> %tmp1 to i64 %tmp3 = icmp eq i64 %tmp2, -1 ret i1 %tmp3 } define i1 @allzeros_v64i8_and1(<64 x i8> %arg) { ; SSE-LABEL: allzeros_v64i8_and1: ; SSE: # %bb.0: ; SSE-NEXT: por %xmm3, %xmm1 ; SSE-NEXT: por %xmm2, %xmm1 ; SSE-NEXT: por %xmm0, %xmm1 ; SSE-NEXT: psllw $7, %xmm1 ; SSE-NEXT: pmovmskb %xmm1, %eax ; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allzeros_v64i8_and1: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 ; AVX1-NEXT: vpor %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: testl %eax, %eax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allzeros_v64i8_and1: ; AVX2: # %bb.0: ; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0 ; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: testl %eax, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allzeros_v64i8_and1: ; KNL: # %bb.0: ; KNL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; KNL-NEXT: vpor %ymm1, %ymm0, %ymm0 ; KNL-NEXT: vpsllw $7, %ymm0, %ymm0 ; KNL-NEXT: vpmovmskb %ymm0, %eax ; KNL-NEXT: testl %eax, %eax ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v64i8_and1: ; SKX: # %bb.0: ; SKX-NEXT: vptestmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %k0 ; SKX-NEXT: kortestq %k0, %k0 ; SKX-NEXT: sete %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = and <64 x i8> %arg, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, 
i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> %tmp1 = icmp ne <64 x i8> %tmp, zeroinitializer %tmp2 = bitcast <64 x i1> %tmp1 to i64 %tmp3 = icmp eq i64 %tmp2, 0 ret i1 %tmp3 } define i1 @allones_v8i16_and1(<8 x i16> %arg) { ; SSE-LABEL: allones_v8i16_and1: ; SSE: # %bb.0: ; SSE-NEXT: psllw $15, %xmm0 ; SSE-NEXT: packsswb %xmm0, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: cmpb $-1, %al ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: allones_v8i16_and1: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vpsllw $15, %xmm0, %xmm0 ; AVX1OR2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax ; AVX1OR2-NEXT: cmpb $-1, %al ; AVX1OR2-NEXT: sete %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: allones_v8i16_and1: ; KNL: # %bb.0: ; KNL-NEXT: vpsllw $15, %xmm0, %xmm0 ; KNL-NEXT: vpsraw $15, %xmm0, %xmm0 ; KNL-NEXT: vpmovsxwq %xmm0, %zmm0 ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: cmpb $-1, %al ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allones_v8i16_and1: ; SKX: # %bb.0: ; SKX-NEXT: vptestmw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k0 ; SKX-NEXT: kortestb %k0, %k0 ; SKX-NEXT: setb %al ; SKX-NEXT: retq %tmp = and <8 x i16> %arg, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> %tmp1 = icmp ne <8 x i16> %tmp, zeroinitializer %tmp2 = bitcast <8 x i1> %tmp1 to i8 %tmp3 = icmp eq i8 %tmp2, -1 ret i1 %tmp3 } define i1 @allzeros_v8i16_and1(<8 x i16> %arg) { ; SSE-LABEL: allzeros_v8i16_and1: ; SSE: # %bb.0: ; SSE-NEXT: psllw $15, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: testl $43690, %eax # imm = 0xAAAA ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: allzeros_v8i16_and1: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vpsllw $15, %xmm0, %xmm0 ; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax ; AVX1OR2-NEXT: testl $43690, %eax # imm = 0xAAAA ; AVX1OR2-NEXT: sete %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: allzeros_v8i16_and1: ; KNL: # %bb.0: ; KNL-NEXT: vpsllw $15, %xmm0, %xmm0 ; KNL-NEXT: vpsraw $15, %xmm0, %xmm0 ; KNL-NEXT: vpmovsxwq %xmm0, %zmm0 ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb %al, %al ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v8i16_and1: ; SKX: # %bb.0: ; SKX-NEXT: vptestmw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k0 ; SKX-NEXT: kortestb %k0, %k0 ; SKX-NEXT: sete %al ; SKX-NEXT: retq %tmp = and <8 x i16> %arg, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> %tmp1 = icmp ne <8 x i16> %tmp, zeroinitializer %tmp2 = bitcast <8 x i1> %tmp1 to i8 %tmp3 = icmp eq i8 %tmp2, 0 ret i1 %tmp3 } define i1 @allones_v16i16_and1(<16 x i16> %arg) { ; SSE-LABEL: allones_v16i16_and1: ; SSE: # %bb.0: ; SSE-NEXT: psllw $15, %xmm1 ; SSE-NEXT: psllw $15, %xmm0 ; SSE-NEXT: packsswb %xmm1, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: cmpw $-1, %ax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allones_v16i16_and1: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vpsllw $15, %xmm1, %xmm1 ; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: cmpw $-1, %ax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allones_v16i16_and1: ; AVX2: # %bb.0: ; AVX2-NEXT: vpsllw $15, %ymm0, %ymm0 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX2-NEXT: vpacksswb 
%xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpmovmskb %xmm0, %eax ; AVX2-NEXT: cmpw $-1, %ax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allones_v16i16_and1: ; KNL: # %bb.0: ; KNL-NEXT: vpsllw $15, %ymm0, %ymm0 ; KNL-NEXT: vpsraw $15, %ymm0, %ymm0 ; KNL-NEXT: vpmovsxwd %ymm0, %zmm0 ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 ; KNL-NEXT: kortestw %k0, %k0 ; KNL-NEXT: setb %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allones_v16i16_and1: ; SKX: # %bb.0: ; SKX-NEXT: vptestmw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %k0 ; SKX-NEXT: kortestw %k0, %k0 ; SKX-NEXT: setb %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = and <16 x i16> %arg, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> %tmp1 = icmp ne <16 x i16> %tmp, zeroinitializer %tmp2 = bitcast <16 x i1> %tmp1 to i16 %tmp3 = icmp eq i16 %tmp2, -1 ret i1 %tmp3 } define i1 @allones_v32i16_and1(<32 x i16> %arg) { ; SSE-LABEL: allones_v32i16_and1: ; SSE: # %bb.0: ; SSE-NEXT: psllw $15, %xmm1 ; SSE-NEXT: psllw $15, %xmm0 ; SSE-NEXT: packsswb %xmm1, %xmm0 ; SSE-NEXT: psllw $15, %xmm3 ; SSE-NEXT: psllw $15, %xmm2 ; SSE-NEXT: packsswb %xmm3, %xmm2 ; SSE-NEXT: pand %xmm0, %xmm2 ; SSE-NEXT: pmovmskb %xmm2, %eax ; SSE-NEXT: cmpw $-1, %ax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allones_v32i16_and1: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpsllw $15, %xmm2, %xmm2 ; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0 ; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vpsllw $15, %xmm2, %xmm2 ; AVX1-NEXT: vpsllw $15, %xmm1, %xmm1 ; AVX1-NEXT: vpacksswb %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: cmpw $-1, %ax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allones_v32i16_and1: ; AVX2: # %bb.0: ; AVX2-NEXT: vpsllw $15, %ymm1, %ymm1 ; AVX2-NEXT: vpsllw $15, %ymm0, %ymm0 ; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: cmpl $-1, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allones_v32i16_and1: ; KNL: # %bb.0: ; KNL-NEXT: vpsllw $15, %ymm0, %ymm1 ; KNL-NEXT: vpsraw $15, %ymm1, %ymm1 ; KNL-NEXT: vpmovsxwd %ymm1, %zmm1 ; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: vextracti64x4 $1, %zmm0, %ymm0 ; KNL-NEXT: vpsllw $15, %ymm0, %ymm0 ; KNL-NEXT: vpsraw $15, %ymm0, %ymm0 ; KNL-NEXT: vpmovsxwd %ymm0, %zmm0 ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %ecx ; KNL-NEXT: andl %eax, %ecx ; KNL-NEXT: cmpl $65535, %ecx # imm = 0xFFFF ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allones_v32i16_and1: ; SKX: # %bb.0: ; SKX-NEXT: vptestmw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %k0 ; SKX-NEXT: kortestd %k0, %k0 ; SKX-NEXT: setb %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = and <32 x i16> %arg, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> %tmp1 = icmp ne <32 x i16> %tmp, zeroinitializer %tmp2 = bitcast <32 x i1> %tmp1 to i32 %tmp3 = icmp eq i32 %tmp2, -1 ret i1 %tmp3 } define i1 @allzeros_v32i16_and1(<32 x i16> %arg) { ; SSE-LABEL: allzeros_v32i16_and1: ; SSE: # %bb.0: ; SSE-NEXT: psllw $15, %xmm3 ; SSE-NEXT: psllw $15, %xmm2 ; SSE-NEXT: packsswb %xmm3, %xmm2 ; SSE-NEXT: 
psllw $15, %xmm1 ; SSE-NEXT: psllw $15, %xmm0 ; SSE-NEXT: packsswb %xmm1, %xmm0 ; SSE-NEXT: por %xmm2, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allzeros_v32i16_and1: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vpsllw $15, %xmm2, %xmm2 ; AVX1-NEXT: vpsllw $15, %xmm1, %xmm1 ; AVX1-NEXT: vpacksswb %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpsllw $15, %xmm2, %xmm2 ; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0 ; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: testl %eax, %eax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allzeros_v32i16_and1: ; AVX2: # %bb.0: ; AVX2-NEXT: vpsllw $15, %ymm1, %ymm1 ; AVX2-NEXT: vpsllw $15, %ymm0, %ymm0 ; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: testl %eax, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allzeros_v32i16_and1: ; KNL: # %bb.0: ; KNL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; KNL-NEXT: vpor %ymm1, %ymm0, %ymm0 ; KNL-NEXT: vpsllw $15, %ymm0, %ymm0 ; KNL-NEXT: vpsraw $15, %ymm0, %ymm0 ; KNL-NEXT: vpmovsxwd %ymm0, %zmm0 ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 ; KNL-NEXT: kortestw %k0, %k0 ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v32i16_and1: ; SKX: # %bb.0: ; SKX-NEXT: vptestmw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %k0 ; SKX-NEXT: kortestd %k0, %k0 ; SKX-NEXT: sete %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = and <32 x i16> %arg, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> %tmp1 = icmp ne <32 x i16> %tmp, zeroinitializer %tmp2 = bitcast <32 x i1> %tmp1 to i32 %tmp3 = icmp eq i32 %tmp2, 0 ret i1 %tmp3 } define i1 @allzeros_v16i16_and1(<16 x i16> %arg) { ; SSE-LABEL: allzeros_v16i16_and1: ; SSE: # %bb.0: ; SSE-NEXT: psllw $15, %xmm1 ; SSE-NEXT: psllw $15, %xmm0 ; SSE-NEXT: packsswb %xmm1, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allzeros_v16i16_and1: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vpsllw $15, %xmm1, %xmm1 ; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: testl %eax, %eax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allzeros_v16i16_and1: ; AVX2: # %bb.0: ; AVX2-NEXT: vpsllw $15, %ymm0, %ymm0 ; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: testl $-1431655766, %eax # imm = 0xAAAAAAAA ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allzeros_v16i16_and1: ; KNL: # %bb.0: ; KNL-NEXT: vpsllw $15, %ymm0, %ymm0 ; KNL-NEXT: vpsraw $15, %ymm0, %ymm0 ; KNL-NEXT: vpmovsxwd %ymm0, %zmm0 ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 ; KNL-NEXT: kortestw %k0, %k0 ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v16i16_and1: ; SKX: # %bb.0: ; SKX-NEXT: vptestmw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %k0 ; SKX-NEXT: kortestw %k0, %k0 ; SKX-NEXT: sete %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = and <16 x i16> %arg, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> 
%tmp1 = icmp ne <16 x i16> %tmp, zeroinitializer %tmp2 = bitcast <16 x i1> %tmp1 to i16 %tmp3 = icmp eq i16 %tmp2, 0 ret i1 %tmp3 } define i1 @allones_v4i32_and1(<4 x i32> %arg) { ; SSE-LABEL: allones_v4i32_and1: ; SSE: # %bb.0: ; SSE-NEXT: pslld $31, %xmm0 ; SSE-NEXT: movmskps %xmm0, %eax ; SSE-NEXT: cmpb $15, %al ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: allones_v4i32_and1: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vpslld $31, %xmm0, %xmm0 ; AVX1OR2-NEXT: vmovmskps %xmm0, %eax ; AVX1OR2-NEXT: cmpb $15, %al ; AVX1OR2-NEXT: sete %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: allones_v4i32_and1: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vptestnmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb $15, %al ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allones_v4i32_and1: ; SKX: # %bb.0: ; SKX-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %k0 ; SKX-NEXT: kmovd %k0, %eax ; SKX-NEXT: cmpb $15, %al ; SKX-NEXT: sete %al ; SKX-NEXT: retq %tmp = and <4 x i32> %arg, <i32 1, i32 1, i32 1, i32 1> %tmp1 = icmp ne <4 x i32> %tmp, zeroinitializer %tmp2 = bitcast <4 x i1> %tmp1 to i4 %tmp3 = icmp eq i4 %tmp2, -1 ret i1 %tmp3 } define i1 @allzeros_v4i32_and1(<4 x i32> %arg) { ; SSE-LABEL: allzeros_v4i32_and1: ; SSE: # %bb.0: ; SSE-NEXT: pslld $31, %xmm0 ; SSE-NEXT: movmskps %xmm0, %eax ; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: allzeros_v4i32_and1: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vpslld $31, %xmm0, %xmm0 ; AVX1OR2-NEXT: vmovmskps %xmm0, %eax ; AVX1OR2-NEXT: testl %eax, %eax ; AVX1OR2-NEXT: sete %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: allzeros_v4i32_and1: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb $15, %al ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v4i32_and1: ; SKX: # %bb.0: ; SKX-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %k0 ; SKX-NEXT: kortestb %k0, %k0 ; SKX-NEXT: sete %al ; SKX-NEXT: retq %tmp = and <4 x i32> %arg, <i32 1, i32 1, i32 1, i32 1> %tmp1 = icmp ne <4 x i32> %tmp, zeroinitializer %tmp2 = bitcast <4 x i1> %tmp1 to i4 %tmp3 = icmp eq i4 %tmp2, 0 ret i1 %tmp3 } define i1 @allones_v8i32_and1(<8 x i32> %arg) { ; SSE-LABEL: allones_v8i32_and1: ; SSE: # %bb.0: ; SSE-NEXT: pslld $31, %xmm1 ; SSE-NEXT: pslld $31, %xmm0 ; SSE-NEXT: packssdw %xmm1, %xmm0 ; SSE-NEXT: packsswb %xmm0, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: cmpb $-1, %al ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allones_v8i32_and1: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpslld $31, %xmm0, %xmm0 ; AVX1-NEXT: vmovmskps %xmm0, %eax ; AVX1-NEXT: cmpl $15, %eax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allones_v8i32_and1: ; AVX2: # %bb.0: ; AVX2-NEXT: vpslld $31, %ymm0, %ymm0 ; AVX2-NEXT: vmovmskps %ymm0, %eax ; AVX2-NEXT: cmpb $-1, %al ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allones_v8i32_and1: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: cmpb $-1, %al ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allones_v8i32_and1: ; SKX: # %bb.0: ; 
SKX-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %k0 ; SKX-NEXT: kortestb %k0, %k0 ; SKX-NEXT: setb %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = and <8 x i32> %arg, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> %tmp1 = icmp ne <8 x i32> %tmp, zeroinitializer %tmp2 = bitcast <8 x i1> %tmp1 to i8 %tmp3 = icmp eq i8 %tmp2, -1 ret i1 %tmp3 } define i1 @allzeros_v8i32_and1(<8 x i32> %arg) { ; SSE-LABEL: allzeros_v8i32_and1: ; SSE: # %bb.0: ; SSE-NEXT: pslld $31, %xmm1 ; SSE-NEXT: pslld $31, %xmm0 ; SSE-NEXT: packssdw %xmm1, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: testl $43690, %eax # imm = 0xAAAA ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allzeros_v8i32_and1: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpslld $31, %xmm0, %xmm0 ; AVX1-NEXT: vmovmskps %xmm0, %eax ; AVX1-NEXT: testl %eax, %eax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allzeros_v8i32_and1: ; AVX2: # %bb.0: ; AVX2-NEXT: vpslld $31, %ymm0, %ymm0 ; AVX2-NEXT: vmovmskps %ymm0, %eax ; AVX2-NEXT: testl %eax, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; KNL-LABEL: allzeros_v8i32_and1: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; KNL-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb %al, %al ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allzeros_v8i32_and1: ; SKX: # %bb.0: ; SKX-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %k0 ; SKX-NEXT: kortestb %k0, %k0 ; SKX-NEXT: sete %al ; SKX-NEXT: vzeroupper ; SKX-NEXT: retq %tmp = and <8 x i32> %arg, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> %tmp1 = icmp ne <8 x i32> %tmp, zeroinitializer %tmp2 = bitcast <8 x i1> %tmp1 to i8 %tmp3 = icmp eq i8 %tmp2, 0 ret i1 %tmp3 } define i1 @allones_v16i32_and1(<16 x i32> %arg) { ; SSE-LABEL: allones_v16i32_and1: ; SSE: # %bb.0: ; SSE-NEXT: pslld $31, %xmm3 ; SSE-NEXT: pslld $31, %xmm2 ; SSE-NEXT: packssdw %xmm3, %xmm2 ; SSE-NEXT: pslld $31, %xmm1 ; SSE-NEXT: pslld $31, %xmm0 ; SSE-NEXT: packssdw %xmm1, %xmm0 ; SSE-NEXT: packsswb %xmm2, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: cmpw $-1, %ax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allones_v16i32_and1: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vpslld $31, %xmm2, %xmm2 ; AVX1-NEXT: vpslld $31, %xmm1, %xmm1 ; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpslld $31, %xmm2, %xmm2 ; AVX1-NEXT: vpslld $31, %xmm0, %xmm0 ; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: cmpw $-1, %ax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allones_v16i32_and1: ; AVX2: # %bb.0: ; AVX2-NEXT: vpslld $31, %ymm1, %ymm1 ; AVX2-NEXT: vpsrad $31, %ymm1, %ymm1 ; AVX2-NEXT: vpslld $31, %ymm0, %ymm0 ; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0 ; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: cmpl $-1, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: allones_v16i32_and1: ; AVX512: # %bb.0: ; AVX512-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %k0 ; AVX512-NEXT: kortestw %k0, %k0 ; AVX512-NEXT: setb %al ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %tmp = and <16 x i32> %arg, <i32 1, i32 1, i32 1, i32 1, i32 
1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> %tmp1 = icmp ne <16 x i32> %tmp, zeroinitializer %tmp2 = bitcast <16 x i1> %tmp1 to i16 %tmp3 = icmp eq i16 %tmp2, -1 ret i1 %tmp3 } define i1 @allzeros_v16i32_and1(<16 x i32> %arg) { ; SSE-LABEL: allzeros_v16i32_and1: ; SSE: # %bb.0: ; SSE-NEXT: pslld $31, %xmm3 ; SSE-NEXT: pslld $31, %xmm2 ; SSE-NEXT: packssdw %xmm3, %xmm2 ; SSE-NEXT: pslld $31, %xmm1 ; SSE-NEXT: pslld $31, %xmm0 ; SSE-NEXT: packssdw %xmm1, %xmm0 ; SSE-NEXT: packsswb %xmm2, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1-LABEL: allzeros_v16i32_and1: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vpslld $31, %xmm2, %xmm2 ; AVX1-NEXT: vpslld $31, %xmm1, %xmm1 ; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpslld $31, %xmm2, %xmm2 ; AVX1-NEXT: vpslld $31, %xmm0, %xmm0 ; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpmovmskb %xmm0, %eax ; AVX1-NEXT: testl %eax, %eax ; AVX1-NEXT: sete %al ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: allzeros_v16i32_and1: ; AVX2: # %bb.0: ; AVX2-NEXT: vpslld $31, %ymm1, %ymm1 ; AVX2-NEXT: vpsrad $31, %ymm1, %ymm1 ; AVX2-NEXT: vpslld $31, %ymm0, %ymm0 ; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0 ; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpmovmskb %ymm0, %eax ; AVX2-NEXT: testl %eax, %eax ; AVX2-NEXT: sete %al ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: allzeros_v16i32_and1: ; AVX512: # %bb.0: ; AVX512-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %k0 ; AVX512-NEXT: kortestw %k0, %k0 ; AVX512-NEXT: sete %al ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %tmp = and <16 x i32> %arg, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> %tmp1 = icmp ne <16 x i32> %tmp, zeroinitializer %tmp2 = bitcast <16 x i1> %tmp1 to i16 %tmp3 = icmp eq i16 %tmp2, 0 ret i1 %tmp3 } define i1 @allones_v2i64_and1(<2 x i64> %arg) { ; SSE-LABEL: allones_v2i64_and1: ; SSE: # %bb.0: ; SSE-NEXT: psllq $63, %xmm0 ; SSE-NEXT: movmskpd %xmm0, %eax ; SSE-NEXT: cmpb $3, %al ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: allones_v2i64_and1: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vpsllq $63, %xmm0, %xmm0 ; AVX1OR2-NEXT: vmovmskpd %xmm0, %eax ; AVX1OR2-NEXT: cmpb $3, %al ; AVX1OR2-NEXT: sete %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: allones_v2i64_and1: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vmovdqa {{.*#+}} xmm1 = [1,1] ; KNL-NEXT: vptestnmq %zmm1, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb $3, %al ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: allones_v2i64_and1: ; SKX: # %bb.0: ; SKX-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %k0 ; SKX-NEXT: kmovd %k0, %eax ; SKX-NEXT: cmpb $3, %al ; SKX-NEXT: sete %al ; SKX-NEXT: retq %tmp = and <2 x i64> %arg, <i64 1, i64 1> %tmp1 = icmp ne <2 x i64> %tmp, zeroinitializer %tmp2 = bitcast <2 x i1> %tmp1 to i2 %tmp3 = icmp eq i2 %tmp2, -1 ret i1 %tmp3 } define i1 @allzeros_v2i64_and1(<2 x i64> %arg) { ; SSE-LABEL: allzeros_v2i64_and1: ; SSE: # %bb.0: ; SSE-NEXT: psllq $63, %xmm0 ; SSE-NEXT: movmskpd %xmm0, %eax ; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: allzeros_v2i64_and1: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vpsllq $63, %xmm0, %xmm0 ; AVX1OR2-NEXT: vmovmskpd %xmm0, 
; AVX1OR2-NEXT: testl %eax, %eax
; AVX1OR2-NEXT: sete %al
; AVX1OR2-NEXT: retq
;
; KNL-LABEL: allzeros_v2i64_and1:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vmovdqa {{.*#+}} xmm1 = [1,1]
; KNL-NEXT: vptestmq %zmm1, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb $3, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allzeros_v2i64_and1:
; SKX: # %bb.0:
; SKX-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %k0
; SKX-NEXT: kortestb %k0, %k0
; SKX-NEXT: sete %al
; SKX-NEXT: retq
  %tmp = and <2 x i64> %arg, <i64 1, i64 1>
  %tmp1 = icmp ne <2 x i64> %tmp, zeroinitializer
  %tmp2 = bitcast <2 x i1> %tmp1 to i2
  %tmp3 = icmp eq i2 %tmp2, 0
  ret i1 %tmp3
}

define i1 @allones_v4i64_and1(<4 x i64> %arg) {
; SSE-LABEL: allones_v4i64_and1:
; SSE: # %bb.0:
; SSE-NEXT: psllq $63, %xmm1
; SSE-NEXT: psllq $63, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: movmskps %xmm0, %eax
; SSE-NEXT: cmpb $15, %al
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allones_v4i64_and1:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsllq $63, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
; AVX1-NEXT: cmpl $3, %eax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allones_v4i64_and1:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllq $63, %ymm0, %ymm0
; AVX2-NEXT: vmovmskpd %ymm0, %eax
; AVX2-NEXT: cmpb $15, %al
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allones_v4i64_and1:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vptestnmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb $15, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allones_v4i64_and1:
; SKX: # %bb.0:
; SKX-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: cmpb $15, %al
; SKX-NEXT: sete %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <4 x i64> %arg, <i64 1, i64 1, i64 1, i64 1>
  %tmp1 = icmp ne <4 x i64> %tmp, zeroinitializer
  %tmp2 = bitcast <4 x i1> %tmp1 to i4
  %tmp3 = icmp eq i4 %tmp2, -1
  ret i1 %tmp3
}

define i1 @allzeros_v4i64_and1(<4 x i64> %arg) {
; SSE-LABEL: allzeros_v4i64_and1:
; SSE: # %bb.0:
; SSE-NEXT: psllq $63, %xmm1
; SSE-NEXT: psllq $63, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: movmskps %xmm0, %eax
; SSE-NEXT: testl %eax, %eax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allzeros_v4i64_and1:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsllq $63, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
; AVX1-NEXT: testl %eax, %eax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allzeros_v4i64_and1:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllq $63, %ymm0, %ymm0
; AVX2-NEXT: vmovmskpd %ymm0, %eax
; AVX2-NEXT: testl %eax, %eax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allzeros_v4i64_and1:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb $15, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allzeros_v4i64_and1:
; SKX: # %bb.0:
; SKX-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %k0
; SKX-NEXT: kortestb %k0, %k0
; SKX-NEXT: sete %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <4 x i64> %arg, <i64 1, i64 1, i64 1, i64 1>
  %tmp1 = icmp ne <4 x i64> %tmp, zeroinitializer
  %tmp2 = bitcast <4 x i1> %tmp1 to i4
  %tmp3 = icmp eq i4 %tmp2, 0
  ret i1 %tmp3
}

define i1 @allones_v8i64_and1(<8 x i64> %arg) {
; SSE-LABEL: allones_v8i64_and1:
; SSE: # %bb.0:
; SSE-NEXT: psllq $63, %xmm3
; SSE-NEXT: psllq $63, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: psllq $63, %xmm1
; SSE-NEXT: psllq $63, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packssdw %xmm2, %xmm0
; SSE-NEXT: packsswb %xmm0, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: cmpb $-1, %al
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allones_v8i64_and1:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpsllq $63, %xmm2, %xmm2
; AVX1-NEXT: vpsllq $63, %xmm1, %xmm1
; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsllq $63, %xmm2, %xmm2
; AVX1-NEXT: vpsllq $63, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskps %xmm0, %eax
; AVX1-NEXT: cmpl $15, %eax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allones_v8i64_and1:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllq $63, %ymm1, %ymm1
; AVX2-NEXT: vpsllq $63, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: cmpb $-1, %al
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allones_v8i64_and1:
; KNL: # %bb.0:
; KNL-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: cmpb $-1, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allones_v8i64_and1:
; SKX: # %bb.0:
; SKX-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %k0
; SKX-NEXT: kortestb %k0, %k0
; SKX-NEXT: setb %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <8 x i64> %arg, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
  %tmp1 = icmp ne <8 x i64> %tmp, zeroinitializer
  %tmp2 = bitcast <8 x i1> %tmp1 to i8
  %tmp3 = icmp eq i8 %tmp2, -1
  ret i1 %tmp3
}

define i1 @allzeros_v8i64_and1(<8 x i64> %arg) {
; SSE-LABEL: allzeros_v8i64_and1:
; SSE: # %bb.0:
; SSE-NEXT: psllq $63, %xmm3
; SSE-NEXT: psllq $63, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: psllq $63, %xmm1
; SSE-NEXT: psllq $63, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packssdw %xmm2, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: testl $43690, %eax # imm = 0xAAAA
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allzeros_v8i64_and1:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpsllq $63, %xmm2, %xmm2
; AVX1-NEXT: vpsllq $63, %xmm1, %xmm1
; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsllq $63, %xmm2, %xmm2
; AVX1-NEXT: vpsllq $63, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskps %xmm0, %eax
; AVX1-NEXT: testl %eax, %eax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allzeros_v8i64_and1:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllq $63, %ymm1, %ymm1
; AVX2-NEXT: vpsllq $63, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: testl %eax, %eax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allzeros_v8i64_and1:
; KNL: # %bb.0:
; KNL-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb %al, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allzeros_v8i64_and1:
; SKX: # %bb.0:
; SKX-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %k0
; SKX-NEXT: kortestb %k0, %k0
; SKX-NEXT: sete %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <8 x i64> %arg, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
  %tmp1 = icmp ne <8 x i64> %tmp, zeroinitializer
  %tmp2 = bitcast <8 x i1> %tmp1 to i8
  %tmp3 = icmp eq i8 %tmp2, 0
  ret i1 %tmp3
}

define i1 @allones_v16i8_and4(<16 x i8> %arg) {
; SSE-LABEL: allones_v16i8_and4:
; SSE: # %bb.0:
; SSE-NEXT: psllw $5, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: cmpw $-1, %ax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: allones_v16i8_and4:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpsllw $5, %xmm0, %xmm0
; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax
; AVX1OR2-NEXT: cmpw $-1, %ax
; AVX1OR2-NEXT: sete %al
; AVX1OR2-NEXT: retq
;
; KNL-LABEL: allones_v16i8_and4:
; KNL: # %bb.0:
; KNL-NEXT: vpsllw $5, %xmm0, %xmm0
; KNL-NEXT: vpmovmskb %xmm0, %eax
; KNL-NEXT: cmpw $-1, %ax
; KNL-NEXT: sete %al
; KNL-NEXT: retq
;
; SKX-LABEL: allones_v16i8_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k0
; SKX-NEXT: kortestw %k0, %k0
; SKX-NEXT: setb %al
; SKX-NEXT: retq
  %tmp = and <16 x i8> %arg, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  %tmp1 = icmp ne <16 x i8> %tmp, zeroinitializer
  %tmp2 = bitcast <16 x i1> %tmp1 to i16
  %tmp3 = icmp eq i16 %tmp2, -1
  ret i1 %tmp3
}

define i1 @allzeros_v16i8_and4(<16 x i8> %arg) {
; SSE-LABEL: allzeros_v16i8_and4:
; SSE: # %bb.0:
; SSE-NEXT: psllw $5, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: testl %eax, %eax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: allzeros_v16i8_and4:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpsllw $5, %xmm0, %xmm0
; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax
; AVX1OR2-NEXT: testl %eax, %eax
; AVX1OR2-NEXT: sete %al
; AVX1OR2-NEXT: retq
;
; KNL-LABEL: allzeros_v16i8_and4:
; KNL: # %bb.0:
; KNL-NEXT: vpsllw $5, %xmm0, %xmm0
; KNL-NEXT: vpmovmskb %xmm0, %eax
; KNL-NEXT: testl %eax, %eax
; KNL-NEXT: sete %al
; KNL-NEXT: retq
;
; SKX-LABEL: allzeros_v16i8_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k0
; SKX-NEXT: kortestw %k0, %k0
; SKX-NEXT: sete %al
; SKX-NEXT: retq
  %tmp = and <16 x i8> %arg, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  %tmp1 = icmp ne <16 x i8> %tmp, zeroinitializer
  %tmp2 = bitcast <16 x i1> %tmp1 to i16
  %tmp3 = icmp eq i16 %tmp2, 0
  ret i1 %tmp3
}

define i1 @allones_v32i8_and4(<32 x i8> %arg) {
; SSE-LABEL: allones_v32i8_and4:
; SSE: # %bb.0:
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: psllw $5, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: cmpw $-1, %ax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allones_v32i8_and4:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpsllw $5, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: cmpw $-1, %ax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allones_v32i8_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $5, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: cmpl $-1, %eax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allones_v32i8_and4:
; KNL: # %bb.0:
; KNL-NEXT: vpsllw $5, %ymm0, %ymm0
; KNL-NEXT: vpmovmskb %ymm0, %eax
; KNL-NEXT: cmpl $-1, %eax
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allones_v32i8_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %k0
; SKX-NEXT: kortestd %k0, %k0
; SKX-NEXT: setb %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <32 x i8> %arg, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  %tmp1 = icmp ne <32 x i8> %tmp, zeroinitializer
  %tmp2 = bitcast <32 x i1> %tmp1 to i32
  %tmp3 = icmp eq i32 %tmp2, -1
  ret i1 %tmp3
}

define i1 @allzeros_v32i8_and4(<32 x i8> %arg) {
; SSE-LABEL: allzeros_v32i8_and4:
; SSE: # %bb.0:
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: psllw $5, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: testl %eax, %eax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allzeros_v32i8_and4:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsllw $5, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: testl %eax, %eax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allzeros_v32i8_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $5, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: testl %eax, %eax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allzeros_v32i8_and4:
; KNL: # %bb.0:
; KNL-NEXT: vpsllw $5, %ymm0, %ymm0
; KNL-NEXT: vpmovmskb %ymm0, %eax
; KNL-NEXT: testl %eax, %eax
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allzeros_v32i8_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %k0
; SKX-NEXT: kortestd %k0, %k0
; SKX-NEXT: sete %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <32 x i8> %arg, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  %tmp1 = icmp ne <32 x i8> %tmp, zeroinitializer
  %tmp2 = bitcast <32 x i1> %tmp1 to i32
  %tmp3 = icmp eq i32 %tmp2, 0
  ret i1 %tmp3
}

define i1 @allones_v64i8_and4(<64 x i8> %arg) {
; SSE-LABEL: allones_v64i8_and4:
; SSE: # %bb.0:
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: pand %xmm3, %xmm0
; SSE-NEXT: psllw $5, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: cmpw $-1, %ax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allones_v64i8_and4:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpand %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vpand %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vpsllw $5, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: cmpw $-1, %ax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allones_v64i8_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpsllw $5, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: cmpl $-1, %eax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allones_v64i8_and4:
; KNL: # %bb.0:
; KNL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; KNL-NEXT: vpand %ymm0, %ymm1, %ymm0
; KNL-NEXT: vpsllw $5, %ymm0, %ymm0
; KNL-NEXT: vpmovmskb %ymm0, %eax
; KNL-NEXT: cmpl $-1, %eax
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allones_v64i8_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %k0
; SKX-NEXT: kortestq %k0, %k0
; SKX-NEXT: setb %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <64 x i8> %arg, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  %tmp1 = icmp ne <64 x i8> %tmp, zeroinitializer
  %tmp2 = bitcast <64 x i1> %tmp1 to i64
  %tmp3 = icmp eq i64 %tmp2, -1
  ret i1 %tmp3
}

define i1 @allzeros_v64i8_and4(<64 x i8> %arg) {
; SSE-LABEL: allzeros_v64i8_and4:
; SSE: # %bb.0:
; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: psllw $5, %xmm1
; SSE-NEXT: pmovmskb %xmm1, %eax
; SSE-NEXT: testl %eax, %eax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allzeros_v64i8_and4:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsllw $5, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: testl %eax, %eax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allzeros_v64i8_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsllw $5, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: testl %eax, %eax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allzeros_v64i8_and4:
; KNL: # %bb.0:
; KNL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; KNL-NEXT: vpor %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpsllw $5, %ymm0, %ymm0
; KNL-NEXT: vpmovmskb %ymm0, %eax
; KNL-NEXT: testl %eax, %eax
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allzeros_v64i8_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %k0
; SKX-NEXT: kortestq %k0, %k0
; SKX-NEXT: sete %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <64 x i8> %arg, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  %tmp1 = icmp ne <64 x i8> %tmp, zeroinitializer
  %tmp2 = bitcast <64 x i1> %tmp1 to i64
  %tmp3 = icmp eq i64 %tmp2, 0
  ret i1 %tmp3
}

define i1 @allones_v8i16_and4(<8 x i16> %arg) {
; SSE-LABEL: allones_v8i16_and4:
; SSE: # %bb.0:
; SSE-NEXT: psllw $13, %xmm0
; SSE-NEXT: packsswb %xmm0, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: cmpb $-1, %al
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: allones_v8i16_and4:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpsllw $13, %xmm0, %xmm0
; AVX1OR2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax
; AVX1OR2-NEXT: cmpb $-1, %al
; AVX1OR2-NEXT: sete %al
; AVX1OR2-NEXT: retq
;
; KNL-LABEL: allones_v8i16_and4:
; KNL: # %bb.0:
; KNL-NEXT: vpsllw $13, %xmm0, %xmm0
; KNL-NEXT: vpsraw $15, %xmm0, %xmm0
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: cmpb $-1, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allones_v8i16_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k0
; SKX-NEXT: kortestb %k0, %k0
; SKX-NEXT: setb %al
; SKX-NEXT: retq
  %tmp = and <8 x i16> %arg, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
  %tmp1 = icmp ne <8 x i16> %tmp, zeroinitializer
  %tmp2 = bitcast <8 x i1> %tmp1 to i8
  %tmp3 = icmp eq i8 %tmp2, -1
  ret i1 %tmp3
}

define i1 @allzeros_v8i16_and4(<8 x i16> %arg) {
; SSE-LABEL: allzeros_v8i16_and4:
; SSE: # %bb.0:
; SSE-NEXT: psllw $13, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: testl $43690, %eax # imm = 0xAAAA
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: allzeros_v8i16_and4:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpsllw $13, %xmm0, %xmm0
; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax
; AVX1OR2-NEXT: testl $43690, %eax # imm = 0xAAAA
; AVX1OR2-NEXT: sete %al
; AVX1OR2-NEXT: retq
;
; KNL-LABEL: allzeros_v8i16_and4:
; KNL: # %bb.0:
; KNL-NEXT: vpsllw $13, %xmm0, %xmm0
; KNL-NEXT: vpsraw $15, %xmm0, %xmm0
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb %al, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allzeros_v8i16_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k0
; SKX-NEXT: kortestb %k0, %k0
; SKX-NEXT: sete %al
; SKX-NEXT: retq
  %tmp = and <8 x i16> %arg, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
  %tmp1 = icmp ne <8 x i16> %tmp, zeroinitializer
  %tmp2 = bitcast <8 x i1> %tmp1 to i8
  %tmp3 = icmp eq i8 %tmp2, 0
  ret i1 %tmp3
}

define i1 @allones_v16i16_and4(<16 x i16> %arg) {
; SSE-LABEL: allones_v16i16_and4:
; SSE: # %bb.0:
; SSE-NEXT: psllw $13, %xmm1
; SSE-NEXT: psllw $13, %xmm0
; SSE-NEXT: packsswb %xmm1, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: cmpw $-1, %ax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allones_v16i16_and4:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllw $13, %xmm1, %xmm1
; AVX1-NEXT: vpsllw $13, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: cmpw $-1, %ax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allones_v16i16_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $13, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
; AVX2-NEXT: cmpw $-1, %ax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allones_v16i16_and4:
; KNL: # %bb.0:
; KNL-NEXT: vpsllw $13, %ymm0, %ymm0
; KNL-NEXT: vpsraw $15, %ymm0, %ymm0
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kortestw %k0, %k0
; KNL-NEXT: setb %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allones_v16i16_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %k0
; SKX-NEXT: kortestw %k0, %k0
; SKX-NEXT: setb %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <16 x i16> %arg, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
  %tmp1 = icmp ne <16 x i16> %tmp, zeroinitializer
  %tmp2 = bitcast <16 x i1> %tmp1 to i16
  %tmp3 = icmp eq i16 %tmp2, -1
  ret i1 %tmp3
}

define i1 @allones_v32i16_and4(<32 x i16> %arg) {
; SSE-LABEL: allones_v32i16_and4:
; SSE: # %bb.0:
; SSE-NEXT: psllw $13, %xmm1
; SSE-NEXT: psllw $13, %xmm0
; SSE-NEXT: packsswb %xmm1, %xmm0
; SSE-NEXT: psllw $13, %xmm3
; SSE-NEXT: psllw $13, %xmm2
; SSE-NEXT: packsswb %xmm3, %xmm2
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: pmovmskb %xmm2, %eax
; SSE-NEXT: cmpw $-1, %ax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allones_v32i16_and4:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsllw $13, %xmm2, %xmm2
; AVX1-NEXT: vpsllw $13, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpsllw $13, %xmm2, %xmm2
; AVX1-NEXT: vpsllw $13, %xmm1, %xmm1
; AVX1-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: cmpw $-1, %ax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allones_v32i16_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $13, %ymm1, %ymm1
; AVX2-NEXT: vpsllw $13, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: cmpl $-1, %eax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allones_v32i16_and4:
; KNL: # %bb.0:
; KNL-NEXT: vpsllw $13, %ymm0, %ymm1
; KNL-NEXT: vpsraw $15, %ymm1, %ymm1
; KNL-NEXT: vpmovsxwd %ymm1, %zmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; KNL-NEXT: vpsllw $13, %ymm0, %ymm0
; KNL-NEXT: vpsraw $15, %ymm0, %ymm0
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %ecx
; KNL-NEXT: andl %eax, %ecx
; KNL-NEXT: cmpl $65535, %ecx # imm = 0xFFFF
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allones_v32i16_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %k0
; SKX-NEXT: kortestd %k0, %k0
; SKX-NEXT: setb %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <32 x i16> %arg, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
  %tmp1 = icmp ne <32 x i16> %tmp, zeroinitializer
  %tmp2 = bitcast <32 x i1> %tmp1 to i32
  %tmp3 = icmp eq i32 %tmp2, -1
  ret i1 %tmp3
}

define i1 @allzeros_v32i16_and4(<32 x i16> %arg) {
; SSE-LABEL: allzeros_v32i16_and4:
; SSE: # %bb.0:
; SSE-NEXT: psllw $13, %xmm3
; SSE-NEXT: psllw $13, %xmm2
; SSE-NEXT: packsswb %xmm3, %xmm2
; SSE-NEXT: psllw $13, %xmm1
; SSE-NEXT: psllw $13, %xmm0
; SSE-NEXT: packsswb %xmm1, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: testl %eax, %eax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allzeros_v32i16_and4:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpsllw $13, %xmm2, %xmm2
; AVX1-NEXT: vpsllw $13, %xmm1, %xmm1
; AVX1-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsllw $13, %xmm2, %xmm2
; AVX1-NEXT: vpsllw $13, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: testl %eax, %eax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allzeros_v32i16_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $13, %ymm1, %ymm1
; AVX2-NEXT: vpsllw $13, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: testl %eax, %eax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allzeros_v32i16_and4:
; KNL: # %bb.0:
; KNL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; KNL-NEXT: vpor %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpsllw $13, %ymm0, %ymm0
; KNL-NEXT: vpsraw $15, %ymm0, %ymm0
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kortestw %k0, %k0
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allzeros_v32i16_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %k0
; SKX-NEXT: kortestd %k0, %k0
; SKX-NEXT: sete %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <32 x i16> %arg, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
  %tmp1 = icmp ne <32 x i16> %tmp, zeroinitializer
  %tmp2 = bitcast <32 x i1> %tmp1 to i32
  %tmp3 = icmp eq i32 %tmp2, 0
  ret i1 %tmp3
}

define i1 @allzeros_v16i16_and4(<16 x i16> %arg) {
; SSE-LABEL: allzeros_v16i16_and4:
; SSE: # %bb.0:
; SSE-NEXT: psllw $13, %xmm1
; SSE-NEXT: psllw $13, %xmm0
; SSE-NEXT: packsswb %xmm1, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: testl %eax, %eax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allzeros_v16i16_and4:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllw $13, %xmm1, %xmm1
; AVX1-NEXT: vpsllw $13, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: testl %eax, %eax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allzeros_v16i16_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $13, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: testl $-1431655766, %eax # imm = 0xAAAAAAAA
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allzeros_v16i16_and4:
; KNL: # %bb.0:
; KNL-NEXT: vpsllw $13, %ymm0, %ymm0
; KNL-NEXT: vpsraw $15, %ymm0, %ymm0
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kortestw %k0, %k0
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allzeros_v16i16_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %k0
; SKX-NEXT: kortestw %k0, %k0
; SKX-NEXT: sete %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <16 x i16> %arg, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
  %tmp1 = icmp ne <16 x i16> %tmp, zeroinitializer
  %tmp2 = bitcast <16 x i1> %tmp1 to i16
  %tmp3 = icmp eq i16 %tmp2, 0
  ret i1 %tmp3
}

define i1 @allones_v4i32_and4(<4 x i32> %arg) {
; SSE-LABEL: allones_v4i32_and4:
; SSE: # %bb.0:
; SSE-NEXT: pslld $29, %xmm0
; SSE-NEXT: movmskps %xmm0, %eax
; SSE-NEXT: cmpb $15, %al
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: allones_v4i32_and4:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpslld $29, %xmm0, %xmm0
; AVX1OR2-NEXT: vmovmskps %xmm0, %eax
; AVX1OR2-NEXT: cmpb $15, %al
; AVX1OR2-NEXT: sete %al
; AVX1OR2-NEXT: retq
;
; KNL-LABEL: allones_v4i32_and4:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vptestnmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb $15, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allones_v4i32_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: cmpb $15, %al
; SKX-NEXT: sete %al
; SKX-NEXT: retq
  %tmp = and <4 x i32> %arg, <i32 4, i32 4, i32 4, i32 4>
  %tmp1 = icmp ne <4 x i32> %tmp, zeroinitializer
  %tmp2 = bitcast <4 x i1> %tmp1 to i4
  %tmp3 = icmp eq i4 %tmp2, -1
  ret i1 %tmp3
}

define i1 @allzeros_v4i32_and4(<4 x i32> %arg) {
; SSE-LABEL: allzeros_v4i32_and4:
; SSE: # %bb.0:
; SSE-NEXT: pslld $29, %xmm0
; SSE-NEXT: movmskps %xmm0, %eax
; SSE-NEXT: testl %eax, %eax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: allzeros_v4i32_and4:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpslld $29, %xmm0, %xmm0
; AVX1OR2-NEXT: vmovmskps %xmm0, %eax
; AVX1OR2-NEXT: testl %eax, %eax
; AVX1OR2-NEXT: sete %al
; AVX1OR2-NEXT: retq
;
; KNL-LABEL: allzeros_v4i32_and4:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb $15, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allzeros_v4i32_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %k0
; SKX-NEXT: kortestb %k0, %k0
; SKX-NEXT: sete %al
; SKX-NEXT: retq
  %tmp = and <4 x i32> %arg, <i32 4, i32 4, i32 4, i32 4>
  %tmp1 = icmp ne <4 x i32> %tmp, zeroinitializer
  %tmp2 = bitcast <4 x i1> %tmp1 to i4
  %tmp3 = icmp eq i4 %tmp2, 0
  ret i1 %tmp3
}

define i1 @allones_v8i32_and4(<8 x i32> %arg) {
; SSE-LABEL: allones_v8i32_and4:
; SSE: # %bb.0:
; SSE-NEXT: pslld $29, %xmm1
; SSE-NEXT: pslld $29, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packsswb %xmm0, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: cmpb $-1, %al
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allones_v8i32_and4:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpslld $29, %xmm0, %xmm0
; AVX1-NEXT: vmovmskps %xmm0, %eax
; AVX1-NEXT: cmpl $15, %eax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allones_v8i32_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpslld $29, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: cmpb $-1, %al
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allones_v8i32_and4:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: cmpb $-1, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allones_v8i32_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %k0
; SKX-NEXT: kortestb %k0, %k0
; SKX-NEXT: setb %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <8 x i32> %arg, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
  %tmp1 = icmp ne <8 x i32> %tmp, zeroinitializer
  %tmp2 = bitcast <8 x i1> %tmp1 to i8
  %tmp3 = icmp eq i8 %tmp2, -1
  ret i1 %tmp3
}

define i1 @allzeros_v8i32_and4(<8 x i32> %arg) {
; SSE-LABEL: allzeros_v8i32_and4:
; SSE: # %bb.0:
; SSE-NEXT: pslld $29, %xmm1
; SSE-NEXT: pslld $29, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: testl $43690, %eax # imm = 0xAAAA
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allzeros_v8i32_and4:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpslld $29, %xmm0, %xmm0
; AVX1-NEXT: vmovmskps %xmm0, %eax
; AVX1-NEXT: testl %eax, %eax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allzeros_v8i32_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpslld $29, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: testl %eax, %eax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allzeros_v8i32_and4:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb %al, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allzeros_v8i32_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %k0
; SKX-NEXT: kortestb %k0, %k0
; SKX-NEXT: sete %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <8 x i32> %arg, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
  %tmp1 = icmp ne <8 x i32> %tmp, zeroinitializer
  %tmp2 = bitcast <8 x i1> %tmp1 to i8
  %tmp3 = icmp eq i8 %tmp2, 0
  ret i1 %tmp3
}

define i1 @allones_v16i32_and4(<16 x i32> %arg) {
; SSE-LABEL: allones_v16i32_and4:
; SSE: # %bb.0:
; SSE-NEXT: pslld $29, %xmm3
; SSE-NEXT: pslld $29, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: pslld $29, %xmm1
; SSE-NEXT: pslld $29, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packsswb %xmm2, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: cmpw $-1, %ax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allones_v16i32_and4:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpslld $29, %xmm2, %xmm2
; AVX1-NEXT: vpslld $29, %xmm1, %xmm1
; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpslld $29, %xmm2, %xmm2
; AVX1-NEXT: vpslld $29, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: cmpw $-1, %ax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allones_v16i32_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpslld $29, %ymm1, %ymm1
; AVX2-NEXT: vpsrad $31, %ymm1, %ymm1
; AVX2-NEXT: vpslld $29, %ymm0, %ymm0
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: cmpl $-1, %eax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: allones_v16i32_and4:
; AVX512: # %bb.0:
; AVX512-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %k0
; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: setb %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %tmp = and <16 x i32> %arg, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
  %tmp1 = icmp ne <16 x i32> %tmp, zeroinitializer
  %tmp2 = bitcast <16 x i1> %tmp1 to i16
  %tmp3 = icmp eq i16 %tmp2, -1
  ret i1 %tmp3
}

define i1 @allzeros_v16i32_and4(<16 x i32> %arg) {
; SSE-LABEL: allzeros_v16i32_and4:
; SSE: # %bb.0:
; SSE-NEXT: pslld $29, %xmm3
; SSE-NEXT: pslld $29, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: pslld $29, %xmm1
; SSE-NEXT: pslld $29, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packsswb %xmm2, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: testl %eax, %eax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allzeros_v16i32_and4:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpslld $29, %xmm2, %xmm2
; AVX1-NEXT: vpslld $29, %xmm1, %xmm1
; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpslld $29, %xmm2, %xmm2
; AVX1-NEXT: vpslld $29, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: testl %eax, %eax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allzeros_v16i32_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpslld $29, %ymm1, %ymm1
; AVX2-NEXT: vpsrad $31, %ymm1, %ymm1
; AVX2-NEXT: vpslld $29, %ymm0, %ymm0
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: testl %eax, %eax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: allzeros_v16i32_and4:
; AVX512: # %bb.0:
; AVX512-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %k0
; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: sete %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %tmp = and <16 x i32> %arg, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
  %tmp1 = icmp ne <16 x i32> %tmp, zeroinitializer
  %tmp2 = bitcast <16 x i1> %tmp1 to i16
  %tmp3 = icmp eq i16 %tmp2, 0
  ret i1 %tmp3
}

define i1 @allones_v2i64_and4(<2 x i64> %arg) {
; SSE-LABEL: allones_v2i64_and4:
; SSE: # %bb.0:
; SSE-NEXT: psllq $61, %xmm0
; SSE-NEXT: movmskpd %xmm0, %eax
; SSE-NEXT: cmpb $3, %al
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: allones_v2i64_and4:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpsllq $61, %xmm0, %xmm0
; AVX1OR2-NEXT: vmovmskpd %xmm0, %eax
; AVX1OR2-NEXT: cmpb $3, %al
; AVX1OR2-NEXT: sete %al
; AVX1OR2-NEXT: retq
;
; KNL-LABEL: allones_v2i64_and4:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vmovdqa {{.*#+}} xmm1 = [4,4]
; KNL-NEXT: vptestnmq %zmm1, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb $3, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allones_v2i64_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: cmpb $3, %al
; SKX-NEXT: sete %al
; SKX-NEXT: retq
  %tmp = and <2 x i64> %arg, <i64 4, i64 4>
  %tmp1 = icmp ne <2 x i64> %tmp, zeroinitializer
  %tmp2 = bitcast <2 x i1> %tmp1 to i2
  %tmp3 = icmp eq i2 %tmp2, -1
  ret i1 %tmp3
}

define i1 @allzeros_v2i64_and4(<2 x i64> %arg) {
; SSE-LABEL: allzeros_v2i64_and4:
; SSE: # %bb.0:
; SSE-NEXT: psllq $61, %xmm0
; SSE-NEXT: movmskpd %xmm0, %eax
; SSE-NEXT: testl %eax, %eax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: allzeros_v2i64_and4:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpsllq $61, %xmm0, %xmm0
; AVX1OR2-NEXT: vmovmskpd %xmm0, %eax
; AVX1OR2-NEXT: testl %eax, %eax
; AVX1OR2-NEXT: sete %al
; AVX1OR2-NEXT: retq
;
; KNL-LABEL: allzeros_v2i64_and4:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vmovdqa {{.*#+}} xmm1 = [4,4]
; KNL-NEXT: vptestmq %zmm1, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb $3, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allzeros_v2i64_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %k0
; SKX-NEXT: kortestb %k0, %k0
; SKX-NEXT: sete %al
; SKX-NEXT: retq
  %tmp = and <2 x i64> %arg, <i64 4, i64 4>
  %tmp1 = icmp ne <2 x i64> %tmp, zeroinitializer
  %tmp2 = bitcast <2 x i1> %tmp1 to i2
  %tmp3 = icmp eq i2 %tmp2, 0
  ret i1 %tmp3
}

define i1 @allones_v4i64_and4(<4 x i64> %arg) {
; SSE-LABEL: allones_v4i64_and4:
; SSE: # %bb.0:
; SSE-NEXT: psllq $61, %xmm1
; SSE-NEXT: psllq $61, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: movmskps %xmm0, %eax
; SSE-NEXT: cmpb $15, %al
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allones_v4i64_and4:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsllq $61, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
; AVX1-NEXT: cmpl $3, %eax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allones_v4i64_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllq $61, %ymm0, %ymm0
; AVX2-NEXT: vmovmskpd %ymm0, %eax
; AVX2-NEXT: cmpb $15, %al
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allones_v4i64_and4:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vptestnmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb $15, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allones_v4i64_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: cmpb $15, %al
; SKX-NEXT: sete %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <4 x i64> %arg, <i64 4, i64 4, i64 4, i64 4>
  %tmp1 = icmp ne <4 x i64> %tmp, zeroinitializer
  %tmp2 = bitcast <4 x i1> %tmp1 to i4
  %tmp3 = icmp eq i4 %tmp2, -1
  ret i1 %tmp3
}

define i1 @allzeros_v4i64_and4(<4 x i64> %arg) {
; SSE-LABEL: allzeros_v4i64_and4:
; SSE: # %bb.0:
; SSE-NEXT: psllq $61, %xmm1
; SSE-NEXT: psllq $61, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: movmskps %xmm0, %eax
; SSE-NEXT: testl %eax, %eax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allzeros_v4i64_and4:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsllq $61, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
; AVX1-NEXT: testl %eax, %eax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allzeros_v4i64_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllq $61, %ymm0, %ymm0
; AVX2-NEXT: vmovmskpd %ymm0, %eax
; AVX2-NEXT: testl %eax, %eax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allzeros_v4i64_and4:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb $15, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allzeros_v4i64_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %k0
; SKX-NEXT: kortestb %k0, %k0
; SKX-NEXT: sete %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <4 x i64> %arg, <i64 4, i64 4, i64 4, i64 4>
  %tmp1 = icmp ne <4 x i64> %tmp, zeroinitializer
  %tmp2 = bitcast <4 x i1> %tmp1 to i4
  %tmp3 = icmp eq i4 %tmp2, 0
  ret i1 %tmp3
}

define i1 @allones_v8i64_and4(<8 x i64> %arg) {
; SSE-LABEL: allones_v8i64_and4:
; SSE: # %bb.0:
; SSE-NEXT: psllq $61, %xmm3
; SSE-NEXT: psllq $61, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: psllq $61, %xmm1
; SSE-NEXT: psllq $61, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packssdw %xmm2, %xmm0
; SSE-NEXT: packsswb %xmm0, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: cmpb $-1, %al
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allones_v8i64_and4:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpsllq $61, %xmm2, %xmm2
; AVX1-NEXT: vpsllq $61, %xmm1, %xmm1
; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsllq $61, %xmm2, %xmm2
; AVX1-NEXT: vpsllq $61, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskps %xmm0, %eax
; AVX1-NEXT: cmpl $15, %eax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allones_v8i64_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllq $61, %ymm1, %ymm1
; AVX2-NEXT: vpsllq $61, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: cmpb $-1, %al
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allones_v8i64_and4:
; KNL: # %bb.0:
; KNL-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: cmpb $-1, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allones_v8i64_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %k0
; SKX-NEXT: kortestb %k0, %k0
; SKX-NEXT: setb %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <8 x i64> %arg, <i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4>
  %tmp1 = icmp ne <8 x i64> %tmp, zeroinitializer
  %tmp2 = bitcast <8 x i1> %tmp1 to i8
  %tmp3 = icmp eq i8 %tmp2, -1
  ret i1 %tmp3
}

define i1 @allzeros_v8i64_and4(<8 x i64> %arg) {
; SSE-LABEL: allzeros_v8i64_and4:
; SSE: # %bb.0:
; SSE-NEXT: psllq $61, %xmm3
; SSE-NEXT: psllq $61, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: psllq $61, %xmm1
; SSE-NEXT: psllq $61, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packssdw %xmm2, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: testl $43690, %eax # imm = 0xAAAA
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: allzeros_v8i64_and4:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpsllq $61, %xmm2, %xmm2
; AVX1-NEXT: vpsllq $61, %xmm1, %xmm1
; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsllq $61, %xmm2, %xmm2
; AVX1-NEXT: vpsllq $61, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskps %xmm0, %eax
; AVX1-NEXT: testl %eax, %eax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: allzeros_v8i64_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllq $61, %ymm1, %ymm1
; AVX2-NEXT: vpsllq $61, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: testl %eax, %eax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; KNL-LABEL: allzeros_v8i64_and4:
; KNL: # %bb.0:
; KNL-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb %al, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: allzeros_v8i64_and4:
; SKX: # %bb.0:
; SKX-NEXT: vptestmq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %k0
; SKX-NEXT: kortestb %k0, %k0
; SKX-NEXT: sete %al
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %tmp = and <8 x i64> %arg, <i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4>
  %tmp1 = icmp ne <8 x i64> %tmp, zeroinitializer
  %tmp2 = bitcast <8 x i1> %tmp1 to i8
  %tmp3 = icmp eq i8 %tmp2, 0
  ret i1 %tmp3
}

; The below are IR patterns that should directly represent the behavior of a
; MOVMSK instruction.
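; As an illustrative, hand-written sketch of that shared shape (the function
; name is hypothetical and it is not covered by the autogenerated check
; lines): a MOVMSK is a sign-bit compare of each lane, a bitcast of the
; <N x i1> result to an N-bit integer, and a zext of that integer to i32.
define i32 @movmsk_pattern_sketch(<4 x i32> %v) {
  %sign = icmp slt <4 x i32> %v, zeroinitializer ; one i1 per lane: is the sign bit set?
  %mask = bitcast <4 x i1> %sign to i4           ; pack the lane bits into an i4
  %wide = zext i4 %mask to i32                   ; widen to the movmskps result width
  ret i32 %wide
}
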
define i32 @movmskpd(<2 x double> %x) {
; SSE-LABEL: movmskpd:
; SSE: # %bb.0:
; SSE-NEXT: movmskpd %xmm0, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: movmskpd:
; AVX: # %bb.0:
; AVX-NEXT: vmovmskpd %xmm0, %eax
; AVX-NEXT: retq
  %a = bitcast <2 x double> %x to <2 x i64>
  %b = icmp slt <2 x i64> %a, zeroinitializer
  %c = bitcast <2 x i1> %b to i2
  %d = zext i2 %c to i32
  ret i32 %d
}

define i32 @movmskps(<4 x float> %x) {
; SSE-LABEL: movmskps:
; SSE: # %bb.0:
; SSE-NEXT: movmskps %xmm0, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: movmskps:
; AVX: # %bb.0:
; AVX-NEXT: vmovmskps %xmm0, %eax
; AVX-NEXT: retq
  %a = bitcast <4 x float> %x to <4 x i32>
  %b = icmp slt <4 x i32> %a, zeroinitializer
  %c = bitcast <4 x i1> %b to i4
  %d = zext i4 %c to i32
  ret i32 %d
}

define i32 @movmskpd256(<4 x double> %x) {
; SSE-LABEL: movmskpd256:
; SSE: # %bb.0:
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: movmskps %xmm0, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: movmskpd256:
; AVX: # %bb.0:
; AVX-NEXT: vmovmskpd %ymm0, %eax
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %a = bitcast <4 x double> %x to <4 x i64>
  %b = icmp slt <4 x i64> %a, zeroinitializer
  %c = bitcast <4 x i1> %b to i4
  %d = zext i4 %c to i32
  ret i32 %d
}

define i32 @movmskps256(<8 x float> %x) {
; SSE-LABEL: movmskps256:
; SSE: # %bb.0:
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packsswb %xmm0, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: movzbl %al, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: movmskps256:
; AVX: # %bb.0:
; AVX-NEXT: vmovmskps %ymm0, %eax
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %a = bitcast <8 x float> %x to <8 x i32>
  %b = icmp slt <8 x i32> %a, zeroinitializer
  %c = bitcast <8 x i1> %b to i8
  %d = zext i8 %c to i32
  ret i32 %d
}

define i32 @movmskb(<16 x i8> %x) {
; SSE-LABEL: movmskb:
; SSE: # %bb.0:
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: movmskb:
; AVX: # %bb.0:
; AVX-NEXT: vpmovmskb %xmm0, %eax
; AVX-NEXT: retq
  %a = icmp slt <16 x i8> %x, zeroinitializer
  %b = bitcast <16 x i1> %a to i16
  %c = zext i16 %b to i32
  ret i32 %c
}

define i32 @movmskb256(<32 x i8> %x) {
; SSE-LABEL: movmskb256:
; SSE: # %bb.0:
; SSE-NEXT: pmovmskb %xmm0, %ecx
; SSE-NEXT: pmovmskb %xmm1, %eax
; SSE-NEXT: shll $16, %eax
; SSE-NEXT: orl %ecx, %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: movmskb256:
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovmskb %xmm0, %ecx
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: shll $16, %eax
; AVX1-NEXT: orl %ecx, %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: movmskb256:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: movmskb256:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovmskb %ymm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %a = icmp slt <32 x i8> %x, zeroinitializer
  %b = bitcast <32 x i1> %a to i32
  ret i32 %b
}

; Multiple extract elements from a vector compare.
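; A minimal hand-written sketch of the pattern these tests exercise (the
; function name is hypothetical, and no checks are attached): each extracted
; <N x i1> lane corresponds to one bit of the movmsk result, so combining
; several lanes should fold into bit tests on a single scalar mask.
define i1 @movmsk_two_lanes_sketch(<4 x i32> %x, <4 x i32> %y) {
  %cmp = icmp eq <4 x i32> %x, %y
  %e0 = extractelement <4 x i1> %cmp, i32 0 ; bit 0 of the mask
  %e3 = extractelement <4 x i1> %cmp, i32 3 ; bit 3 of the mask
  %r = and i1 %e0, %e3
  ret i1 %r
}
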
define i1 @movmsk_v16i8(<16 x i8> %x, <16 x i8> %y) {
; SSE-LABEL: movmsk_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrl $15, %ecx
; SSE-NEXT: movl %eax, %edx
; SSE-NEXT: shrl $8, %edx
; SSE-NEXT: andl $1, %edx
; SSE-NEXT: andl $8, %eax
; SSE-NEXT: shrl $3, %eax
; SSE-NEXT: xorl %edx, %eax
; SSE-NEXT: andl %ecx, %eax
; SSE-NEXT: # kill: def $al killed $al killed $eax
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: movmsk_v16i8:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax
; AVX1OR2-NEXT: movl %eax, %ecx
; AVX1OR2-NEXT: shrl $15, %ecx
; AVX1OR2-NEXT: movl %eax, %edx
; AVX1OR2-NEXT: shrl $8, %edx
; AVX1OR2-NEXT: andl $1, %edx
; AVX1OR2-NEXT: andl $8, %eax
; AVX1OR2-NEXT: shrl $3, %eax
; AVX1OR2-NEXT: xorl %edx, %eax
; AVX1OR2-NEXT: andl %ecx, %eax
; AVX1OR2-NEXT: # kill: def $al killed $al killed $eax
; AVX1OR2-NEXT: retq
;
; KNL-LABEL: movmsk_v16i8:
; KNL: # %bb.0:
; KNL-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k1
; KNL-NEXT: kmovw %k1, %ecx
; KNL-NEXT: kshiftrw $8, %k0, %k1
; KNL-NEXT: kmovw %k1, %edx
; KNL-NEXT: kshiftrw $3, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: xorb %dl, %al
; KNL-NEXT: andb %cl, %al
; KNL-NEXT: # kill: def $al killed $al killed $eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: movmsk_v16i8:
; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqb %xmm1, %xmm0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k1
; SKX-NEXT: kmovd %k1, %ecx
; SKX-NEXT: kshiftrw $8, %k0, %k1
; SKX-NEXT: kmovd %k1, %edx
; SKX-NEXT: kshiftrw $3, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: xorb %dl, %al
; SKX-NEXT: andb %cl, %al
; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq
  %cmp = icmp eq <16 x i8> %x, %y
  %e1 = extractelement <16 x i1> %cmp, i32 3
  %e2 = extractelement <16 x i1> %cmp, i32 8
  %e3 = extractelement <16 x i1> %cmp, i32 15
  %u1 = xor i1 %e1, %e2
  %u2 = and i1 %e3, %u1
  ret i1 %u2
}

define i1 @movmsk_v8i16(<8 x i16> %x, <8 x i16> %y) {
; SSE-LABEL: movmsk_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm1, %xmm0
; SSE-NEXT: packsswb %xmm0, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: notb %al
; SSE-NEXT: testb $-109, %al
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: movmsk_v8i16:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX1OR2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax
; AVX1OR2-NEXT: notb %al
; AVX1OR2-NEXT: testb $-109, %al
; AVX1OR2-NEXT: sete %al
; AVX1OR2-NEXT: retq
;
; KNL-LABEL: movmsk_v8i16:
; KNL: # %bb.0:
; KNL-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vptestnmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb $-109, %al
; KNL-NEXT: sete %al
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: movmsk_v8i16:
; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; SKX-NEXT: knotb %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: testb $-109, %al
; SKX-NEXT: sete %al
; SKX-NEXT: retq
  %cmp = icmp sgt <8 x i16> %x, %y
  %e1 = extractelement <8 x i1> %cmp, i32 0
  %e2 = extractelement <8 x i1> %cmp, i32 1
  %e3 = extractelement <8 x i1> %cmp, i32 7
  %e4 = extractelement <8 x i1> %cmp, i32 4
  %u1 = and i1 %e1, %e2
  %u2 = and i1 %e3, %e4
  %u3 = and i1 %u1, %u2
  ret i1 %u3
}

; TODO: Replace shift+mask chain with AND+CMP.
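; A hedged sketch of the AND+CMP idea on the scalar mask (hand-written,
; hypothetical name, no checks attached): the xor of mask bits 2 and 3 in
; @movmsk_v4i32 below, currently a shrb/andb/shrb/xorb chain, can be written
; as a single AND that keeps both bits followed by equality compares, since
; the xor of two bits is 1 exactly when one of them is set.
define i1 @movmsk_xor_bits_sketch(i32 %msk) {
  %bits = and i32 %msk, 12   ; keep movmsk bits 2 and 3
  %lo = icmp eq i32 %bits, 4 ; only bit 2 set
  %hi = icmp eq i32 %bits, 8 ; only bit 3 set
  %xor = or i1 %lo, %hi      ; exactly one of the two bits set
  ret i1 %xor
}
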
define i1 @movmsk_v4i32(<4 x i32> %x, <4 x i32> %y) { ; SSE-LABEL: movmsk_v4i32: ; SSE: # %bb.0: ; SSE-NEXT: pcmpgtd %xmm0, %xmm1 ; SSE-NEXT: movmskps %xmm1, %eax ; SSE-NEXT: movl %eax, %ecx ; SSE-NEXT: shrb $3, %cl ; SSE-NEXT: andb $4, %al ; SSE-NEXT: shrb $2, %al ; SSE-NEXT: xorb %cl, %al ; SSE-NEXT: # kill: def $al killed $al killed $eax ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: movmsk_v4i32: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0 ; AVX1OR2-NEXT: vmovmskps %xmm0, %eax ; AVX1OR2-NEXT: movl %eax, %ecx ; AVX1OR2-NEXT: shrb $3, %cl ; AVX1OR2-NEXT: andb $4, %al ; AVX1OR2-NEXT: shrb $2, %al ; AVX1OR2-NEXT: xorb %cl, %al ; AVX1OR2-NEXT: # kill: def $al killed $al killed $eax ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: movmsk_v4i32: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vpcmpgtd %zmm0, %zmm1, %k0 ; KNL-NEXT: kshiftrw $3, %k0, %k1 ; KNL-NEXT: kmovw %k1, %ecx ; KNL-NEXT: kshiftrw $2, %k0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: xorb %cl, %al ; KNL-NEXT: # kill: def $al killed $al killed $eax ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: movmsk_v4i32: ; SKX: # %bb.0: ; SKX-NEXT: vpcmpgtd %xmm0, %xmm1, %k0 ; SKX-NEXT: kshiftrb $3, %k0, %k1 ; SKX-NEXT: kmovd %k1, %ecx ; SKX-NEXT: kshiftrb $2, %k0, %k0 ; SKX-NEXT: kmovd %k0, %eax ; SKX-NEXT: xorb %cl, %al ; SKX-NEXT: # kill: def $al killed $al killed $eax ; SKX-NEXT: retq %cmp = icmp slt <4 x i32> %x, %y %e1 = extractelement <4 x i1> %cmp, i32 2 %e2 = extractelement <4 x i1> %cmp, i32 3 %u1 = xor i1 %e1, %e2 ret i1 %u1 } define i1 @movmsk_and_v2i64(<2 x i64> %x, <2 x i64> %y) { ; SSE2-LABEL: movmsk_and_v2i64: ; SSE2: # %bb.0: ; SSE2-NEXT: pcmpeqd %xmm1, %xmm0 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2] ; SSE2-NEXT: pand %xmm0, %xmm1 ; SSE2-NEXT: movmskpd %xmm1, %eax ; SSE2-NEXT: xorl $3, %eax ; SSE2-NEXT: cmpb $3, %al ; SSE2-NEXT: sete %al ; SSE2-NEXT: retq ; ; SSE41-LABEL: movmsk_and_v2i64: ; SSE41: # %bb.0: ; SSE41-NEXT: pcmpeqq %xmm1, %xmm0 ; SSE41-NEXT: movmskpd %xmm0, %eax ; SSE41-NEXT: xorl $3, %eax ; SSE41-NEXT: cmpb $3, %al ; SSE41-NEXT: sete %al ; SSE41-NEXT: retq ; ; AVX1OR2-LABEL: movmsk_and_v2i64: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 ; AVX1OR2-NEXT: vmovmskpd %xmm0, %eax ; AVX1OR2-NEXT: xorl $3, %eax ; AVX1OR2-NEXT: cmpb $3, %al ; AVX1OR2-NEXT: sete %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: movmsk_and_v2i64: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb $3, %al ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: movmsk_and_v2i64: ; SKX: # %bb.0: ; SKX-NEXT: vpcmpneqq %xmm1, %xmm0, %k0 ; SKX-NEXT: kmovd %k0, %eax ; SKX-NEXT: cmpb $3, %al ; SKX-NEXT: sete %al ; SKX-NEXT: retq %cmp = icmp ne <2 x i64> %x, %y %e1 = extractelement <2 x i1> %cmp, i32 0 %e2 = extractelement <2 x i1> %cmp, i32 1 %u1 = and i1 %e1, %e2 ret i1 %u1 } define i1 @movmsk_or_v2i64(<2 x i64> %x, <2 x i64> %y) { ; SSE2-LABEL: movmsk_or_v2i64: ; SSE2: # %bb.0: ; SSE2-NEXT: pcmpeqd %xmm1, %xmm0 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2] ; SSE2-NEXT: pand %xmm0, %xmm1 ; SSE2-NEXT: movmskpd %xmm1, %eax ; SSE2-NEXT: cmpl $3, %eax ; SSE2-NEXT: setne %al ; SSE2-NEXT: retq ; ; SSE41-LABEL: movmsk_or_v2i64: ; SSE41: # %bb.0: ; SSE41-NEXT: psubq %xmm1, %xmm0 ; SSE41-NEXT: ptest %xmm0, %xmm0 ; SSE41-NEXT: setne %al ; SSE41-NEXT: retq ; ; 
AVX1OR2-LABEL: movmsk_or_v2i64: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vpsubq %xmm1, %xmm0, %xmm0 ; AVX1OR2-NEXT: vptest %xmm0, %xmm0 ; AVX1OR2-NEXT: setne %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: movmsk_or_v2i64: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vpcmpneqq %zmm1, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb $3, %al ; KNL-NEXT: setne %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: movmsk_or_v2i64: ; SKX: # %bb.0: ; SKX-NEXT: vpcmpneqq %xmm1, %xmm0, %k0 ; SKX-NEXT: kortestb %k0, %k0 ; SKX-NEXT: setne %al ; SKX-NEXT: retq %cmp = icmp ne <2 x i64> %x, %y %e1 = extractelement <2 x i1> %cmp, i32 0 %e2 = extractelement <2 x i1> %cmp, i32 1 %u1 = or i1 %e1, %e2 ret i1 %u1 } define i1 @movmsk_v4f32(<4 x float> %x, <4 x float> %y) { ; SSE-LABEL: movmsk_v4f32: ; SSE: # %bb.0: ; SSE-NEXT: movaps %xmm0, %xmm2 ; SSE-NEXT: cmpeqps %xmm1, %xmm2 ; SSE-NEXT: cmpunordps %xmm1, %xmm0 ; SSE-NEXT: orps %xmm2, %xmm0 ; SSE-NEXT: movmskps %xmm0, %eax ; SSE-NEXT: testb $14, %al ; SSE-NEXT: setne %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: movmsk_v4f32: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vcmpeq_uqps %xmm1, %xmm0, %xmm0 ; AVX1OR2-NEXT: vmovmskps %xmm0, %eax ; AVX1OR2-NEXT: testb $14, %al ; AVX1OR2-NEXT: setne %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: movmsk_v4f32: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vcmpeq_uqps %zmm1, %zmm0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb $14, %al ; KNL-NEXT: setne %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: movmsk_v4f32: ; SKX: # %bb.0: ; SKX-NEXT: vcmpeq_uqps %xmm1, %xmm0, %k0 ; SKX-NEXT: kmovd %k0, %eax ; SKX-NEXT: testb $14, %al ; SKX-NEXT: setne %al ; SKX-NEXT: retq %cmp = fcmp ueq <4 x float> %x, %y %e1 = extractelement <4 x i1> %cmp, i32 1 %e2 = extractelement <4 x i1> %cmp, i32 2 %e3 = extractelement <4 x i1> %cmp, i32 3 %u1 = or i1 %e1, %e2 %u2 = or i1 %u1, %e3 ret i1 %u2 } define i1 @movmsk_and_v2f64(<2 x double> %x, <2 x double> %y) { ; SSE-LABEL: movmsk_and_v2f64: ; SSE: # %bb.0: ; SSE-NEXT: cmplepd %xmm0, %xmm1 ; SSE-NEXT: movmskpd %xmm1, %eax ; SSE-NEXT: cmpb $3, %al ; SSE-NEXT: sete %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: movmsk_and_v2f64: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vcmplepd %xmm0, %xmm1, %xmm0 ; AVX1OR2-NEXT: vmovmskpd %xmm0, %eax ; AVX1OR2-NEXT: cmpb $3, %al ; AVX1OR2-NEXT: sete %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: movmsk_and_v2f64: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vcmplepd %zmm0, %zmm1, %k0 ; KNL-NEXT: knotw %k0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb $3, %al ; KNL-NEXT: sete %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: movmsk_and_v2f64: ; SKX: # %bb.0: ; SKX-NEXT: vcmplepd %xmm0, %xmm1, %k0 ; SKX-NEXT: kmovd %k0, %eax ; SKX-NEXT: cmpb $3, %al ; SKX-NEXT: sete %al ; SKX-NEXT: retq %cmp = fcmp oge <2 x double> %x, %y %e1 = extractelement <2 x i1> %cmp, i32 0 %e2 = extractelement <2 x i1> %cmp, i32 1 %u1 = and i1 %e1, %e2 ret i1 %u1 } define i1 @movmsk_or_v2f64(<2 x double> %x, <2 x double> %y) { ; SSE-LABEL: movmsk_or_v2f64: ; SSE: # %bb.0: ; SSE-NEXT: cmplepd %xmm0, %xmm1 ; SSE-NEXT: movmskpd %xmm1, %eax ; SSE-NEXT: testl %eax, %eax ; SSE-NEXT: setne %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: movmsk_or_v2f64: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vcmplepd %xmm0, %xmm1, %xmm0 ; AVX1OR2-NEXT: vmovmskpd %xmm0, 
%eax ; AVX1OR2-NEXT: testl %eax, %eax ; AVX1OR2-NEXT: setne %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: movmsk_or_v2f64: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vcmplepd %zmm0, %zmm1, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb $3, %al ; KNL-NEXT: setne %al ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: movmsk_or_v2f64: ; SKX: # %bb.0: ; SKX-NEXT: vcmplepd %xmm0, %xmm1, %k0 ; SKX-NEXT: kortestb %k0, %k0 ; SKX-NEXT: setne %al ; SKX-NEXT: retq %cmp = fcmp oge <2 x double> %x, %y %e1 = extractelement <2 x i1> %cmp, i32 0 %e2 = extractelement <2 x i1> %cmp, i32 1 %u1 = or i1 %e1, %e2 ret i1 %u1 } ; Extract an element at a non-constant (variable) index. define i1 @movmsk_v16i8_var(<16 x i8> %x, <16 x i8> %y, i32 %z) { ; SSE-LABEL: movmsk_v16i8_var: ; SSE: # %bb.0: ; SSE-NEXT: pcmpeqb %xmm1, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: btl %edi, %eax ; SSE-NEXT: setb %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: movmsk_v16i8_var: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0 ; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax ; AVX1OR2-NEXT: btl %edi, %eax ; AVX1OR2-NEXT: setb %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: movmsk_v16i8_var: ; KNL: # %bb.0: ; KNL-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0 ; KNL-NEXT: vpmovmskb %xmm0, %eax ; KNL-NEXT: btl %edi, %eax ; KNL-NEXT: setb %al ; KNL-NEXT: retq ; ; SKX-LABEL: movmsk_v16i8_var: ; SKX: # %bb.0: ; SKX-NEXT: # kill: def $edi killed $edi def $rdi ; SKX-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 ; SKX-NEXT: vpmovm2b %k0, %xmm0 ; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) ; SKX-NEXT: andl $15, %edi ; SKX-NEXT: movzbl -24(%rsp,%rdi), %eax ; SKX-NEXT: retq %cmp = icmp eq <16 x i8> %x, %y %val = extractelement <16 x i1> %cmp, i32 %z ret i1 %val } define i1 @movmsk_v8i16_var(<8 x i16> %x, <8 x i16> %y, i32 %z) { ; SSE-LABEL: movmsk_v8i16_var: ; SSE: # %bb.0: ; SSE-NEXT: pcmpgtw %xmm1, %xmm0 ; SSE-NEXT: packsswb %xmm0, %xmm0 ; SSE-NEXT: pmovmskb %xmm0, %eax ; SSE-NEXT: btl %edi, %eax ; SSE-NEXT: setb %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: movmsk_v8i16_var: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0 ; AVX1OR2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax ; AVX1OR2-NEXT: btl %edi, %eax ; AVX1OR2-NEXT: setb %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: movmsk_v8i16_var: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $edi killed $edi def $rdi ; KNL-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0 ; KNL-NEXT: vpmovsxwq %xmm0, %zmm0 ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 ; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} ; KNL-NEXT: vpmovdw %zmm0, %ymm0 ; KNL-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) ; KNL-NEXT: andl $7, %edi ; KNL-NEXT: movzbl -24(%rsp,%rdi,2), %eax ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: movmsk_v8i16_var: ; SKX: # %bb.0: ; SKX-NEXT: # kill: def $edi killed $edi def $rdi ; SKX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 ; SKX-NEXT: vpmovm2w %k0, %xmm0 ; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) ; SKX-NEXT: andl $7, %edi ; SKX-NEXT: movzbl -24(%rsp,%rdi,2), %eax ; SKX-NEXT: retq %cmp = icmp sgt <8 x i16> %x, %y %val = extractelement <8 x i1> %cmp, i32 %z ret i1 %val } define i1 @movmsk_v4i32_var(<4 x i32> %x, <4 x i32> %y, i32 %z) { ; SSE-LABEL: movmsk_v4i32_var: ; SSE: # %bb.0: ; SSE-NEXT: pcmpgtd %xmm0, %xmm1 ; SSE-NEXT: movmskps %xmm1, %eax ; SSE-NEXT: btl %edi, %eax ; SSE-NEXT: setb %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: movmsk_v4i32_var: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0 ; AVX1OR2-NEXT: vmovmskps
%xmm0, %eax ; AVX1OR2-NEXT: btl %edi, %eax ; AVX1OR2-NEXT: setb %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: movmsk_v4i32_var: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $edi killed $edi def $rdi ; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1 ; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} ; KNL-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) ; KNL-NEXT: andl $3, %edi ; KNL-NEXT: movzbl -24(%rsp,%rdi,4), %eax ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: movmsk_v4i32_var: ; SKX: # %bb.0: ; SKX-NEXT: # kill: def $edi killed $edi def $rdi ; SKX-NEXT: vpcmpgtd %xmm0, %xmm1, %k0 ; SKX-NEXT: vpmovm2d %k0, %xmm0 ; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) ; SKX-NEXT: andl $3, %edi ; SKX-NEXT: movzbl -24(%rsp,%rdi,4), %eax ; SKX-NEXT: retq %cmp = icmp slt <4 x i32> %x, %y %val = extractelement <4 x i1> %cmp, i32 %z ret i1 %val } define i1 @movmsk_v2i64_var(<2 x i64> %x, <2 x i64> %y, i32 %z) { ; SSE2-LABEL: movmsk_v2i64_var: ; SSE2: # %bb.0: ; SSE2-NEXT: pcmpeqd %xmm1, %xmm0 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2] ; SSE2-NEXT: pand %xmm0, %xmm1 ; SSE2-NEXT: movmskpd %xmm1, %eax ; SSE2-NEXT: xorl $3, %eax ; SSE2-NEXT: btl %edi, %eax ; SSE2-NEXT: setb %al ; SSE2-NEXT: retq ; ; SSE41-LABEL: movmsk_v2i64_var: ; SSE41: # %bb.0: ; SSE41-NEXT: pcmpeqq %xmm1, %xmm0 ; SSE41-NEXT: movmskpd %xmm0, %eax ; SSE41-NEXT: xorl $3, %eax ; SSE41-NEXT: btl %edi, %eax ; SSE41-NEXT: setb %al ; SSE41-NEXT: retq ; ; AVX1OR2-LABEL: movmsk_v2i64_var: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 ; AVX1OR2-NEXT: vmovmskpd %xmm0, %eax ; AVX1OR2-NEXT: xorl $3, %eax ; AVX1OR2-NEXT: btl %edi, %eax ; AVX1OR2-NEXT: setb %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: movmsk_v2i64_var: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $edi killed $edi def $rdi ; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vpcmpneqq %zmm1, %zmm0, %k1 ; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z} ; KNL-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) ; KNL-NEXT: andl $1, %edi ; KNL-NEXT: movzbl -24(%rsp,%rdi,8), %eax ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: movmsk_v2i64_var: ; SKX: # %bb.0: ; SKX-NEXT: # kill: def $edi killed $edi def $rdi ; SKX-NEXT: vpcmpneqq %xmm1, %xmm0, %k0 ; SKX-NEXT: vpmovm2q %k0, %xmm0 ; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) ; SKX-NEXT: andl $1, %edi ; SKX-NEXT: movzbl -24(%rsp,%rdi,8), %eax ; SKX-NEXT: retq %cmp = icmp ne <2 x i64> %x, %y %val = extractelement <2 x i1> %cmp, i32 %z ret i1 %val } define i1 @movmsk_v4f32_var(<4 x float> %x, <4 x float> %y, i32 %z) { ; SSE-LABEL: movmsk_v4f32_var: ; SSE: # %bb.0: ; SSE-NEXT: movaps %xmm0, %xmm2 ; SSE-NEXT: cmpeqps %xmm1, %xmm2 ; SSE-NEXT: cmpunordps %xmm1, %xmm0 ; SSE-NEXT: orps %xmm2, %xmm0 ; SSE-NEXT: movmskps %xmm0, %eax ; SSE-NEXT: btl %edi, %eax ; SSE-NEXT: setb %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: movmsk_v4f32_var: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vcmpeq_uqps %xmm1, %xmm0, %xmm0 ; AVX1OR2-NEXT: vmovmskps %xmm0, %eax ; AVX1OR2-NEXT: btl %edi, %eax ; AVX1OR2-NEXT: setb %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: movmsk_v4f32_var: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $edi killed $edi def $rdi ; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vcmpeq_uqps %zmm1, %zmm0, %k1 ; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} ; KNL-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) ; KNL-NEXT: andl $3, %edi 
; KNL-NEXT: movzbl -24(%rsp,%rdi,4), %eax ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: movmsk_v4f32_var: ; SKX: # %bb.0: ; SKX-NEXT: # kill: def $edi killed $edi def $rdi ; SKX-NEXT: vcmpeq_uqps %xmm1, %xmm0, %k0 ; SKX-NEXT: vpmovm2d %k0, %xmm0 ; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) ; SKX-NEXT: andl $3, %edi ; SKX-NEXT: movzbl -24(%rsp,%rdi,4), %eax ; SKX-NEXT: retq %cmp = fcmp ueq <4 x float> %x, %y %val = extractelement <4 x i1> %cmp, i32 %z ret i1 %val } define i1 @movmsk_v2f64_var(<2 x double> %x, <2 x double> %y, i32 %z) { ; SSE-LABEL: movmsk_v2f64_var: ; SSE: # %bb.0: ; SSE-NEXT: cmplepd %xmm0, %xmm1 ; SSE-NEXT: movmskpd %xmm1, %eax ; SSE-NEXT: btl %edi, %eax ; SSE-NEXT: setb %al ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: movmsk_v2f64_var: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vcmplepd %xmm0, %xmm1, %xmm0 ; AVX1OR2-NEXT: vmovmskpd %xmm0, %eax ; AVX1OR2-NEXT: btl %edi, %eax ; AVX1OR2-NEXT: setb %al ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: movmsk_v2f64_var: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $edi killed $edi def $rdi ; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vcmplepd %zmm0, %zmm1, %k1 ; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z} ; KNL-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) ; KNL-NEXT: andl $1, %edi ; KNL-NEXT: movzbl -24(%rsp,%rdi,8), %eax ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: movmsk_v2f64_var: ; SKX: # %bb.0: ; SKX-NEXT: # kill: def $edi killed $edi def $rdi ; SKX-NEXT: vcmplepd %xmm0, %xmm1, %k0 ; SKX-NEXT: vpmovm2q %k0, %xmm0 ; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) ; SKX-NEXT: andl $1, %edi ; SKX-NEXT: movzbl -24(%rsp,%rdi,8), %eax ; SKX-NEXT: retq %cmp = fcmp oge <2 x double> %x, %y %val = extractelement <2 x i1> %cmp, i32 %z ret i1 %val } ; TODO: We expect a result similar to PR39665_c_ray_opt below, ; but this is not the case in practice.
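; As the checks below show, the and of the two extracted lanes currently survives selection:
; the SSE/AVX code tests bit 1 and bit 0 of the movmskpd result separately with two cmovs,
; and the AVX512 code uses a kshift plus two testb instructions, rather than the single
; cmpb $3 (or knot + testb $3 / kortest) sequence produced for PR39665_c_ray_opt.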
define i32 @PR39665_c_ray(<2 x double> %x, <2 x double> %y) { ; SSE-LABEL: PR39665_c_ray: ; SSE: # %bb.0: ; SSE-NEXT: cmpltpd %xmm0, %xmm1 ; SSE-NEXT: movmskpd %xmm1, %ecx ; SSE-NEXT: testb $2, %cl ; SSE-NEXT: movl $42, %eax ; SSE-NEXT: movl $99, %edx ; SSE-NEXT: cmovel %edx, %eax ; SSE-NEXT: testb $1, %cl ; SSE-NEXT: cmovel %edx, %eax ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: PR39665_c_ray: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0 ; AVX1OR2-NEXT: vmovmskpd %xmm0, %ecx ; AVX1OR2-NEXT: testb $2, %cl ; AVX1OR2-NEXT: movl $42, %eax ; AVX1OR2-NEXT: movl $99, %edx ; AVX1OR2-NEXT: cmovel %edx, %eax ; AVX1OR2-NEXT: testb $1, %cl ; AVX1OR2-NEXT: cmovel %edx, %eax ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: PR39665_c_ray: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vcmpltpd %zmm0, %zmm1, %k0 ; KNL-NEXT: kshiftrw $1, %k0, %k1 ; KNL-NEXT: kmovw %k1, %eax ; KNL-NEXT: kmovw %k0, %ecx ; KNL-NEXT: testb $1, %al ; KNL-NEXT: movl $42, %eax ; KNL-NEXT: movl $99, %edx ; KNL-NEXT: cmovel %edx, %eax ; KNL-NEXT: testb $1, %cl ; KNL-NEXT: cmovel %edx, %eax ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: PR39665_c_ray: ; SKX: # %bb.0: ; SKX-NEXT: vcmpltpd %xmm0, %xmm1, %k0 ; SKX-NEXT: kshiftrb $1, %k0, %k1 ; SKX-NEXT: kmovd %k1, %eax ; SKX-NEXT: kmovd %k0, %ecx ; SKX-NEXT: testb $1, %al ; SKX-NEXT: movl $42, %eax ; SKX-NEXT: movl $99, %edx ; SKX-NEXT: cmovel %edx, %eax ; SKX-NEXT: testb $1, %cl ; SKX-NEXT: cmovel %edx, %eax ; SKX-NEXT: retq %cmp = fcmp ogt <2 x double> %x, %y %e1 = extractelement <2 x i1> %cmp, i32 0 %e2 = extractelement <2 x i1> %cmp, i32 1 %u = and i1 %e1, %e2 %r = select i1 %u, i32 42, i32 99 ret i32 %r } define i32 @PR39665_c_ray_opt(<2 x double> %x, <2 x double> %y) { ; SSE-LABEL: PR39665_c_ray_opt: ; SSE: # %bb.0: ; SSE-NEXT: cmpltpd %xmm0, %xmm1 ; SSE-NEXT: movmskpd %xmm1, %eax ; SSE-NEXT: cmpb $3, %al ; SSE-NEXT: movl $42, %ecx ; SSE-NEXT: movl $99, %eax ; SSE-NEXT: cmovel %ecx, %eax ; SSE-NEXT: retq ; ; AVX1OR2-LABEL: PR39665_c_ray_opt: ; AVX1OR2: # %bb.0: ; AVX1OR2-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0 ; AVX1OR2-NEXT: vmovmskpd %xmm0, %eax ; AVX1OR2-NEXT: cmpb $3, %al ; AVX1OR2-NEXT: movl $42, %ecx ; AVX1OR2-NEXT: movl $99, %eax ; AVX1OR2-NEXT: cmovel %ecx, %eax ; AVX1OR2-NEXT: retq ; ; KNL-LABEL: PR39665_c_ray_opt: ; KNL: # %bb.0: ; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 ; KNL-NEXT: vcmpltpd %zmm0, %zmm1, %k0 ; KNL-NEXT: knotw %k0, %k0 ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb $3, %al ; KNL-NEXT: movl $42, %ecx ; KNL-NEXT: movl $99, %eax ; KNL-NEXT: cmovel %ecx, %eax ; KNL-NEXT: vzeroupper ; KNL-NEXT: retq ; ; SKX-LABEL: PR39665_c_ray_opt: ; SKX: # %bb.0: ; SKX-NEXT: vcmpltpd %xmm0, %xmm1, %k0 ; SKX-NEXT: kmovd %k0, %eax ; SKX-NEXT: cmpb $3, %al ; SKX-NEXT: movl $42, %ecx ; SKX-NEXT: movl $99, %eax ; SKX-NEXT: cmovel %ecx, %eax ; SKX-NEXT: retq %cmp = fcmp ogt <2 x double> %x, %y %shift = shufflevector <2 x i1> %cmp, <2 x i1> poison, <2 x i32> <i32 1, i32 undef> %1 = and <2 x i1> %cmp, %shift %u = extractelement <2 x i1> %1, i64 0 %r = select i1 %u, i32 42, i32 99 ret i32 %r }
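; A minimal hand-written sketch (not autogenerated; the function name and the use of the
; llvm.vector.reduce.and intrinsic are illustrative additions, not part of the original
; tests) expressing the same all-lanes-true predicate as a reduction instead of per-lane
; extracts. No FileCheck assertions are included; they would need to be regenerated with
; utils/update_llc_test_checks.py.
define i32 @PR39665_c_ray_reduce(<2 x double> %x, <2 x double> %y) {
  %cmp = fcmp ogt <2 x double> %x, %y
  ; Reduce the <2 x i1> compare with 'and'; this yields the same i1 as and'ing the two
  ; lanes extracted individually in PR39665_c_ray above.
  %u = call i1 @llvm.vector.reduce.and.v2i1(<2 x i1> %cmp)
  %r = select i1 %u, i32 42, i32 99
  ret i32 %r
}
declare i1 @llvm.vector.reduce.and.v2i1(<2 x i1>)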