; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; NOTE(review): the CHECK lines below are machine-generated expected output --
; do not hand-edit them; rerun update_llc_test_checks.py instead.
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-MVE
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-MVEFP

;
; Float to unsigned 32-bit -- Vector size variation
; (header previously said "signed"; these are @llvm.fptoui.sat tests)
;

declare <1 x i32> @llvm.fptoui.sat.v1f32.v1i32 (<1 x float>)
declare <2 x i32> @llvm.fptoui.sat.v2f32.v2i32 (<2 x float>)
declare <3 x i32> @llvm.fptoui.sat.v3f32.v3i32 (<3 x float>)
declare <4 x i32> @llvm.fptoui.sat.v4f32.v4i32 (<4 x float>)
declare <5 x i32> @llvm.fptoui.sat.v5f32.v5i32 (<5 x float>)
declare <6 x i32> @llvm.fptoui.sat.v6f32.v6i32 (<6 x float>)
declare <7 x i32> @llvm.fptoui.sat.v7f32.v7i32 (<7 x float>)
declare <8 x i32> @llvm.fptoui.sat.v8f32.v8i32 (<8 x float>)

define arm_aapcs_vfpcc <1 x i32> @test_unsigned_v1f32_v1i32(<1 x float> %f) {
; CHECK-LABEL: test_unsigned_v1f32_v1i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcvt.u32.f32 s0, s0
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bx lr
  %x = call <1 x i32> @llvm.fptoui.sat.v1f32.v1i32(<1 x float> %f)
  ret <1 x i32> %x
}

define arm_aapcs_vfpcc <2 x i32> @test_unsigned_v2f32_v2i32(<2 x float> %f) {
; CHECK-LABEL: test_unsigned_v2f32_v2i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, s17
; CHECK-NEXT:    bl __aeabi_f2ulz
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    vmov r0, s16
; CHECK-NEXT:    vldr s18, .LCPI1_0
; CHECK-NEXT:    vcmp.f32 s17, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r5, #0
; CHECK-NEXT:    vcmp.f32 s17, s18
; CHECK-NEXT:    mov r4, r1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r5, #-1
; CHECK-NEXT:    bl __aeabi_f2ulz
; CHECK-NEXT:    vcmp.f32 s16, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s18
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, #0
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s18
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r4, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, #0
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r4, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vcmp.f32 s16, s18
; CHECK-NEXT:    vmov q0[2], q0[0], r0, r5
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r1, #0
; CHECK-NEXT:    vmov q0[3], q0[1], r1, r4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r4, r5, r7, pc}
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI1_0:
; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
  %x = call <2 x i32> @llvm.fptoui.sat.v2f32.v2i32(<2 x float> %f)
  ret <2 x i32> %x
}

define arm_aapcs_vfpcc <3 x i32> @test_unsigned_v3f32_v3i32(<3 x float> %f) {
; CHECK-MVE-LABEL: test_unsigned_v3f32_v3i32:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    vcvt.u32.f32 s2, s2
; CHECK-MVE-NEXT:    vcvt.u32.f32 s0, s0
; CHECK-MVE-NEXT:    vcvt.u32.f32 s4, s3
; CHECK-MVE-NEXT:    vcvt.u32.f32 s6, s1
; CHECK-MVE-NEXT:    vmov r0, s2
; CHECK-MVE-NEXT:    vmov r1, s0
; CHECK-MVE-NEXT:    vmov q0[2], q0[0], r1, r0
; CHECK-MVE-NEXT:    vmov r0, s4
; CHECK-MVE-NEXT:    vmov r1, s6
; CHECK-MVE-NEXT:    vmov q0[3], q0[1], r1, r0
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: test_unsigned_v3f32_v3i32:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vcvt.u32.f32 q0, q0
; CHECK-MVEFP-NEXT:    bx lr
  %x = call <3 x i32> @llvm.fptoui.sat.v3f32.v3i32(<3 x float> %f)
  ret <3 x i32> %x
}

define arm_aapcs_vfpcc <4 x i32> @test_unsigned_v4f32_v4i32(<4 x float> %f) {
; CHECK-MVE-LABEL: test_unsigned_v4f32_v4i32:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    vcvt.u32.f32 s2, s2
; CHECK-MVE-NEXT:    vcvt.u32.f32 s0, s0
; CHECK-MVE-NEXT:    vcvt.u32.f32 s4, s3
; CHECK-MVE-NEXT:    vcvt.u32.f32 s6, s1
; CHECK-MVE-NEXT:    vmov r0, s2
; CHECK-MVE-NEXT:    vmov r1, s0
; CHECK-MVE-NEXT:    vmov q0[2], q0[0], r1, r0
; CHECK-MVE-NEXT:    vmov r0, s4
; CHECK-MVE-NEXT:    vmov r1, s6
; CHECK-MVE-NEXT:    vmov q0[3], q0[1], r1, r0
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: test_unsigned_v4f32_v4i32:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vcvt.u32.f32 q0, q0
; CHECK-MVEFP-NEXT:    bx lr
  %x = call <4 x i32> @llvm.fptoui.sat.v4f32.v4i32(<4 x float> %f)
  ret <4 x i32> %x
}

define arm_aapcs_vfpcc <5 x i32> @test_unsigned_v5f32_v5i32(<5 x float> %f) {
; CHECK-MVE-LABEL: test_unsigned_v5f32_v5i32:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    vcvt.u32.f32 s2, s2
; CHECK-MVE-NEXT:    vcvt.u32.f32 s0, s0
; CHECK-MVE-NEXT:    vcvt.u32.f32 s6, s3
; CHECK-MVE-NEXT:    vcvt.u32.f32 s8, s1
; CHECK-MVE-NEXT:    vcvt.u32.f32 s4, s4
; CHECK-MVE-NEXT:    vmov r1, s2
; CHECK-MVE-NEXT:    vmov r2, s0
; CHECK-MVE-NEXT:    vmov q0[2], q0[0], r2, r1
; CHECK-MVE-NEXT:    vmov r1, s6
; CHECK-MVE-NEXT:    vmov r2, s8
; CHECK-MVE-NEXT:    vmov q0[3], q0[1], r2, r1
; CHECK-MVE-NEXT:    vstrw.32 q0, [r0]
; CHECK-MVE-NEXT:    vstr s4, [r0, #16]
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: test_unsigned_v5f32_v5i32:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vcvt.u32.f32 q1, q1
; CHECK-MVEFP-NEXT:    vcvt.u32.f32 q0, q0
; CHECK-MVEFP-NEXT:    vmov r1, s4
; CHECK-MVEFP-NEXT:    str r1, [r0, #16]
; CHECK-MVEFP-NEXT:    vstrw.32 q0, [r0]
; CHECK-MVEFP-NEXT:    bx lr
  %x = call <5 x i32> @llvm.fptoui.sat.v5f32.v5i32(<5 x float> %f)
  ret <5 x i32> %x
}

define arm_aapcs_vfpcc <6 x i32> @test_unsigned_v6f32_v6i32(<6 x float> %f) {
; CHECK-MVE-LABEL: test_unsigned_v6f32_v6i32:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    vcvt.u32.f32 s2, s2
; CHECK-MVE-NEXT:    vcvt.u32.f32 s0, s0
; CHECK-MVE-NEXT:    vcvt.u32.f32 s8, s3
; CHECK-MVE-NEXT:    vcvt.u32.f32 s10, s1
; CHECK-MVE-NEXT:    vcvt.u32.f32 s6, s5
; CHECK-MVE-NEXT:    vcvt.u32.f32 s4, s4
; CHECK-MVE-NEXT:    vmov r1, s2
; CHECK-MVE-NEXT:    vmov r2, s0
; CHECK-MVE-NEXT:    vmov q0[2], q0[0], r2, r1
; CHECK-MVE-NEXT:    vmov r1, s8
; CHECK-MVE-NEXT:    vmov r2, s10
; CHECK-MVE-NEXT:    vmov q0[3], q0[1], r2, r1
; CHECK-MVE-NEXT:    vstr s6, [r0, #20]
; CHECK-MVE-NEXT:    vstrw.32 q0, [r0]
; CHECK-MVE-NEXT:    vstr s4, [r0, #16]
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: test_unsigned_v6f32_v6i32:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vcvt.u32.f32 q1, q1
; CHECK-MVEFP-NEXT:    vcvt.u32.f32 q0, q0
; CHECK-MVEFP-NEXT:    vmov.f32 s6, s5
; CHECK-MVEFP-NEXT:    vmov r2, s4
; CHECK-MVEFP-NEXT:    vmov r1, s6
; CHECK-MVEFP-NEXT:    strd r2, r1, [r0, #16]
; CHECK-MVEFP-NEXT:    vstrw.32 q0, [r0]
; CHECK-MVEFP-NEXT:    bx lr
  %x = call <6 x i32> @llvm.fptoui.sat.v6f32.v6i32(<6 x float> %f)
  ret <6 x i32> %x
}

define arm_aapcs_vfpcc <7 x i32> @test_unsigned_v7f32_v7i32(<7 x float> %f) {
; CHECK-MVE-LABEL: test_unsigned_v7f32_v7i32:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    vcvt.u32.f32 s2, s2
; CHECK-MVE-NEXT:    vcvt.u32.f32 s0, s0
; CHECK-MVE-NEXT:    vcvt.u32.f32 s10, s3
; CHECK-MVE-NEXT:    vcvt.u32.f32 s12, s1
; CHECK-MVE-NEXT:    vcvt.u32.f32 s8, s5
; CHECK-MVE-NEXT:    vcvt.u32.f32 s4, s4
; CHECK-MVE-NEXT:    vcvt.u32.f32 s6, s6
; CHECK-MVE-NEXT:    vmov r1, s2
; CHECK-MVE-NEXT:    vmov r2, s0
; CHECK-MVE-NEXT:    vmov q0[2], q0[0], r2, r1
; CHECK-MVE-NEXT:    vmov r1, s10
; CHECK-MVE-NEXT:    vmov r2, s12
; CHECK-MVE-NEXT:    vmov q0[3], q0[1], r2, r1
; CHECK-MVE-NEXT:    vstr s8, [r0, #20]
; CHECK-MVE-NEXT:    vstr s4, [r0, #16]
; CHECK-MVE-NEXT:    vstrw.32 q0, [r0]
; CHECK-MVE-NEXT:    vstr s6, [r0, #24]
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: test_unsigned_v7f32_v7i32:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vcvt.u32.f32 q1, q1
; CHECK-MVEFP-NEXT:    vcvt.u32.f32 q0, q0
; CHECK-MVEFP-NEXT:    vmov.f32 s10, s5
; CHECK-MVEFP-NEXT:    vmov r2, s4
; CHECK-MVEFP-NEXT:    vmov r3, s6
; CHECK-MVEFP-NEXT:    vmov r1, s10
; CHECK-MVEFP-NEXT:    strd r2, r1, [r0, #16]
; CHECK-MVEFP-NEXT:    str r3, [r0, #24]
; CHECK-MVEFP-NEXT:    vstrw.32 q0, [r0]
; CHECK-MVEFP-NEXT:    bx lr
  %x = call <7 x i32> @llvm.fptoui.sat.v7f32.v7i32(<7 x float> %f)
  ret <7 x i32> %x
}
define arm_aapcs_vfpcc <8 x i32> @test_unsigned_v8f32_v8i32(<8 x float> %f) {
; CHECK-MVE-LABEL: test_unsigned_v8f32_v8i32:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    vcvt.u32.f32 s2, s2
; CHECK-MVE-NEXT:    vcvt.u32.f32 s0, s0
; CHECK-MVE-NEXT:    vcvt.u32.f32 s8, s3
; CHECK-MVE-NEXT:    vcvt.u32.f32 s10, s1
; CHECK-MVE-NEXT:    vcvt.u32.f32 s6, s6
; CHECK-MVE-NEXT:    vcvt.u32.f32 s4, s4
; CHECK-MVE-NEXT:    vcvt.u32.f32 s12, s7
; CHECK-MVE-NEXT:    vcvt.u32.f32 s14, s5
; CHECK-MVE-NEXT:    vmov r0, s2
; CHECK-MVE-NEXT:    vmov r1, s0
; CHECK-MVE-NEXT:    vmov q0[2], q0[0], r1, r0
; CHECK-MVE-NEXT:    vmov r0, s8
; CHECK-MVE-NEXT:    vmov r1, s10
; CHECK-MVE-NEXT:    vmov q0[3], q0[1], r1, r0
; CHECK-MVE-NEXT:    vmov r0, s6
; CHECK-MVE-NEXT:    vmov r1, s4
; CHECK-MVE-NEXT:    vmov q1[2], q1[0], r1, r0
; CHECK-MVE-NEXT:    vmov r0, s12
; CHECK-MVE-NEXT:    vmov r1, s14
; CHECK-MVE-NEXT:    vmov q1[3], q1[1], r1, r0
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: test_unsigned_v8f32_v8i32:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vcvt.u32.f32 q0, q0
; CHECK-MVEFP-NEXT:    vcvt.u32.f32 q1, q1
; CHECK-MVEFP-NEXT:    bx lr
  %x = call <8 x i32> @llvm.fptoui.sat.v8f32.v8i32(<8 x float> %f)
  ret <8 x i32> %x
}

;
; Double to unsigned 32-bit -- Vector size variation
; (header previously said "signed"; these are @llvm.fptoui.sat tests)
;

declare <1 x i32> @llvm.fptoui.sat.v1f64.v1i32 (<1 x double>)
declare <2 x i32> @llvm.fptoui.sat.v2f64.v2i32 (<2 x double>)
declare <3 x i32> @llvm.fptoui.sat.v3f64.v3i32 (<3 x double>)
declare <4 x i32> @llvm.fptoui.sat.v4f64.v4i32 (<4 x double>)
declare <5 x i32> @llvm.fptoui.sat.v5f64.v5i32 (<5 x double>)
declare <6 x i32> @llvm.fptoui.sat.v6f64.v6i32 (<6 x double>)

define arm_aapcs_vfpcc <1 x i32> @test_unsigned_v1f64_v1i32(<1 x double> %f) {
; CHECK-LABEL: test_unsigned_v1f64_v1i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    vldr d1, .LCPI8_0
; CHECK-NEXT:    vmov r4, r5, d0
; CHECK-NEXT:    vmov r2, r3, d1
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI8_1
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    vmov r2, r3, d0
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_d2uiz
; CHECK-NEXT:    cmp r7, #0
; CHECK-NEXT:    csel r0, r0, r7, ne
; CHECK-NEXT:    cmp r6, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r0, #-1
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI8_0:
; CHECK-NEXT:    .long 4292870144 @ double 4294967295
; CHECK-NEXT:    .long 1106247679
; CHECK-NEXT:  .LCPI8_1:
; CHECK-NEXT:    .long 0 @ double 0
; CHECK-NEXT:    .long 0
  %x = call <1 x i32> @llvm.fptoui.sat.v1f64.v1i32(<1 x double> %f)
  ret <1 x i32> %x
}

define arm_aapcs_vfpcc <2 x i32> @test_unsigned_v2f64_v2i32(<2 x double> %f) {
; CHECK-LABEL: test_unsigned_v2f64_v2i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    .pad #32
; CHECK-NEXT:    sub sp, #32
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vldr d0, .LCPI9_0
; CHECK-NEXT:    vmov r5, r4, d9
; CHECK-NEXT:    vmov r10, r9, d0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    mov r3, r9
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI9_1
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    vmov r2, r11, d0
; CHECK-NEXT:    str r2, [sp, #28] @ 4-byte Spill
; CHECK-NEXT:    str.w r11, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r8, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    vmov r7, r6, d8
; CHECK-NEXT:    str r1, [sp, #20] @ 4-byte Spill
; CHECK-NEXT:    cmp.w r8, #0
; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    csel r0, r0, r8, ne
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    cmp r1, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r0, #-1
; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
; CHECK-NEXT:    mov r3, r9
; CHECK-NEXT:    str.w r10, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    mov r8, r9
; CHECK-NEXT:    str.w r9, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    str r1, [sp, #16] @ 4-byte Spill
; CHECK-NEXT:    csel r9, r0, r9, ne
; CHECK-NEXT:    ldr r0, [sp] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r9, #-1
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr.w r11, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r8, r0
; CHECK-NEXT:    ldr.w r10, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    ldr r1, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    csel r5, r1, r0, ne
; CHECK-NEXT:    cmp.w r8, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r5, #0
; CHECK-NEXT:    ldrd r3, r2, [sp, #4] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    ldr r1, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    csel r0, r1, r0, ne
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r0, #0
; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    vmov q0[2], q0[0], r9, r1
; CHECK-NEXT:    vmov q0[3], q0[1], r0, r5
; CHECK-NEXT:    add sp, #32
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI9_0:
; CHECK-NEXT:    .long 4292870144 @ double 4294967295
; CHECK-NEXT:    .long 1106247679
; CHECK-NEXT:  .LCPI9_1:
; CHECK-NEXT:    .long 0 @ double 0
; CHECK-NEXT:    .long 0
  %x = call <2 x i32> @llvm.fptoui.sat.v2f64.v2i32(<2 x double> %f)
  ret <2 x i32> %x
}

define arm_aapcs_vfpcc <3 x i32> @test_unsigned_v3f64_v3i32(<3 x double> %f) {
; CHECK-LABEL: test_unsigned_v3f64_v3i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    .pad #24
; CHECK-NEXT:    sub sp, #24
; CHECK-NEXT:    vmov.f32 s18, s0
; CHECK-NEXT:    vmov.f32 s19, s1
; CHECK-NEXT:    vldr d0, .LCPI10_0
; CHECK-NEXT:    vmov r4, r5, d1
; CHECK-NEXT:    vmov r9, r7, d0
; CHECK-NEXT:    vmov.f32 s16, s4
; CHECK-NEXT:    vmov.f32 s17, s5
; CHECK-NEXT:    str.w r9, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    str r7, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI10_1
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    str r0, [sp, #20] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    vmov r11, r3, d0
; CHECK-NEXT:    str r3, [sp, #16] @ 4-byte Spill
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    vmov r10, r8, d8
; CHECK-NEXT:    cmp r6, #0
; CHECK-NEXT:    ldr r1, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    csel r0, r0, r6, ne
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    cmp r1, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r0, #-1
; CHECK-NEXT:    str r0, [sp, #20] @ 4-byte Spill
; CHECK-NEXT:    vmov r5, r4, d9
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    csel r6, r0, r9, ne
; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r6, #-1
; CHECK-NEXT:    ldrd r2, r3, [sp, #8] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r8, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    cmp r7, #0
; CHECK-NEXT:    csel r0, r0, r7, ne
; CHECK-NEXT:    cmp.w r8, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r0, #-1
; CHECK-NEXT:    ldr r1, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    vmov.32 q0[1], r1
; CHECK-NEXT:    vmov q0[2], q0[0], r0, r6
; CHECK-NEXT:    add sp, #24
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI10_0:
; CHECK-NEXT:    .long 4292870144 @ double 4294967295
; CHECK-NEXT:    .long 1106247679
; CHECK-NEXT:  .LCPI10_1:
; CHECK-NEXT:    .long 0 @ double 0
; CHECK-NEXT:    .long 0
  %x = call <3 x i32> @llvm.fptoui.sat.v3f64.v3i32(<3 x double> %f)
  ret <3 x i32> %x
}

define arm_aapcs_vfpcc <4 x i32> @test_unsigned_v4f64_v4i32(<4 x double> %f) {
; CHECK-LABEL: test_unsigned_v4f64_v4i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    .pad #24
; CHECK-NEXT:    sub sp, #24
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vldr d0, .LCPI11_0
; CHECK-NEXT:    vmov q5, q1
; CHECK-NEXT:    vmov r7, r9, d0
; CHECK-NEXT:    vmov r4, r5, d10
; CHECK-NEXT:    str.w r9, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r9
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI11_1
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    vmov r2, r3, d0
; CHECK-NEXT:    strd r2, r3, [sp, #16] @ 8-byte Folded Spill
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    vmov r10, r8, d8
; CHECK-NEXT:    cmp r6, #0
; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    csel r0, r0, r6, ne
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r9
; CHECK-NEXT:    cmp r1, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r0, #-1
; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    vmov r11, r5, d11
; CHECK-NEXT:    mov r4, r7
; CHECK-NEXT:    str r7, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r6, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    ldr r7, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r2, r6
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    csel r8, r0, r9, ne
; CHECK-NEXT:    ldr r0, [sp] @ 4-byte Reload
; CHECK-NEXT:    mov r2, r4
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r8, #-1
; CHECK-NEXT:    ldr.w r10, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r11
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r11
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r6
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    mov r0, r11
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    vmov r4, r5, d9
; CHECK-NEXT:    cmp r7, #0
; CHECK-NEXT:    csel r6, r0, r7, ne
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r6, #-1
; CHECK-NEXT:    ldr r2, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldrd r2, r3, [sp, #16] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    cmp r7, #0
; CHECK-NEXT:    csel r0, r0, r7, ne
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r0, #-1
; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    vmov q0[2], q0[0], r8, r1
; CHECK-NEXT:    vmov q0[3], q0[1], r0, r6
; CHECK-NEXT:    add sp, #24
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI11_0:
; CHECK-NEXT:    .long 4292870144 @ double 4294967295
; CHECK-NEXT:    .long 1106247679
; CHECK-NEXT:  .LCPI11_1:
; CHECK-NEXT:    .long 0 @ double 0
; CHECK-NEXT:    .long 0
  %x = call <4 x i32> @llvm.fptoui.sat.v4f64.v4i32(<4 x double> %f)
  ret <4 x i32> %x
}

define arm_aapcs_vfpcc <5 x i32> @test_unsigned_v5f64_v5i32(<5 x double> %f) {
; CHECK-LABEL: test_unsigned_v5f64_v5i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    .pad #40
; CHECK-NEXT:    sub sp, #40
; CHECK-NEXT:    vmov.f32 s16, s0
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    vmov.f32 s17, s1
; CHECK-NEXT:    vldr d0, .LCPI12_0
; CHECK-NEXT:    vmov r5, r6, d4
; CHECK-NEXT:    str r0, [sp, #28] @ 4-byte Spill
; CHECK-NEXT:    vmov r2, r3, d0
; CHECK-NEXT:    vmov.f32 s20, s6
; CHECK-NEXT:    vmov.f32 s18, s4
; CHECK-NEXT:    vmov.f32 s22, s2
; CHECK-NEXT:    vmov.f32 s21, s7
; CHECK-NEXT:    vmov.f32 s19, s5
; CHECK-NEXT:    vmov.f32 s23, s3
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    strd r2, r3, [sp, #32] @ 8-byte Folded Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI12_1
; CHECK-NEXT:    mov r10, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    vmov r7, r3, d0
; CHECK-NEXT:    str r3, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    str r7, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    vmov r8, r1, d11
; CHECK-NEXT:    cmp.w r11, #0
; CHECK-NEXT:    vmov r6, r9, d10
; CHECK-NEXT:    csel r0, r0, r11, ne
; CHECK-NEXT:    cmp.w r10, #0
; CHECK-NEXT:    str r1, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    vmov r2, r1, d9
; CHECK-NEXT:    strd r2, r1, [sp, #16] @ 8-byte Folded Spill
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r0, #-1
; CHECK-NEXT:    str r0, [r4, #16]
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    ldr r5, [sp, #32] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r9
; CHECK-NEXT:    ldr.w r10, [sp, #36] @ 4-byte Reload
; CHECK-NEXT:    mov r2, r5
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    ldr r7, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r9
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r9
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    mov r2, r5
; CHECK-NEXT:    csel r0, r0, r4, ne
; CHECK-NEXT:    cmp.w r11, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r0, #-1
; CHECK-NEXT:    ldr r6, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    mov r11, r10
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr.w r10, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    mov r5, r6
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    mov r9, r7
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    cmp r6, #0
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    csel r0, r0, r6, ne
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r0, #-1
; CHECK-NEXT:    ldr r4, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    ldr.w r8, [sp, #32] @ 4-byte Reload
; CHECK-NEXT:    ldr r6, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    mov r3, r9
; CHECK-NEXT:    mov r11, r10
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    csel r4, r0, r5, ne
; CHECK-NEXT:    vmov r5, r6, d8
; CHECK-NEXT:    cmp r7, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r4, #-1
; CHECK-NEXT:    ldr r3, [sp, #36] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r10, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    mov r3, r9
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    cmp r7, #0
; CHECK-NEXT:    csel r0, r0, r7, ne
; CHECK-NEXT:    cmp.w r10, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r0, #-1
; CHECK-NEXT:    vmov q0[2], q0[0], r0, r4
; CHECK-NEXT:    ldr r0, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
; CHECK-NEXT:    ldr r0, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    vstrw.32 q0, [r0]
; CHECK-NEXT:    add sp, #40
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI12_0:
; CHECK-NEXT:    .long 4292870144 @ double 4294967295
; CHECK-NEXT:    .long 1106247679
; CHECK-NEXT:  .LCPI12_1:
; CHECK-NEXT:    .long 0 @ double 0
; CHECK-NEXT:    .long 0
  %x = call <5 x i32> @llvm.fptoui.sat.v5f64.v5i32(<5 x double> %f)
  ret <5 x i32> %x
}

define arm_aapcs_vfpcc <6 x i32> @test_unsigned_v6f64_v6i32(<6 x double> %f) {
; CHECK-LABEL: test_unsigned_v6f64_v6i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12}
; CHECK-NEXT:    .pad #40
; CHECK-NEXT:    sub sp, #40
; CHECK-NEXT:    vmov.f32 s16, s0
; CHECK-NEXT:    str r0, [sp, #32] @ 4-byte Spill
; CHECK-NEXT:    vmov.f32 s17, s1
; CHECK-NEXT:    vldr d0, .LCPI13_0
; CHECK-NEXT:    vmov r5, r6, d5
; CHECK-NEXT:    vmov r11, r3, d0
; CHECK-NEXT:    vmov.f32 s22, s8
; CHECK-NEXT:    vmov.f32 s20, s6
; CHECK-NEXT:    vmov.f32 s18, s4
; CHECK-NEXT:    vmov.f32 s24, s2
; CHECK-NEXT:    vmov.f32 s23, s9
; CHECK-NEXT:    vmov.f32 s21, s7
; CHECK-NEXT:    vmov.f32 s19, s5
; CHECK-NEXT:    vmov.f32 s25, s3
; CHECK-NEXT:    str r3, [sp, #36] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    str.w r11, [sp, #28] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI13_1
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    vmov r4, r9, d0
; CHECK-NEXT:    str r4, [sp, #24] @ 4-byte Spill
; CHECK-NEXT:    mov r2, r4
; CHECK-NEXT:    mov r3, r9
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r8, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    vmov r10, r1, d10
; CHECK-NEXT:    cmp.w r8, #0
; CHECK-NEXT:    vmov r5, r6, d11
; CHECK-NEXT:    csel r0, r0, r8, ne
; CHECK-NEXT:    cmp r7, #0
; CHECK-NEXT:    str r1, [sp, #20] @ 4-byte Spill
; CHECK-NEXT:    vmov r2, r1, d12
; CHECK-NEXT:    strd r2, r1, [sp, #12] @ 8-byte Folded Spill
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r0, #-1
; CHECK-NEXT:    ldr r7, [sp, #32] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    str r0, [r7, #20]
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    ldr.w r8, [sp, #36] @ 4-byte Reload
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r4
; CHECK-NEXT:    mov r3, r9
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    vmov r2, r1, d9
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    csel r0, r0, r4, ne
; CHECK-NEXT:    cmp.w r11, #0
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    strd r2, r1, [sp, #4] @ 8-byte Folded Spill
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r0, #-1
; CHECK-NEXT:    str r0, [r7, #16]
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    ldr r6, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    ldr.w r11, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r5, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r3, r9
; CHECK-NEXT:    mov r8, r9
; CHECK-NEXT:    mov r2, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    cmp r7, #0
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    csel r0, r0, r7, ne
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r0, #-1
; CHECK-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    ldr r4, [sp, #36] @ 4-byte Reload
; CHECK-NEXT:    ldr.w r9, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    str r0, [sp, #20] @ 4-byte Spill
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    mov r2, r5
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    mov r6, r7
; CHECK-NEXT:    mov r10, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    cmp r7, #0
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    csel r9, r0, r7, ne
; CHECK-NEXT:    ldr r0, [sp] @ 4-byte Reload
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r9, #-1
; CHECK-NEXT:    ldr r6, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    ldr r5, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    vmov r5, r6, d8
; CHECK-NEXT:    cmp r7, #0
; CHECK-NEXT:    csel r4, r0, r7, ne
; CHECK-NEXT:    cmp.w r11, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r4, #-1
; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    ldr r3, [sp, #36] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r2, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    mov r10, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2ulz
; CHECK-NEXT:    cmp r7, #0
; CHECK-NEXT:    csel r0, r0, r7, ne
; CHECK-NEXT:    cmp.w r10, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r0, #-1
; CHECK-NEXT:    vmov q0[2], q0[0], r0, r4
; CHECK-NEXT:    ldr r0, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    vmov q0[3], q0[1], r9, r0
; CHECK-NEXT:    ldr r0, [sp, #32] @ 4-byte Reload
; CHECK-NEXT:    vstrw.32 q0, [r0]
; CHECK-NEXT:    add sp, #40
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI13_0:
; CHECK-NEXT:    .long 4292870144 @ double 4294967295
; CHECK-NEXT:    .long 1106247679
; CHECK-NEXT:  .LCPI13_1:
; CHECK-NEXT:    .long 0 @ double 0
; CHECK-NEXT:    .long 0
  %x = call <6 x i32> @llvm.fptoui.sat.v6f64.v6i32(<6 x double> %f)
  ret <6 x i32> %x
}

;
; FP16 to unsigned 32-bit -- Vector size variation
; (header previously said "signed"; these are @llvm.fptoui.sat tests)
;

declare <1 x i32> @llvm.fptoui.sat.v1f16.v1i32 (<1 x half>)
declare <2 x i32> @llvm.fptoui.sat.v2f16.v2i32 (<2 x half>)
declare <3 x i32> @llvm.fptoui.sat.v3f16.v3i32 (<3 x half>)
declare <4 x i32> @llvm.fptoui.sat.v4f16.v4i32 (<4 x half>)
declare <5 x i32> @llvm.fptoui.sat.v5f16.v5i32 (<5 x half>)
declare <6 x i32> @llvm.fptoui.sat.v6f16.v6i32 (<6 x half>)
declare <7 x i32> @llvm.fptoui.sat.v7f16.v7i32 (<7 x half>)
declare <8 x i32> @llvm.fptoui.sat.v8f16.v8i32 (<8 x half>)

define arm_aapcs_vfpcc <1 x i32> @test_unsigned_v1f16_v1i32(<1 x half> %f) {
; CHECK-LABEL: test_unsigned_v1f16_v1i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcvt.u32.f16 s0, s0
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bx lr
  %x = call <1 x i32> @llvm.fptoui.sat.v1f16.v1i32(<1 x half> %f)
  ret <1 x i32> %x
}

define arm_aapcs_vfpcc <2 x i32> @test_unsigned_v2f16_v2i32(<2 x half> %f) {
; CHECK-LABEL: test_unsigned_v2f16_v2i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10}
; CHECK-NEXT:    vpush {d8, d9, d10}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vcvtt.f32.f16 s18, s16
; CHECK-NEXT:    vmov r0, s18
; CHECK-NEXT:    bl __aeabi_f2ulz
; CHECK-NEXT:    vcvtb.f32.f16 s16, s16
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    vmov r0, s16
; CHECK-NEXT:    vldr s20, .LCPI15_0
; CHECK-NEXT:    vcmp.f32 s18, #0
; CHECK-NEXT:    mov r4, r1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r5, #0
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r5, #-1
; CHECK-NEXT:    bl __aeabi_f2ulz
; CHECK-NEXT:    vcmp.f32 s16, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, #0
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r4, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, #0
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r4, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    vmov q0[2], q0[0], r0, r5
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r1, #0
; CHECK-NEXT:    vmov q0[3], q0[1], r1, r4
; CHECK-NEXT:    vpop {d8, d9, d10}
; CHECK-NEXT:    pop {r4, r5, r7, pc}
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI15_0:
; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
  %x = call <2 x i32> @llvm.fptoui.sat.v2f16.v2i32(<2 x half> %f)
  ret <2 x i32> %x
}

define arm_aapcs_vfpcc <3 x i32> @test_unsigned_v3f16_v3i32(<3 x half> %f) { ;
CHECK-LABEL: test_unsigned_v3f16_v3i32: ; CHECK: @ %bb.0: ; CHECK-NEXT: vcvt.u32.f16 s6, s0 ; CHECK-NEXT: vcvt.u32.f16 s0, s1 ; CHECK-NEXT: vcvt.u32.f16 s4, s2 ; CHECK-NEXT: vmov r0, s0 ; CHECK-NEXT: vmov.32 q0[1], r0 ; CHECK-NEXT: vmov r0, s4 ; CHECK-NEXT: vmov r1, s6 ; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-NEXT: bx lr %x = call <3 x i32> @llvm.fptoui.sat.v3f16.v3i32(<3 x half> %f) ret <3 x i32> %x } define arm_aapcs_vfpcc <4 x i32> @test_unsigned_v4f16_v4i32(<4 x half> %f) { ; CHECK-LABEL: test_unsigned_v4f16_v4i32: ; CHECK: @ %bb.0: ; CHECK-NEXT: vmovx.f16 s2, s1 ; CHECK-NEXT: vcvt.u32.f16 s4, s2 ; CHECK-NEXT: vmovx.f16 s2, s0 ; CHECK-NEXT: vcvt.u32.f16 s6, s2 ; CHECK-NEXT: vcvt.u32.f16 s2, s1 ; CHECK-NEXT: vcvt.u32.f16 s0, s0 ; CHECK-NEXT: vmov r0, s2 ; CHECK-NEXT: vmov r1, s0 ; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-NEXT: vmov r0, s4 ; CHECK-NEXT: vmov r1, s6 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-NEXT: bx lr %x = call <4 x i32> @llvm.fptoui.sat.v4f16.v4i32(<4 x half> %f) ret <4 x i32> %x } define arm_aapcs_vfpcc <5 x i32> @test_unsigned_v5f16_v5i32(<5 x half> %f) { ; CHECK-LABEL: test_unsigned_v5f16_v5i32: ; CHECK: @ %bb.0: ; CHECK-NEXT: vmovx.f16 s6, s0 ; CHECK-NEXT: vmovx.f16 s4, s1 ; CHECK-NEXT: vcvt.u32.f16 s8, s1 ; CHECK-NEXT: vcvt.u32.f16 s0, s0 ; CHECK-NEXT: vcvt.u32.f16 s4, s4 ; CHECK-NEXT: vcvt.u32.f16 s6, s6 ; CHECK-NEXT: vmov r1, s8 ; CHECK-NEXT: vcvt.u32.f16 s2, s2 ; CHECK-NEXT: vmov r2, s0 ; CHECK-NEXT: vmov q2[2], q2[0], r2, r1 ; CHECK-NEXT: vmov r1, s4 ; CHECK-NEXT: vmov r2, s6 ; CHECK-NEXT: vmov q2[3], q2[1], r2, r1 ; CHECK-NEXT: vmov r1, s2 ; CHECK-NEXT: str r1, [r0, #16] ; CHECK-NEXT: vstrw.32 q2, [r0] ; CHECK-NEXT: bx lr %x = call <5 x i32> @llvm.fptoui.sat.v5f16.v5i32(<5 x half> %f) ret <5 x i32> %x } define arm_aapcs_vfpcc <6 x i32> @test_unsigned_v6f16_v6i32(<6 x half> %f) { ; CHECK-LABEL: test_unsigned_v6f16_v6i32: ; CHECK: @ %bb.0: ; CHECK-NEXT: vmovx.f16 s8, s0 ; CHECK-NEXT: vmovx.f16 s6, s1 ; CHECK-NEXT: 
vcvt.u32.f16 s10, s1 ; CHECK-NEXT: vcvt.u32.f16 s0, s0 ; CHECK-NEXT: vcvt.u32.f16 s4, s2 ; CHECK-NEXT: vmovx.f16 s2, s2 ; CHECK-NEXT: vcvt.u32.f16 s6, s6 ; CHECK-NEXT: vcvt.u32.f16 s8, s8 ; CHECK-NEXT: vmov r1, s10 ; CHECK-NEXT: vcvt.u32.f16 s2, s2 ; CHECK-NEXT: vmov r2, s0 ; CHECK-NEXT: vmov q3[2], q3[0], r2, r1 ; CHECK-NEXT: vmov r1, s6 ; CHECK-NEXT: vmov r2, s8 ; CHECK-NEXT: vmov q3[3], q3[1], r2, r1 ; CHECK-NEXT: vmov r1, s2 ; CHECK-NEXT: vmov r2, s4 ; CHECK-NEXT: strd r2, r1, [r0, #16] ; CHECK-NEXT: vstrw.32 q3, [r0] ; CHECK-NEXT: bx lr %x = call <6 x i32> @llvm.fptoui.sat.v6f16.v6i32(<6 x half> %f) ret <6 x i32> %x } define arm_aapcs_vfpcc <7 x i32> @test_unsigned_v7f16_v7i32(<7 x half> %f) { ; CHECK-LABEL: test_unsigned_v7f16_v7i32: ; CHECK: @ %bb.0: ; CHECK-NEXT: vmovx.f16 s10, s0 ; CHECK-NEXT: vmovx.f16 s8, s1 ; CHECK-NEXT: vcvt.u32.f16 s12, s1 ; CHECK-NEXT: vcvt.u32.f16 s0, s0 ; CHECK-NEXT: vcvt.u32.f16 s4, s2 ; CHECK-NEXT: vmovx.f16 s2, s2 ; CHECK-NEXT: vcvt.u32.f16 s8, s8 ; CHECK-NEXT: vcvt.u32.f16 s10, s10 ; CHECK-NEXT: vmov r1, s12 ; CHECK-NEXT: vcvt.u32.f16 s2, s2 ; CHECK-NEXT: vmov r2, s0 ; CHECK-NEXT: vcvt.u32.f16 s6, s3 ; CHECK-NEXT: vmov q3[2], q3[0], r2, r1 ; CHECK-NEXT: vmov r1, s8 ; CHECK-NEXT: vmov r2, s10 ; CHECK-NEXT: vmov q3[3], q3[1], r2, r1 ; CHECK-NEXT: vmov r1, s2 ; CHECK-NEXT: vmov r2, s4 ; CHECK-NEXT: vmov r3, s6 ; CHECK-NEXT: strd r2, r1, [r0, #16] ; CHECK-NEXT: str r3, [r0, #24] ; CHECK-NEXT: vstrw.32 q3, [r0] ; CHECK-NEXT: bx lr %x = call <7 x i32> @llvm.fptoui.sat.v7f16.v7i32(<7 x half> %f) ret <7 x i32> %x } define arm_aapcs_vfpcc <8 x i32> @test_unsigned_v8f16_v8i32(<8 x half> %f) { ; CHECK-LABEL: test_unsigned_v8f16_v8i32: ; CHECK: @ %bb.0: ; CHECK-NEXT: vmovx.f16 s4, s3 ; CHECK-NEXT: vmovx.f16 s6, s0 ; CHECK-NEXT: vcvt.u32.f16 s8, s4 ; CHECK-NEXT: vmovx.f16 s4, s2 ; CHECK-NEXT: vcvt.u32.f16 s10, s4 ; CHECK-NEXT: vmovx.f16 s4, s1 ; CHECK-NEXT: vcvt.u32.f16 s14, s2 ; CHECK-NEXT: vcvt.u32.f16 s2, s1 ; CHECK-NEXT: vcvt.u32.f16 
s0, s0 ; CHECK-NEXT: vcvt.u32.f16 s4, s4 ; CHECK-NEXT: vcvt.u32.f16 s6, s6 ; CHECK-NEXT: vmov r0, s2 ; CHECK-NEXT: vmov r1, s0 ; CHECK-NEXT: vcvt.u32.f16 s12, s3 ; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-NEXT: vmov r0, s4 ; CHECK-NEXT: vmov r1, s6 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-NEXT: vmov r0, s12 ; CHECK-NEXT: vmov r1, s14 ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vmov r0, s8 ; CHECK-NEXT: vmov r1, s10 ; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 ; CHECK-NEXT: bx lr %x = call <8 x i32> @llvm.fptoui.sat.v8f16.v8i32(<8 x half> %f) ret <8 x i32> %x } ; ; 2-Vector float to unsigned integer -- result size variation ; declare <4 x i1> @llvm.fptoui.sat.v4f32.v4i1 (<4 x float>) declare <4 x i8> @llvm.fptoui.sat.v4f32.v4i8 (<4 x float>) declare <4 x i13> @llvm.fptoui.sat.v4f32.v4i13 (<4 x float>) declare <4 x i16> @llvm.fptoui.sat.v4f32.v4i16 (<4 x float>) declare <4 x i19> @llvm.fptoui.sat.v4f32.v4i19 (<4 x float>) declare <4 x i50> @llvm.fptoui.sat.v4f32.v4i50 (<4 x float>) declare <4 x i64> @llvm.fptoui.sat.v4f32.v4i64 (<4 x float>) declare <4 x i100> @llvm.fptoui.sat.v4f32.v4i100(<4 x float>) declare <4 x i128> @llvm.fptoui.sat.v4f32.v4i128(<4 x float>) define arm_aapcs_vfpcc <4 x i1> @test_unsigned_v4f32_v4i1(<4 x float> %f) { ; CHECK-LABEL: test_unsigned_v4f32_v4i1: ; CHECK: @ %bb.0: ; CHECK-NEXT: vldr s4, .LCPI22_0 ; CHECK-NEXT: vmov.f32 s6, #1.000000e+00 ; CHECK-NEXT: movs r1, #0 ; CHECK-NEXT: vmaxnm.f32 s0, s0, s4 ; CHECK-NEXT: vmaxnm.f32 s8, s3, s4 ; CHECK-NEXT: vminnm.f32 s0, s0, s6 ; CHECK-NEXT: vmaxnm.f32 s2, s2, s4 ; CHECK-NEXT: vcvt.u32.f32 s0, s0 ; CHECK-NEXT: vmaxnm.f32 s4, s1, s4 ; CHECK-NEXT: vminnm.f32 s4, s4, s6 ; CHECK-NEXT: vminnm.f32 s2, s2, s6 ; CHECK-NEXT: vcvt.u32.f32 s4, s4 ; CHECK-NEXT: vminnm.f32 s8, s8, s6 ; CHECK-NEXT: vcvt.u32.f32 s2, s2 ; CHECK-NEXT: vcvt.u32.f32 s8, s8 ; CHECK-NEXT: vmov r2, s0 ; CHECK-NEXT: and r2, r2, #1 ; CHECK-NEXT: rsbs r2, r2, #0 ; CHECK-NEXT: bfi r1, r2, #0, #1 ; CHECK-NEXT: vmov r2, s4 ; 
CHECK-NEXT: and r2, r2, #1 ; CHECK-NEXT: rsbs r2, r2, #0 ; CHECK-NEXT: bfi r1, r2, #1, #1 ; CHECK-NEXT: vmov r2, s2 ; CHECK-NEXT: and r2, r2, #1 ; CHECK-NEXT: rsbs r2, r2, #0 ; CHECK-NEXT: bfi r1, r2, #2, #1 ; CHECK-NEXT: vmov r2, s8 ; CHECK-NEXT: and r2, r2, #1 ; CHECK-NEXT: rsbs r2, r2, #0 ; CHECK-NEXT: bfi r1, r2, #3, #1 ; CHECK-NEXT: strb r1, [r0] ; CHECK-NEXT: bx lr ; CHECK-NEXT: .p2align 2 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI22_0: ; CHECK-NEXT: .long 0x00000000 @ float 0 %x = call <4 x i1> @llvm.fptoui.sat.v4f32.v4i1(<4 x float> %f) ret <4 x i1> %x } define arm_aapcs_vfpcc <4 x i8> @test_unsigned_v4f32_v4i8(<4 x float> %f) { ; CHECK-MVE-LABEL: test_unsigned_v4f32_v4i8: ; CHECK-MVE: @ %bb.0: ; CHECK-MVE-NEXT: vldr s4, .LCPI23_0 ; CHECK-MVE-NEXT: vldr s6, .LCPI23_1 ; CHECK-MVE-NEXT: vmaxnm.f32 s2, s2, s4 ; CHECK-MVE-NEXT: vmaxnm.f32 s0, s0, s4 ; CHECK-MVE-NEXT: vmaxnm.f32 s8, s3, s4 ; CHECK-MVE-NEXT: vminnm.f32 s2, s2, s6 ; CHECK-MVE-NEXT: vminnm.f32 s0, s0, s6 ; CHECK-MVE-NEXT: vmaxnm.f32 s4, s1, s4 ; CHECK-MVE-NEXT: vminnm.f32 s8, s8, s6 ; CHECK-MVE-NEXT: vminnm.f32 s4, s4, s6 ; CHECK-MVE-NEXT: vcvt.u32.f32 s2, s2 ; CHECK-MVE-NEXT: vcvt.u32.f32 s0, s0 ; CHECK-MVE-NEXT: vcvt.u32.f32 s8, s8 ; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s4 ; CHECK-MVE-NEXT: vmov r0, s2 ; CHECK-MVE-NEXT: vmov r1, s0 ; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-MVE-NEXT: vmov r0, s8 ; CHECK-MVE-NEXT: vmov r1, s4 ; CHECK-MVE-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-MVE-NEXT: bx lr ; CHECK-MVE-NEXT: .p2align 2 ; CHECK-MVE-NEXT: @ %bb.1: ; CHECK-MVE-NEXT: .LCPI23_0: ; CHECK-MVE-NEXT: .long 0x00000000 @ float 0 ; CHECK-MVE-NEXT: .LCPI23_1: ; CHECK-MVE-NEXT: .long 0x437f0000 @ float 255 ; ; CHECK-MVEFP-LABEL: test_unsigned_v4f32_v4i8: ; CHECK-MVEFP: @ %bb.0: ; CHECK-MVEFP-NEXT: vmov.i32 q1, #0xff ; CHECK-MVEFP-NEXT: vcvt.u32.f32 q0, q0 ; CHECK-MVEFP-NEXT: vmin.u32 q0, q0, q1 ; CHECK-MVEFP-NEXT: bx lr %x = call <4 x i8> @llvm.fptoui.sat.v4f32.v4i8(<4 x float> %f) ret <4 x i8> %x } 
define arm_aapcs_vfpcc <4 x i13> @test_unsigned_v4f32_v4i13(<4 x float> %f) { ; CHECK-MVE-LABEL: test_unsigned_v4f32_v4i13: ; CHECK-MVE: @ %bb.0: ; CHECK-MVE-NEXT: vldr s4, .LCPI24_0 ; CHECK-MVE-NEXT: vldr s6, .LCPI24_1 ; CHECK-MVE-NEXT: vmaxnm.f32 s2, s2, s4 ; CHECK-MVE-NEXT: vmaxnm.f32 s0, s0, s4 ; CHECK-MVE-NEXT: vmaxnm.f32 s8, s3, s4 ; CHECK-MVE-NEXT: vminnm.f32 s2, s2, s6 ; CHECK-MVE-NEXT: vminnm.f32 s0, s0, s6 ; CHECK-MVE-NEXT: vmaxnm.f32 s4, s1, s4 ; CHECK-MVE-NEXT: vminnm.f32 s8, s8, s6 ; CHECK-MVE-NEXT: vminnm.f32 s4, s4, s6 ; CHECK-MVE-NEXT: vcvt.u32.f32 s2, s2 ; CHECK-MVE-NEXT: vcvt.u32.f32 s0, s0 ; CHECK-MVE-NEXT: vcvt.u32.f32 s8, s8 ; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s4 ; CHECK-MVE-NEXT: vmov r0, s2 ; CHECK-MVE-NEXT: vmov r1, s0 ; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-MVE-NEXT: vmov r0, s8 ; CHECK-MVE-NEXT: vmov r1, s4 ; CHECK-MVE-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-MVE-NEXT: bx lr ; CHECK-MVE-NEXT: .p2align 2 ; CHECK-MVE-NEXT: @ %bb.1: ; CHECK-MVE-NEXT: .LCPI24_0: ; CHECK-MVE-NEXT: .long 0x00000000 @ float 0 ; CHECK-MVE-NEXT: .LCPI24_1: ; CHECK-MVE-NEXT: .long 0x45fff800 @ float 8191 ; ; CHECK-MVEFP-LABEL: test_unsigned_v4f32_v4i13: ; CHECK-MVEFP: @ %bb.0: ; CHECK-MVEFP-NEXT: vmov.i32 q1, #0x1fff ; CHECK-MVEFP-NEXT: vcvt.u32.f32 q0, q0 ; CHECK-MVEFP-NEXT: vmin.u32 q0, q0, q1 ; CHECK-MVEFP-NEXT: bx lr %x = call <4 x i13> @llvm.fptoui.sat.v4f32.v4i13(<4 x float> %f) ret <4 x i13> %x } define arm_aapcs_vfpcc <4 x i16> @test_unsigned_v4f32_v4i16(<4 x float> %f) { ; CHECK-MVE-LABEL: test_unsigned_v4f32_v4i16: ; CHECK-MVE: @ %bb.0: ; CHECK-MVE-NEXT: vldr s4, .LCPI25_0 ; CHECK-MVE-NEXT: vldr s6, .LCPI25_1 ; CHECK-MVE-NEXT: vmaxnm.f32 s2, s2, s4 ; CHECK-MVE-NEXT: vmaxnm.f32 s0, s0, s4 ; CHECK-MVE-NEXT: vmaxnm.f32 s8, s3, s4 ; CHECK-MVE-NEXT: vminnm.f32 s2, s2, s6 ; CHECK-MVE-NEXT: vminnm.f32 s0, s0, s6 ; CHECK-MVE-NEXT: vmaxnm.f32 s4, s1, s4 ; CHECK-MVE-NEXT: vminnm.f32 s8, s8, s6 ; CHECK-MVE-NEXT: vminnm.f32 s4, s4, s6 ; CHECK-MVE-NEXT: 
vcvt.u32.f32 s2, s2 ; CHECK-MVE-NEXT: vcvt.u32.f32 s0, s0 ; CHECK-MVE-NEXT: vcvt.u32.f32 s8, s8 ; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s4 ; CHECK-MVE-NEXT: vmov r0, s2 ; CHECK-MVE-NEXT: vmov r1, s0 ; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-MVE-NEXT: vmov r0, s8 ; CHECK-MVE-NEXT: vmov r1, s4 ; CHECK-MVE-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-MVE-NEXT: bx lr ; CHECK-MVE-NEXT: .p2align 2 ; CHECK-MVE-NEXT: @ %bb.1: ; CHECK-MVE-NEXT: .LCPI25_0: ; CHECK-MVE-NEXT: .long 0x00000000 @ float 0 ; CHECK-MVE-NEXT: .LCPI25_1: ; CHECK-MVE-NEXT: .long 0x477fff00 @ float 65535 ; ; CHECK-MVEFP-LABEL: test_unsigned_v4f32_v4i16: ; CHECK-MVEFP: @ %bb.0: ; CHECK-MVEFP-NEXT: vcvt.u32.f32 q0, q0 ; CHECK-MVEFP-NEXT: vqmovnb.u32 q0, q0 ; CHECK-MVEFP-NEXT: vmovlb.u16 q0, q0 ; CHECK-MVEFP-NEXT: bx lr %x = call <4 x i16> @llvm.fptoui.sat.v4f32.v4i16(<4 x float> %f) ret <4 x i16> %x } define arm_aapcs_vfpcc <4 x i19> @test_unsigned_v4f32_v4i19(<4 x float> %f) { ; CHECK-MVE-LABEL: test_unsigned_v4f32_v4i19: ; CHECK-MVE: @ %bb.0: ; CHECK-MVE-NEXT: vldr s4, .LCPI26_0 ; CHECK-MVE-NEXT: vldr s6, .LCPI26_1 ; CHECK-MVE-NEXT: vmaxnm.f32 s2, s2, s4 ; CHECK-MVE-NEXT: vmaxnm.f32 s0, s0, s4 ; CHECK-MVE-NEXT: vmaxnm.f32 s8, s3, s4 ; CHECK-MVE-NEXT: vminnm.f32 s2, s2, s6 ; CHECK-MVE-NEXT: vminnm.f32 s0, s0, s6 ; CHECK-MVE-NEXT: vmaxnm.f32 s4, s1, s4 ; CHECK-MVE-NEXT: vminnm.f32 s8, s8, s6 ; CHECK-MVE-NEXT: vminnm.f32 s4, s4, s6 ; CHECK-MVE-NEXT: vcvt.u32.f32 s2, s2 ; CHECK-MVE-NEXT: vcvt.u32.f32 s0, s0 ; CHECK-MVE-NEXT: vcvt.u32.f32 s8, s8 ; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s4 ; CHECK-MVE-NEXT: vmov r0, s2 ; CHECK-MVE-NEXT: vmov r1, s0 ; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-MVE-NEXT: vmov r0, s8 ; CHECK-MVE-NEXT: vmov r1, s4 ; CHECK-MVE-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-MVE-NEXT: bx lr ; CHECK-MVE-NEXT: .p2align 2 ; CHECK-MVE-NEXT: @ %bb.1: ; CHECK-MVE-NEXT: .LCPI26_0: ; CHECK-MVE-NEXT: .long 0x00000000 @ float 0 ; CHECK-MVE-NEXT: .LCPI26_1: ; CHECK-MVE-NEXT: .long 0x48ffffe0 @ 
float 524287 ; ; CHECK-MVEFP-LABEL: test_unsigned_v4f32_v4i19: ; CHECK-MVEFP: @ %bb.0: ; CHECK-MVEFP-NEXT: vmov.i32 q1, #0x7ffff ; CHECK-MVEFP-NEXT: vcvt.u32.f32 q0, q0 ; CHECK-MVEFP-NEXT: vmin.u32 q0, q0, q1 ; CHECK-MVEFP-NEXT: bx lr %x = call <4 x i19> @llvm.fptoui.sat.v4f32.v4i19(<4 x float> %f) ret <4 x i19> %x } define arm_aapcs_vfpcc <4 x i32> @test_unsigned_v4f32_v4i32_duplicate(<4 x float> %f) { ; CHECK-MVE-LABEL: test_unsigned_v4f32_v4i32_duplicate: ; CHECK-MVE: @ %bb.0: ; CHECK-MVE-NEXT: vcvt.u32.f32 s2, s2 ; CHECK-MVE-NEXT: vcvt.u32.f32 s0, s0 ; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s3 ; CHECK-MVE-NEXT: vcvt.u32.f32 s6, s1 ; CHECK-MVE-NEXT: vmov r0, s2 ; CHECK-MVE-NEXT: vmov r1, s0 ; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-MVE-NEXT: vmov r0, s4 ; CHECK-MVE-NEXT: vmov r1, s6 ; CHECK-MVE-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-MVE-NEXT: bx lr ; ; CHECK-MVEFP-LABEL: test_unsigned_v4f32_v4i32_duplicate: ; CHECK-MVEFP: @ %bb.0: ; CHECK-MVEFP-NEXT: vcvt.u32.f32 q0, q0 ; CHECK-MVEFP-NEXT: bx lr %x = call <4 x i32> @llvm.fptoui.sat.v4f32.v4i32(<4 x float> %f) ret <4 x i32> %x } define arm_aapcs_vfpcc <4 x i50> @test_unsigned_v4f32_v4i50(<4 x float> %f) { ; CHECK-LABEL: test_unsigned_v4f32_v4i50: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, lr} ; CHECK-NEXT: .vsave {d8, d9, d10} ; CHECK-NEXT: vpush {d8, d9, d10} ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: vmov r0, s16 ; CHECK-NEXT: vldr s20, .LCPI28_0 ; CHECK-NEXT: vmov r4, s17 ; CHECK-NEXT: vmov r6, s19 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: mov r7, r0 ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: mov r0, r4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: mov r5, r1 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r7, #0 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcmp.f32 s17, #0 ; CHECK-NEXT: mov r10, r1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: mov r9, r0 ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: it lt 
; CHECK-NEXT: movlt.w r10, #0 ; CHECK-NEXT: vcmp.f32 s17, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: itt gt ; CHECK-NEXT: movwgt r10, #65535 ; CHECK-NEXT: movtgt r10, #3 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcmp.f32 s19, #0 ; CHECK-NEXT: mov r6, r1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s19, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r6, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: itt gt ; CHECK-NEXT: movwgt r6, #65535 ; CHECK-NEXT: movtgt r6, #3 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s19, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r7, #-1 ; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s19, s20 ; CHECK-NEXT: str.w r7, [r8] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r4, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: lsl.w r0, r6, #22 ; CHECK-NEXT: vcmp.f32 s17, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r4, #-1 ; CHECK-NEXT: orr.w r0, r0, r4, lsr #10 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str.w r0, [r8, #20] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt.w r9, #0 ; CHECK-NEXT: vcmp.f32 s17, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r9, #-1 ; CHECK-NEXT: lsr.w r0, r9, #14 ; CHECK-NEXT: orr.w r1, r0, r10, lsl #18 ; CHECK-NEXT: vmov r0, s18 ; CHECK-NEXT: str.w r1, [r8, #8] ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: lsrs r2, r6, #10 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: itt gt ; CHECK-NEXT: movwgt r1, #65535 ; CHECK-NEXT: movtgt r1, #3 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r5, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: itt gt ; CHECK-NEXT: movwgt r5, #65535 ; CHECK-NEXT: movtgt r5, #3 ; 
CHECK-NEXT: strb.w r2, [r8, #24] ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: ubfx r2, r10, #14, #4 ; CHECK-NEXT: bfc r1, #18, #14 ; CHECK-NEXT: orr.w r2, r2, r0, lsl #4 ; CHECK-NEXT: lsrs r0, r0, #28 ; CHECK-NEXT: orr.w r0, r0, r1, lsl #4 ; CHECK-NEXT: bfc r5, #18, #14 ; CHECK-NEXT: str.w r2, [r8, #12] ; CHECK-NEXT: orr.w r2, r5, r9, lsl #18 ; CHECK-NEXT: str.w r2, [r8, #4] ; CHECK-NEXT: orr.w r0, r0, r4, lsl #22 ; CHECK-NEXT: str.w r0, [r8, #16] ; CHECK-NEXT: vpop {d8, d9, d10} ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, pc} ; CHECK-NEXT: .p2align 2 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI28_0: ; CHECK-NEXT: .long 0x587fffff @ float 1.12589984E+15 %x = call <4 x i50> @llvm.fptoui.sat.v4f32.v4i50(<4 x float> %f) ret <4 x i50> %x } define arm_aapcs_vfpcc <4 x i64> @test_unsigned_v4f32_v4i64(<4 x float> %f) { ; CHECK-LABEL: test_unsigned_v4f32_v4i64: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9, d10} ; CHECK-NEXT: vpush {d8, d9, d10} ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: vmov r0, s19 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: mov r11, r0 ; CHECK-NEXT: vmov r0, s18 ; CHECK-NEXT: vldr s20, .LCPI29_0 ; CHECK-NEXT: vcmp.f32 s19, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt.w r11, #0 ; CHECK-NEXT: vcmp.f32 s19, s20 ; CHECK-NEXT: mov r10, r1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vmov r9, s17 ; CHECK-NEXT: vmov r8, s16 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r11, #-1 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: mov r7, r0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r7, #0 ; CHECK-NEXT: 
vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s19, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r7, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s19, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt.w r10, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: mov r6, r1 ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r10, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: mov r0, r9 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r6, #0 ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r6, #-1 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: mov r5, r0 ; CHECK-NEXT: vcmp.f32 s17, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r5, #0 ; CHECK-NEXT: vcmp.f32 s17, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: mov r4, r1 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r5, #-1 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: vmov q1[2], q1[0], r7, r11 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r4, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r4, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: vmov q0[2], q0[0], r0, r5 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r4 ; CHECK-NEXT: vmov q1[3], q1[1], r6, r10 ; CHECK-NEXT: vpop {d8, d9, d10} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} ; CHECK-NEXT: .p2align 2 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: 
.LCPI29_0: ; CHECK-NEXT: .long 0x5f7fffff @ float 1.8446743E+19 %x = call <4 x i64> @llvm.fptoui.sat.v4f32.v4i64(<4 x float> %f) ret <4 x i64> %x } define arm_aapcs_vfpcc <4 x i100> @test_unsigned_v4f32_v4i100(<4 x float> %f) { ; CHECK-LABEL: test_unsigned_v4f32_v4i100: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, lr} ; CHECK-NEXT: push {r4, r5, r6, r7, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9, d10} ; CHECK-NEXT: vpush {d8, d9, d10} ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: vmov r0, s18 ; CHECK-NEXT: vldr s20, .LCPI30_0 ; CHECK-NEXT: vmov r5, s16 ; CHECK-NEXT: vmov r7, s19 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: mov r6, r3 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: str.w r2, [r4, #33] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str.w r1, [r4, #29] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str.w r0, [r4, #25] ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: mov r5, r3 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: str r2, [r4, #8] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; 
CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str r1, [r4, #4] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str r0, [r4] ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s19, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s19, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s19, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s19, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: lsr.w r7, r1, #28 ; CHECK-NEXT: vcmp.f32 s19, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: orr.w r7, r7, r2, lsl #4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str.w r7, [r4, #45] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s19, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: lsrs r7, r0, #28 ; CHECK-NEXT: vcmp.f32 s19, #0 ; CHECK-NEXT: orr.w r7, r7, r1, lsl #4 ; CHECK-NEXT: vmov r1, s17 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s19, s20 ; CHECK-NEXT: str.w r7, [r4, #41] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r3, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: lsr.w r2, r2, #28 ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt r3, #15 ; CHECK-NEXT: orr.w r2, r2, r3, lsl #4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: strb.w r2, [r4, #49] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r6, #0 ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt r6, #15 ; CHECK-NEXT: and r2, r6, #15 ; CHECK-NEXT: orr.w r0, r2, r0, lsl #4 ; 
CHECK-NEXT: str.w r0, [r4, #37] ; CHECK-NEXT: mov r0, r1 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s17, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: lsrs r7, r1, #28 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, s20 ; CHECK-NEXT: orr.w r7, r7, r2, lsl #4 ; CHECK-NEXT: str r7, [r4, #20] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: lsrs r7, r0, #28 ; CHECK-NEXT: orr.w r1, r7, r1, lsl #4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, s20 ; CHECK-NEXT: str r1, [r4, #16] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r3, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: lsr.w r1, r2, #28 ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt r3, #15 ; CHECK-NEXT: orr.w r1, r1, r3, lsl #4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: strb r1, [r4, #24] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r5, #0 ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt r5, #15 ; CHECK-NEXT: and r1, r5, #15 ; CHECK-NEXT: orr.w r0, r1, r0, lsl #4 ; CHECK-NEXT: str r0, [r4, #12] ; CHECK-NEXT: vpop {d8, d9, d10} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop {r4, r5, r6, r7, pc} ; CHECK-NEXT: .p2align 2 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI30_0: ; CHECK-NEXT: .long 0x717fffff @ float 1.26765052E+30 %x = call <4 x i100> @llvm.fptoui.sat.v4f32.v4i100(<4 x float> %f) ret <4 x i100> %x } define arm_aapcs_vfpcc <4 x i128> 
@test_unsigned_v4f32_v4i128(<4 x float> %f) { ; CHECK-LABEL: test_unsigned_v4f32_v4i128: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, lr} ; CHECK-NEXT: push {r4, r5, r6, r7, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9, d10} ; CHECK-NEXT: vpush {d8, d9, d10} ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: vmov r0, s19 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vmov r5, s18 ; CHECK-NEXT: vldr s20, .LCPI31_0 ; CHECK-NEXT: vcmp.f32 s19, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s19, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r3, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s19, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r3, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s19, s20 ; CHECK-NEXT: str r3, [r4, #60] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s19, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s19, s20 ; CHECK-NEXT: str r2, [r4, #56] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s19, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str r1, [r4, #52] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s19, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str r0, [r4, #48] ; CHECK-NEXT: vmov r7, s16 ; CHECK-NEXT: vmov r6, s17 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r3, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r3, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: str r3, [r4, 
#44] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: str r2, [r4, #40] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str r1, [r4, #36] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str r0, [r4, #32] ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s17, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r3, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r3, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, s20 ; CHECK-NEXT: str r3, [r4, #28] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, s20 ; CHECK-NEXT: str r2, [r4, #24] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str r1, [r4, #20] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s17, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str r0, [r4, #16] ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: 
movlt r3, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r3, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: str r3, [r4, #12] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: str r2, [r4, #8] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str r1, [r4, #4] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str r0, [r4] ; CHECK-NEXT: vpop {d8, d9, d10} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop {r4, r5, r6, r7, pc} ; CHECK-NEXT: .p2align 2 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI31_0: ; CHECK-NEXT: .long 0x7f7fffff @ float 3.40282347E+38 %x = call <4 x i128> @llvm.fptoui.sat.v4f32.v4i128(<4 x float> %f) ret <4 x i128> %x } ; ; 2-Vector double to signed integer -- result size variation ; declare <2 x i1> @llvm.fptoui.sat.v2f64.v2i1 (<2 x double>) declare <2 x i8> @llvm.fptoui.sat.v2f64.v2i8 (<2 x double>) declare <2 x i13> @llvm.fptoui.sat.v2f64.v2i13 (<2 x double>) declare <2 x i16> @llvm.fptoui.sat.v2f64.v2i16 (<2 x double>) declare <2 x i19> @llvm.fptoui.sat.v2f64.v2i19 (<2 x double>) declare <2 x i50> @llvm.fptoui.sat.v2f64.v2i50 (<2 x double>) declare <2 x i64> @llvm.fptoui.sat.v2f64.v2i64 (<2 x double>) declare <2 x i100> @llvm.fptoui.sat.v2f64.v2i100(<2 x double>) declare <2 x i128> @llvm.fptoui.sat.v2f64.v2i128(<2 x double>) define arm_aapcs_vfpcc <2 x i1> @test_unsigned_v2f64_v2i1(<2 x double> %f) { ; CHECK-LABEL: test_unsigned_v2f64_v2i1: ; CHECK: @ %bb.0: ; CHECK-NEXT: 
.save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: .pad #8 ; CHECK-NEXT: sub sp, #8 ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: vldr d0, .LCPI32_0 ; CHECK-NEXT: vmov r5, r6, d8 ; CHECK-NEXT: str r0, [sp, #4] @ 4-byte Spill ; CHECK-NEXT: vmov r10, r9, d0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: vldr d0, .LCPI32_1 ; CHECK-NEXT: mov r7, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: vmov r4, r11, d0 ; CHECK-NEXT: mov r2, r4 ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_d2uiz ; CHECK-NEXT: vmov r6, r5, d9 ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: csel r0, r0, r8, ne ; CHECK-NEXT: cmp r7, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne r0, #1 ; CHECK-NEXT: movs r7, #0 ; CHECK-NEXT: and r0, r0, #1 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: rsbs r0, r0, #0 ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: bfi r7, r0, #0, #1 ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: mov r2, r4 ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: bl __aeabi_d2uiz ; CHECK-NEXT: cmp r4, #0 ; CHECK-NEXT: csel r0, r0, r4, ne ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne r0, #1 ; CHECK-NEXT: and r0, r0, #1 ; CHECK-NEXT: rsbs r0, r0, #0 ; CHECK-NEXT: bfi r7, r0, #1, #1 ; CHECK-NEXT: ldr r0, [sp, #4] @ 4-byte Reload ; CHECK-NEXT: strb r7, [r0] ; CHECK-NEXT: add sp, #8 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} 
; CHECK-NEXT: .p2align 3 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI32_0: ; CHECK-NEXT: .long 0 @ double 1 ; CHECK-NEXT: .long 1072693248 ; CHECK-NEXT: .LCPI32_1: ; CHECK-NEXT: .long 0 @ double 0 ; CHECK-NEXT: .long 0 %x = call <2 x i1> @llvm.fptoui.sat.v2f64.v2i1(<2 x double> %f) ret <2 x i1> %x } define arm_aapcs_vfpcc <2 x i8> @test_unsigned_v2f64_v2i8(<2 x double> %f) { ; CHECK-LABEL: test_unsigned_v2f64_v2i8: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: .pad #32 ; CHECK-NEXT: sub sp, #32 ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: vldr d0, .LCPI33_0 ; CHECK-NEXT: vmov r5, r4, d9 ; CHECK-NEXT: vmov r10, r9, d0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: vldr d0, .LCPI33_1 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: str r0, [sp, #24] @ 4-byte Spill ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: vmov r2, r11, d0 ; CHECK-NEXT: str r2, [sp, #28] @ 4-byte Spill ; CHECK-NEXT: str.w r11, [sp, #12] @ 4-byte Spill ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: bl __aeabi_d2ulz ; CHECK-NEXT: vmov r7, r6, d8 ; CHECK-NEXT: str r1, [sp, #20] @ 4-byte Spill ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload ; CHECK-NEXT: csel r0, r0, r8, ne ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne r0, #255 ; CHECK-NEXT: str r0, [sp, #24] @ 4-byte Spill ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: str.w r10, [sp, #8] @ 4-byte Spill ; CHECK-NEXT: mov r8, r9 ; CHECK-NEXT: str.w r9, [sp, #4] @ 4-byte Spill ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr r2, [sp, #28] @ 4-byte Reload ; 
CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: str r0, [sp] @ 4-byte Spill ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r9, r0 ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_d2ulz ; CHECK-NEXT: cmp.w r9, #0 ; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill ; CHECK-NEXT: csel r9, r0, r9, ne ; CHECK-NEXT: ldr r0, [sp] @ 4-byte Reload ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r3, r8 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r9, #255 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr.w r11, [sp, #28] @ 4-byte Reload ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: ldr.w r10, [sp, #12] @ 4-byte Reload ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r11 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #20] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r5, r1, r0, ne ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne r5, #0 ; CHECK-NEXT: ldrd r3, r2, [sp, #4] @ 8-byte Folded Reload ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: mov r2, r11 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r0, r1, r0, ne ; CHECK-NEXT: cmp r4, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne r0, #0 ; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload ; CHECK-NEXT: vmov q0[2], q0[0], r9, r1 ; CHECK-NEXT: vmov q0[3], q0[1], r0, r5 ; CHECK-NEXT: add sp, #32 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} ; CHECK-NEXT: .p2align 3 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI33_0: ; CHECK-NEXT: .long 0 @ double 255 ; CHECK-NEXT: .long 1081073664 ; CHECK-NEXT: .LCPI33_1: ; CHECK-NEXT: .long 0 @ double 0 ; 
CHECK-NEXT: .long 0 %x = call <2 x i8> @llvm.fptoui.sat.v2f64.v2i8(<2 x double> %f) ret <2 x i8> %x } define arm_aapcs_vfpcc <2 x i13> @test_unsigned_v2f64_v2i13(<2 x double> %f) { ; CHECK-LABEL: test_unsigned_v2f64_v2i13: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: .pad #32 ; CHECK-NEXT: sub sp, #32 ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: vldr d0, .LCPI34_0 ; CHECK-NEXT: vmov r5, r4, d9 ; CHECK-NEXT: vmov r10, r9, d0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: vldr d0, .LCPI34_1 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: str r0, [sp, #24] @ 4-byte Spill ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: vmov r2, r11, d0 ; CHECK-NEXT: str r2, [sp, #28] @ 4-byte Spill ; CHECK-NEXT: str.w r11, [sp, #12] @ 4-byte Spill ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: bl __aeabi_d2ulz ; CHECK-NEXT: vmov r7, r6, d8 ; CHECK-NEXT: str r1, [sp, #20] @ 4-byte Spill ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload ; CHECK-NEXT: csel r0, r0, r8, ne ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movwne r0, #8191 ; CHECK-NEXT: str r0, [sp, #24] @ 4-byte Spill ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: str.w r10, [sp, #8] @ 4-byte Spill ; CHECK-NEXT: mov r8, r9 ; CHECK-NEXT: str.w r9, [sp, #4] @ 4-byte Spill ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr r2, [sp, #28] @ 4-byte Reload ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: str r0, [sp] @ 4-byte Spill ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r9, r0 ; CHECK-NEXT: mov r0, 
r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_d2ulz ; CHECK-NEXT: cmp.w r9, #0 ; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill ; CHECK-NEXT: csel r9, r0, r9, ne ; CHECK-NEXT: ldr r0, [sp] @ 4-byte Reload ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r3, r8 ; CHECK-NEXT: it ne ; CHECK-NEXT: movwne r9, #8191 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr.w r11, [sp, #28] @ 4-byte Reload ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: ldr.w r10, [sp, #12] @ 4-byte Reload ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r11 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #20] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r5, r1, r0, ne ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne r5, #0 ; CHECK-NEXT: ldrd r3, r2, [sp, #4] @ 8-byte Folded Reload ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: mov r2, r11 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r0, r1, r0, ne ; CHECK-NEXT: cmp r4, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne r0, #0 ; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload ; CHECK-NEXT: vmov q0[2], q0[0], r9, r1 ; CHECK-NEXT: vmov q0[3], q0[1], r0, r5 ; CHECK-NEXT: add sp, #32 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} ; CHECK-NEXT: .p2align 3 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI34_0: ; CHECK-NEXT: .long 0 @ double 8191 ; CHECK-NEXT: .long 1086324480 ; CHECK-NEXT: .LCPI34_1: ; CHECK-NEXT: .long 0 @ double 0 ; CHECK-NEXT: .long 0 %x = call <2 x i13> @llvm.fptoui.sat.v2f64.v2i13(<2 x double> %f) ret <2 x i13> %x } define arm_aapcs_vfpcc <2 x i16> @test_unsigned_v2f64_v2i16(<2 x double> %f) { ; 
CHECK-LABEL: test_unsigned_v2f64_v2i16: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: .pad #32 ; CHECK-NEXT: sub sp, #32 ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: vldr d0, .LCPI35_0 ; CHECK-NEXT: vmov r5, r4, d9 ; CHECK-NEXT: vmov r10, r9, d0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: vldr d0, .LCPI35_1 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: str r0, [sp, #24] @ 4-byte Spill ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: vmov r2, r11, d0 ; CHECK-NEXT: str r2, [sp, #28] @ 4-byte Spill ; CHECK-NEXT: str.w r11, [sp, #12] @ 4-byte Spill ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: bl __aeabi_d2ulz ; CHECK-NEXT: vmov r7, r6, d8 ; CHECK-NEXT: str r1, [sp, #20] @ 4-byte Spill ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload ; CHECK-NEXT: csel r0, r0, r8, ne ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movwne r0, #65535 ; CHECK-NEXT: str r0, [sp, #24] @ 4-byte Spill ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: str.w r10, [sp, #8] @ 4-byte Spill ; CHECK-NEXT: mov r8, r9 ; CHECK-NEXT: str.w r9, [sp, #4] @ 4-byte Spill ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr r2, [sp, #28] @ 4-byte Reload ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: str r0, [sp] @ 4-byte Spill ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r9, r0 ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_d2ulz ; CHECK-NEXT: cmp.w r9, #0 ; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill ; CHECK-NEXT: csel r9, r0, r9, ne ; CHECK-NEXT: 
ldr r0, [sp] @ 4-byte Reload ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r3, r8 ; CHECK-NEXT: it ne ; CHECK-NEXT: movwne r9, #65535 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr.w r11, [sp, #28] @ 4-byte Reload ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: ldr.w r10, [sp, #12] @ 4-byte Reload ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r11 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #20] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r5, r1, r0, ne ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne r5, #0 ; CHECK-NEXT: ldrd r3, r2, [sp, #4] @ 8-byte Folded Reload ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: mov r2, r11 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r0, r1, r0, ne ; CHECK-NEXT: cmp r4, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne r0, #0 ; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload ; CHECK-NEXT: vmov q0[2], q0[0], r9, r1 ; CHECK-NEXT: vmov q0[3], q0[1], r0, r5 ; CHECK-NEXT: add sp, #32 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} ; CHECK-NEXT: .p2align 3 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI35_0: ; CHECK-NEXT: .long 0 @ double 65535 ; CHECK-NEXT: .long 1089470432 ; CHECK-NEXT: .LCPI35_1: ; CHECK-NEXT: .long 0 @ double 0 ; CHECK-NEXT: .long 0 %x = call <2 x i16> @llvm.fptoui.sat.v2f64.v2i16(<2 x double> %f) ret <2 x i16> %x } define arm_aapcs_vfpcc <2 x i19> @test_unsigned_v2f64_v2i19(<2 x double> %f) { ; CHECK-LABEL: test_unsigned_v2f64_v2i19: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; 
CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: .pad #32 ; CHECK-NEXT: sub sp, #32 ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: vldr d0, .LCPI36_0 ; CHECK-NEXT: vmov r5, r4, d9 ; CHECK-NEXT: vmov r10, r9, d0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: vldr d0, .LCPI36_1 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: str r0, [sp, #24] @ 4-byte Spill ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: vmov r2, r11, d0 ; CHECK-NEXT: str r2, [sp, #28] @ 4-byte Spill ; CHECK-NEXT: str.w r11, [sp, #12] @ 4-byte Spill ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: bl __aeabi_d2ulz ; CHECK-NEXT: vmov r7, r6, d8 ; CHECK-NEXT: str r1, [sp, #20] @ 4-byte Spill ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload ; CHECK-NEXT: csel r0, r0, r8, ne ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: itt ne ; CHECK-NEXT: movwne r0, #65535 ; CHECK-NEXT: movtne r0, #7 ; CHECK-NEXT: str r0, [sp, #24] @ 4-byte Spill ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: str.w r10, [sp, #8] @ 4-byte Spill ; CHECK-NEXT: mov r8, r9 ; CHECK-NEXT: str.w r9, [sp, #4] @ 4-byte Spill ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr r2, [sp, #28] @ 4-byte Reload ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: str r0, [sp] @ 4-byte Spill ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r9, r0 ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_d2ulz ; CHECK-NEXT: cmp.w r9, #0 ; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill ; CHECK-NEXT: csel r9, r0, r9, ne ; CHECK-NEXT: ldr r0, [sp] @ 4-byte Reload ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r3, 
r8 ; CHECK-NEXT: itt ne ; CHECK-NEXT: movwne r9, #65535 ; CHECK-NEXT: movtne r9, #7 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr.w r11, [sp, #28] @ 4-byte Reload ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: ldr.w r10, [sp, #12] @ 4-byte Reload ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r11 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #20] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r5, r1, r0, ne ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne r5, #0 ; CHECK-NEXT: ldrd r3, r2, [sp, #4] @ 8-byte Folded Reload ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: mov r2, r11 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r0, r1, r0, ne ; CHECK-NEXT: cmp r4, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne r0, #0 ; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload ; CHECK-NEXT: vmov q0[2], q0[0], r9, r1 ; CHECK-NEXT: vmov q0[3], q0[1], r0, r5 ; CHECK-NEXT: add sp, #32 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} ; CHECK-NEXT: .p2align 3 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI36_0: ; CHECK-NEXT: .long 0 @ double 524287 ; CHECK-NEXT: .long 1092616188 ; CHECK-NEXT: .LCPI36_1: ; CHECK-NEXT: .long 0 @ double 0 ; CHECK-NEXT: .long 0 %x = call <2 x i19> @llvm.fptoui.sat.v2f64.v2i19(<2 x double> %f) ret <2 x i19> %x } define arm_aapcs_vfpcc <2 x i32> @test_unsigned_v2f64_v2i32_duplicate(<2 x double> %f) { ; CHECK-LABEL: test_unsigned_v2f64_v2i32_duplicate: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; 
CHECK-NEXT: .pad #32 ; CHECK-NEXT: sub sp, #32 ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: vldr d0, .LCPI37_0 ; CHECK-NEXT: vmov r5, r4, d9 ; CHECK-NEXT: vmov r10, r9, d0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: vldr d0, .LCPI37_1 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: str r0, [sp, #24] @ 4-byte Spill ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: vmov r2, r11, d0 ; CHECK-NEXT: str r2, [sp, #28] @ 4-byte Spill ; CHECK-NEXT: str.w r11, [sp, #12] @ 4-byte Spill ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: bl __aeabi_d2ulz ; CHECK-NEXT: vmov r7, r6, d8 ; CHECK-NEXT: str r1, [sp, #20] @ 4-byte Spill ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload ; CHECK-NEXT: csel r0, r0, r8, ne ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r0, #-1 ; CHECK-NEXT: str r0, [sp, #24] @ 4-byte Spill ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: str.w r10, [sp, #8] @ 4-byte Spill ; CHECK-NEXT: mov r8, r9 ; CHECK-NEXT: str.w r9, [sp, #4] @ 4-byte Spill ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr r2, [sp, #28] @ 4-byte Reload ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: str r0, [sp] @ 4-byte Spill ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r9, r0 ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_d2ulz ; CHECK-NEXT: cmp.w r9, #0 ; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill ; CHECK-NEXT: csel r9, r0, r9, ne ; CHECK-NEXT: ldr r0, [sp] @ 4-byte Reload ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r3, r8 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r9, #-1 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr.w r11, [sp, #28] @ 4-byte Reload 
; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: ldr.w r10, [sp, #12] @ 4-byte Reload ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r11 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #20] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r5, r1, r0, ne ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne r5, #0 ; CHECK-NEXT: ldrd r3, r2, [sp, #4] @ 8-byte Folded Reload ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: mov r2, r11 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r0, r1, r0, ne ; CHECK-NEXT: cmp r4, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne r0, #0 ; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload ; CHECK-NEXT: vmov q0[2], q0[0], r9, r1 ; CHECK-NEXT: vmov q0[3], q0[1], r0, r5 ; CHECK-NEXT: add sp, #32 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} ; CHECK-NEXT: .p2align 3 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI37_0: ; CHECK-NEXT: .long 4292870144 @ double 4294967295 ; CHECK-NEXT: .long 1106247679 ; CHECK-NEXT: .LCPI37_1: ; CHECK-NEXT: .long 0 @ double 0 ; CHECK-NEXT: .long 0 %x = call <2 x i32> @llvm.fptoui.sat.v2f64.v2i32(<2 x double> %f) ret <2 x i32> %x } define arm_aapcs_vfpcc <2 x i50> @test_unsigned_v2f64_v2i50(<2 x double> %f) { ; CHECK-LABEL: test_unsigned_v2f64_v2i50: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: .pad #32 ; CHECK-NEXT: sub sp, #32 ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: vldr d0, .LCPI38_0 ; CHECK-NEXT: vmov r5, r4, d9 ; CHECK-NEXT: vmov r10, r9, d0 ; 
CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: vldr d0, .LCPI38_1 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: str r0, [sp, #24] @ 4-byte Spill ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: vmov r2, r11, d0 ; CHECK-NEXT: str r2, [sp, #28] @ 4-byte Spill ; CHECK-NEXT: str.w r11, [sp, #12] @ 4-byte Spill ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: bl __aeabi_d2ulz ; CHECK-NEXT: vmov r7, r6, d8 ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: str r0, [sp, #20] @ 4-byte Spill ; CHECK-NEXT: csel r0, r1, r8, ne ; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: mov r8, r9 ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: itt ne ; CHECK-NEXT: movwne r0, #65535 ; CHECK-NEXT: movtne r0, #3 ; CHECK-NEXT: str r0, [sp, #24] @ 4-byte Spill ; CHECK-NEXT: str.w r10, [sp, #8] @ 4-byte Spill ; CHECK-NEXT: str.w r9, [sp, #4] @ 4-byte Spill ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr r2, [sp, #28] @ 4-byte Reload ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: str r0, [sp] @ 4-byte Spill ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r9, r0 ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_d2ulz ; CHECK-NEXT: str r0, [sp, #16] @ 4-byte Spill ; CHECK-NEXT: cmp.w r9, #0 ; CHECK-NEXT: ldr r0, [sp] @ 4-byte Reload ; CHECK-NEXT: csel r9, r1, r9, ne ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r3, r8 ; CHECK-NEXT: itt ne ; CHECK-NEXT: movwne r9, #65535 ; CHECK-NEXT: movtne r9, #3 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr.w r11, [sp, #28] @ 4-byte Reload ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: ldr.w r10, [sp, #12] @ 4-byte Reload ; CHECK-NEXT: mov r0, r5 ; 
CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r11 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #20] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r5, r1, r0, ne ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r5, #-1 ; CHECK-NEXT: ldrd r3, r2, [sp, #4] @ 8-byte Folded Reload ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: mov r2, r11 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r0, r1, r0, ne ; CHECK-NEXT: cmp r4, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r0, #-1 ; CHECK-NEXT: vmov q0[2], q0[0], r0, r5 ; CHECK-NEXT: ldr r0, [sp, #24] @ 4-byte Reload ; CHECK-NEXT: vmov q0[3], q0[1], r9, r0 ; CHECK-NEXT: add sp, #32 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} ; CHECK-NEXT: .p2align 3 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI38_0: ; CHECK-NEXT: .long 4294967288 @ double 1125899906842623 ; CHECK-NEXT: .long 1125122047 ; CHECK-NEXT: .LCPI38_1: ; CHECK-NEXT: .long 0 @ double 0 ; CHECK-NEXT: .long 0 %x = call <2 x i50> @llvm.fptoui.sat.v2f64.v2i50(<2 x double> %f) ret <2 x i50> %x } define arm_aapcs_vfpcc <2 x i64> @test_unsigned_v2f64_v2i64(<2 x double> %f) { ; CHECK-LABEL: test_unsigned_v2f64_v2i64: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: .pad #32 ; CHECK-NEXT: sub sp, #32 ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: vldr d0, .LCPI39_0 ; CHECK-NEXT: vmov r5, r4, d9 ; CHECK-NEXT: vmov r10, r9, d0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: mov r3, r9 
; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: vldr d0, .LCPI39_1 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: str r0, [sp, #24] @ 4-byte Spill ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: vmov r2, r11, d0 ; CHECK-NEXT: str r2, [sp, #28] @ 4-byte Spill ; CHECK-NEXT: str.w r11, [sp, #12] @ 4-byte Spill ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: bl __aeabi_d2ulz ; CHECK-NEXT: vmov r7, r6, d8 ; CHECK-NEXT: str r1, [sp, #20] @ 4-byte Spill ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload ; CHECK-NEXT: csel r0, r0, r8, ne ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r0, #-1 ; CHECK-NEXT: str r0, [sp, #24] @ 4-byte Spill ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: str.w r10, [sp, #8] @ 4-byte Spill ; CHECK-NEXT: mov r8, r9 ; CHECK-NEXT: str.w r9, [sp, #4] @ 4-byte Spill ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr r2, [sp, #28] @ 4-byte Reload ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: str r0, [sp] @ 4-byte Spill ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r9, r0 ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_d2ulz ; CHECK-NEXT: cmp.w r9, #0 ; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill ; CHECK-NEXT: csel r9, r0, r9, ne ; CHECK-NEXT: ldr r0, [sp] @ 4-byte Reload ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r3, r8 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r9, #-1 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr.w r11, [sp, #28] @ 4-byte Reload ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: ldr.w r10, [sp, #12] @ 4-byte Reload ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: mov r2, r11 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #20] @ 4-byte Reload ; 
CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r5, r1, r0, ne ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r5, #-1 ; CHECK-NEXT: ldrd r3, r2, [sp, #4] @ 8-byte Folded Reload ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r6 ; CHECK-NEXT: mov r2, r11 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r0, r1, r0, ne ; CHECK-NEXT: cmp r4, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r0, #-1 ; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload ; CHECK-NEXT: vmov q0[2], q0[0], r9, r1 ; CHECK-NEXT: vmov q0[3], q0[1], r0, r5 ; CHECK-NEXT: add sp, #32 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} ; CHECK-NEXT: .p2align 3 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI39_0: ; CHECK-NEXT: .long 4294967295 @ double 1.844674407370955E+19 ; CHECK-NEXT: .long 1139802111 ; CHECK-NEXT: .LCPI39_1: ; CHECK-NEXT: .long 0 @ double 0 ; CHECK-NEXT: .long 0 %x = call <2 x i64> @llvm.fptoui.sat.v2f64.v2i64(<2 x double> %f) ret <2 x i64> %x } define arm_aapcs_vfpcc <2 x i100> @test_unsigned_v2f64_v2i100(<2 x double> %f) { ; CHECK-LABEL: test_unsigned_v2f64_v2i100: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: .pad #48 ; CHECK-NEXT: sub sp, #48 ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: vldr d0, .LCPI40_0 ; CHECK-NEXT: vmov r9, r5, d8 ; CHECK-NEXT: str r0, [sp, #44] @ 4-byte Spill ; CHECK-NEXT: vmov r2, r3, d0 ; CHECK-NEXT: mov r0, r9 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: mov r7, r2 ; CHECK-NEXT: mov r6, r3 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: vldr d0, .LCPI40_1 ; CHECK-NEXT: mov r11, r0 ; CHECK-NEXT: 
mov r0, r9 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: vmov r2, r3, d0 ; CHECK-NEXT: str r2, [sp, #40] @ 4-byte Spill ; CHECK-NEXT: mov r10, r3 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: mov r0, r9 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: bl __fixunsdfti ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: strd r1, r0, [sp, #8] @ 8-byte Folded Spill ; CHECK-NEXT: csel r0, r2, r8, ne ; CHECK-NEXT: str r3, [sp, #24] @ 4-byte Spill ; CHECK-NEXT: cmp.w r11, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r0, #-1 ; CHECK-NEXT: ldr r4, [sp, #44] @ 4-byte Reload ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: mov r2, r7 ; CHECK-NEXT: mov r3, r6 ; CHECK-NEXT: mov r11, r7 ; CHECK-NEXT: str r0, [r4, #8] ; CHECK-NEXT: mov r0, r9 ; CHECK-NEXT: str r5, [sp, #20] @ 4-byte Spill ; CHECK-NEXT: str r7, [sp, #28] @ 4-byte Spill ; CHECK-NEXT: str r6, [sp, #32] @ 4-byte Spill ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr r7, [sp, #40] @ 4-byte Reload ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: mov r0, r9 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: str.w r9, [sp, #16] @ 4-byte Spill ; CHECK-NEXT: mov r2, r7 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #8] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: mov r2, r11 ; CHECK-NEXT: mov r3, r6 ; CHECK-NEXT: csel r0, r1, r0, ne ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r0, #-1 ; CHECK-NEXT: str r0, [r4, #4] ; CHECK-NEXT: mov r0, r9 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: mov r6, r0 ; CHECK-NEXT: mov r0, r9 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: mov r2, r7 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: mov r9, r7 ; CHECK-NEXT: str.w r10, [sp, #36] @ 4-byte Spill ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: vmov r8, r11, d9 ; CHECK-NEXT: ldr r1, [sp, #12] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r0, r1, r0, ne ; CHECK-NEXT: cmp r6, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r0, #-1 ; CHECK-NEXT: str r0, [r4] ; CHECK-NEXT: ldr r5, [sp, 
#28] @ 4-byte Reload ; CHECK-NEXT: ldr r6, [sp, #32] @ 4-byte Reload ; CHECK-NEXT: mov r2, r5 ; CHECK-NEXT: mov r3, r6 ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: mov r1, r11 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: mov r1, r11 ; CHECK-NEXT: mov r2, r7 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r10, r0 ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: mov r1, r11 ; CHECK-NEXT: bl __fixunsdfti ; CHECK-NEXT: cmp.w r10, #0 ; CHECK-NEXT: strd r2, r0, [sp, #4] @ 8-byte Folded Spill ; CHECK-NEXT: csel r7, r1, r10, ne ; CHECK-NEXT: str r3, [sp, #12] @ 4-byte Spill ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: mov r1, r11 ; CHECK-NEXT: mov r2, r5 ; CHECK-NEXT: mov r3, r6 ; CHECK-NEXT: cmp r4, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r7, #-1 ; CHECK-NEXT: mov r4, r6 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr.w r10, [sp, #36] @ 4-byte Reload ; CHECK-NEXT: mov r6, r0 ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: mov r1, r11 ; CHECK-NEXT: mov r2, r9 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #4] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: mov r2, r5 ; CHECK-NEXT: mov r3, r4 ; CHECK-NEXT: csel r9, r1, r0, ne ; CHECK-NEXT: cmp r6, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r9, #-1 ; CHECK-NEXT: ldr r6, [sp, #44] @ 4-byte Reload ; CHECK-NEXT: lsrs r0, r7, #28 ; CHECK-NEXT: mov r1, r11 ; CHECK-NEXT: orr.w r0, r0, r9, lsl #4 ; CHECK-NEXT: str r0, [r6, #20] ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr r2, [sp, #40] @ 4-byte Reload ; CHECK-NEXT: mov r1, r11 ; CHECK-NEXT: str r0, [sp, #4] @ 4-byte Spill ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: mov r5, r10 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #8] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r4, r1, r0, ne ; CHECK-NEXT: ldr r0, [sp, #4] @ 4-byte Reload ; CHECK-NEXT: mov r1, r11 ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: it ne ; 
CHECK-NEXT: movne.w r4, #-1 ; CHECK-NEXT: lsrs r0, r4, #28 ; CHECK-NEXT: orr.w r0, r0, r7, lsl #4 ; CHECK-NEXT: str r0, [r6, #16] ; CHECK-NEXT: ldr r6, [sp, #28] @ 4-byte Reload ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: ldr.w r10, [sp, #32] @ 4-byte Reload ; CHECK-NEXT: mov r2, r6 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: mov r1, r11 ; CHECK-NEXT: ldr.w r11, [sp, #40] @ 4-byte Reload ; CHECK-NEXT: mov r7, r0 ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: mov r3, r5 ; CHECK-NEXT: mov r2, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #12] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: mov r2, r6 ; CHECK-NEXT: mov r3, r10 ; CHECK-NEXT: csel r0, r1, r0, ne ; CHECK-NEXT: cmp r7, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne r0, #15 ; CHECK-NEXT: lsr.w r1, r9, #28 ; CHECK-NEXT: ldr.w r9, [sp, #44] @ 4-byte Reload ; CHECK-NEXT: orr.w r0, r1, r0, lsl #4 ; CHECK-NEXT: strb.w r0, [r9, #24] ; CHECK-NEXT: ldr r7, [sp, #16] @ 4-byte Reload ; CHECK-NEXT: ldr r5, [sp, #20] @ 4-byte Reload ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr r3, [sp, #36] @ 4-byte Reload ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: mov r2, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r0, r1, r0, ne ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne r0, #15 ; CHECK-NEXT: and r0, r0, #15 ; CHECK-NEXT: orr.w r0, r0, r4, lsl #4 ; CHECK-NEXT: str.w r0, [r9, #12] ; CHECK-NEXT: add sp, #48 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} ; CHECK-NEXT: .p2align 3 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI40_0: ; CHECK-NEXT: .long 4294967295 @ double 1.2676506002282293E+30 ; CHECK-NEXT: .long 1177550847 ; CHECK-NEXT: .LCPI40_1: ; CHECK-NEXT: .long 0 @ double 0 ; CHECK-NEXT: .long 0 %x = call <2 x i100> 
@llvm.fptoui.sat.v2f64.v2i100(<2 x double> %f) ret <2 x i100> %x } define arm_aapcs_vfpcc <2 x i128> @test_unsigned_v2f64_v2i128(<2 x double> %f) { ; CHECK-LABEL: test_unsigned_v2f64_v2i128: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: .pad #32 ; CHECK-NEXT: sub sp, #32 ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: vldr d0, .LCPI41_0 ; CHECK-NEXT: vmov r8, r7, d9 ; CHECK-NEXT: str r0, [sp, #24] @ 4-byte Spill ; CHECK-NEXT: vmov r6, r4, d0 ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: mov r1, r7 ; CHECK-NEXT: mov r2, r6 ; CHECK-NEXT: mov r3, r4 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: vldr d0, .LCPI41_1 ; CHECK-NEXT: mov r9, r0 ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: mov r1, r7 ; CHECK-NEXT: vmov r10, r11, d0 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r5, r0 ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: mov r1, r7 ; CHECK-NEXT: bl __fixunsdfti ; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: strd r1, r0, [sp, #16] @ 8-byte Folded Spill ; CHECK-NEXT: csel r0, r3, r5, ne ; CHECK-NEXT: str r2, [sp, #8] @ 4-byte Spill ; CHECK-NEXT: cmp.w r9, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r0, #-1 ; CHECK-NEXT: ldr r5, [sp, #24] @ 4-byte Reload ; CHECK-NEXT: mov r1, r7 ; CHECK-NEXT: mov r2, r6 ; CHECK-NEXT: mov r3, r4 ; CHECK-NEXT: str r0, [r5, #28] ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: str r6, [sp, #28] @ 4-byte Spill ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: mov r9, r0 ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: mov r1, r7 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: str.w r10, [sp, #4] @ 4-byte Spill ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #8] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: mov r2, r6 ; CHECK-NEXT: mov r3, r4 ; CHECK-NEXT: csel r0, r1, r0, ne ; CHECK-NEXT: cmp.w r9, 
#0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r0, #-1 ; CHECK-NEXT: str r0, [r5, #24] ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: mov r1, r7 ; CHECK-NEXT: str r4, [sp] @ 4-byte Spill ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: mov r9, r0 ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: mov r1, r7 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: mov r3, r4 ; CHECK-NEXT: vmov r6, r5, d8 ; CHECK-NEXT: csel r0, r1, r0, ne ; CHECK-NEXT: cmp.w r9, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r0, #-1 ; CHECK-NEXT: ldr.w r9, [sp, #24] @ 4-byte Reload ; CHECK-NEXT: mov r1, r7 ; CHECK-NEXT: str.w r0, [r9, #20] ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: ldr r2, [sp, #28] @ 4-byte Reload ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: ldr r4, [sp, #4] @ 4-byte Reload ; CHECK-NEXT: mov r10, r0 ; CHECK-NEXT: mov r1, r7 ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: mov r3, r11 ; CHECK-NEXT: mov r7, r11 ; CHECK-NEXT: mov r2, r4 ; CHECK-NEXT: str.w r11, [sp, #12] @ 4-byte Spill ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #20] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: mov r11, r9 ; CHECK-NEXT: csel r0, r1, r0, ne ; CHECK-NEXT: cmp.w r10, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r0, #-1 ; CHECK-NEXT: str.w r0, [r9, #16] ; CHECK-NEXT: ldr.w r8, [sp, #28] @ 4-byte Reload ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: ldr.w r9, [sp] @ 4-byte Reload ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: mov r2, r8 ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: mov r10, r0 ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: mov r2, r4 ; CHECK-NEXT: mov r3, r7 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: mov r7, r0 ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: bl __fixunsdfti ; CHECK-NEXT: cmp r7, #0 ; CHECK-NEXT: strd r1, r0, [sp, #16] @ 8-byte Folded Spill ; CHECK-NEXT: csel r0, r3, r7, ne ; CHECK-NEXT: str r2, [sp, #8] @ 4-byte 
Spill ; CHECK-NEXT: cmp.w r10, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r0, #-1 ; CHECK-NEXT: str.w r0, [r11, #12] ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: mov r2, r8 ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: mov r7, r11 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: mov r2, r4 ; CHECK-NEXT: mov r10, r4 ; CHECK-NEXT: ldr r4, [sp, #12] @ 4-byte Reload ; CHECK-NEXT: mov r11, r0 ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: mov r3, r4 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #8] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: mov r2, r8 ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: csel r0, r1, r0, ne ; CHECK-NEXT: cmp.w r11, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r0, #-1 ; CHECK-NEXT: str r0, [r7, #8] ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: mov r11, r0 ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: mov r3, r4 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: mov r2, r8 ; CHECK-NEXT: mov r3, r9 ; CHECK-NEXT: csel r0, r1, r0, ne ; CHECK-NEXT: cmp.w r11, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r0, #-1 ; CHECK-NEXT: str r0, [r7, #4] ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: bl __aeabi_dcmpgt ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: mov r1, r5 ; CHECK-NEXT: mov r2, r10 ; CHECK-NEXT: mov r3, r4 ; CHECK-NEXT: bl __aeabi_dcmpge ; CHECK-NEXT: ldr r1, [sp, #20] @ 4-byte Reload ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csel r0, r1, r0, ne ; CHECK-NEXT: cmp.w r8, #0 ; CHECK-NEXT: it ne ; CHECK-NEXT: movne.w r0, #-1 ; CHECK-NEXT: str r0, [r7] ; CHECK-NEXT: add sp, #32 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} ; CHECK-NEXT: .p2align 3 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI41_0: ; CHECK-NEXT: .long 4294967295 @ double 
3.4028236692093843E+38 ; CHECK-NEXT: .long 1206910975 ; CHECK-NEXT: .LCPI41_1: ; CHECK-NEXT: .long 0 @ double 0 ; CHECK-NEXT: .long 0 %x = call <2 x i128> @llvm.fptoui.sat.v2f64.v2i128(<2 x double> %f) ret <2 x i128> %x } ; ; 4-Vector half to unsigned integer -- result size variation ; declare <8 x i1> @llvm.fptoui.sat.v8f16.v8i1 (<8 x half>) declare <8 x i8> @llvm.fptoui.sat.v8f16.v8i8 (<8 x half>) declare <8 x i13> @llvm.fptoui.sat.v8f16.v8i13 (<8 x half>) declare <8 x i16> @llvm.fptoui.sat.v8f16.v8i16 (<8 x half>) declare <8 x i19> @llvm.fptoui.sat.v8f16.v8i19 (<8 x half>) declare <8 x i50> @llvm.fptoui.sat.v8f16.v8i50 (<8 x half>) declare <8 x i64> @llvm.fptoui.sat.v8f16.v8i64 (<8 x half>) declare <8 x i100> @llvm.fptoui.sat.v8f16.v8i100(<8 x half>) declare <8 x i128> @llvm.fptoui.sat.v8f16.v8i128(<8 x half>) define arm_aapcs_vfpcc <8 x i1> @test_unsigned_v8f16_v8i1(<8 x half> %f) { ; CHECK-LABEL: test_unsigned_v8f16_v8i1: ; CHECK: @ %bb.0: ; CHECK-NEXT: vldr s4, .LCPI42_0 ; CHECK-NEXT: vcvtt.f32.f16 s8, s3 ; CHECK-NEXT: vcvtb.f32.f16 s10, s3 ; CHECK-NEXT: vcvtb.f32.f16 s3, s0 ; CHECK-NEXT: vmov.f32 s6, #1.000000e+00 ; CHECK-NEXT: vmaxnm.f32 s3, s3, s4 ; CHECK-NEXT: vminnm.f32 s3, s3, s6 ; CHECK-NEXT: vcvtt.f32.f16 s0, s0 ; CHECK-NEXT: vcvt.u32.f32 s3, s3 ; CHECK-NEXT: vmaxnm.f32 s0, s0, s4 ; CHECK-NEXT: vminnm.f32 s0, s0, s6 ; CHECK-NEXT: movs r1, #0 ; CHECK-NEXT: vcvt.u32.f32 s0, s0 ; CHECK-NEXT: vcvtt.f32.f16 s14, s1 ; CHECK-NEXT: vcvtb.f32.f16 s1, s1 ; CHECK-NEXT: vmaxnm.f32 s14, s14, s4 ; CHECK-NEXT: vmaxnm.f32 s1, s1, s4 ; CHECK-NEXT: vminnm.f32 s14, s14, s6 ; CHECK-NEXT: vminnm.f32 s1, s1, s6 ; CHECK-NEXT: vcvt.u32.f32 s14, s14 ; CHECK-NEXT: vcvt.u32.f32 s1, s1 ; CHECK-NEXT: vcvtt.f32.f16 s12, s2 ; CHECK-NEXT: vmov r2, s3 ; CHECK-NEXT: vcvtb.f32.f16 s2, s2 ; CHECK-NEXT: vmaxnm.f32 s2, s2, s4 ; CHECK-NEXT: vmaxnm.f32 s12, s12, s4 ; CHECK-NEXT: vminnm.f32 s2, s2, s6 ; CHECK-NEXT: vminnm.f32 s12, s12, s6 ; CHECK-NEXT: vcvt.u32.f32 s2, s2 ; CHECK-NEXT: 
vmaxnm.f32 s10, s10, s4 ; CHECK-NEXT: vcvt.u32.f32 s12, s12 ; CHECK-NEXT: vminnm.f32 s10, s10, s6 ; CHECK-NEXT: vcvt.u32.f32 s10, s10 ; CHECK-NEXT: vmaxnm.f32 s8, s8, s4 ; CHECK-NEXT: vminnm.f32 s8, s8, s6 ; CHECK-NEXT: vcvt.u32.f32 s8, s8 ; CHECK-NEXT: and r2, r2, #1 ; CHECK-NEXT: rsbs r2, r2, #0 ; CHECK-NEXT: bfi r1, r2, #0, #1 ; CHECK-NEXT: vmov r2, s0 ; CHECK-NEXT: and r2, r2, #1 ; CHECK-NEXT: rsbs r2, r2, #0 ; CHECK-NEXT: bfi r1, r2, #1, #1 ; CHECK-NEXT: vmov r2, s1 ; CHECK-NEXT: and r2, r2, #1 ; CHECK-NEXT: rsbs r2, r2, #0 ; CHECK-NEXT: bfi r1, r2, #2, #1 ; CHECK-NEXT: vmov r2, s14 ; CHECK-NEXT: and r2, r2, #1 ; CHECK-NEXT: rsbs r2, r2, #0 ; CHECK-NEXT: bfi r1, r2, #3, #1 ; CHECK-NEXT: vmov r2, s2 ; CHECK-NEXT: and r2, r2, #1 ; CHECK-NEXT: rsbs r2, r2, #0 ; CHECK-NEXT: bfi r1, r2, #4, #1 ; CHECK-NEXT: vmov r2, s12 ; CHECK-NEXT: and r2, r2, #1 ; CHECK-NEXT: rsbs r2, r2, #0 ; CHECK-NEXT: bfi r1, r2, #5, #1 ; CHECK-NEXT: vmov r2, s10 ; CHECK-NEXT: and r2, r2, #1 ; CHECK-NEXT: rsbs r2, r2, #0 ; CHECK-NEXT: bfi r1, r2, #6, #1 ; CHECK-NEXT: vmov r2, s8 ; CHECK-NEXT: and r2, r2, #1 ; CHECK-NEXT: rsbs r2, r2, #0 ; CHECK-NEXT: bfi r1, r2, #7, #1 ; CHECK-NEXT: strb r1, [r0] ; CHECK-NEXT: bx lr ; CHECK-NEXT: .p2align 2 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI42_0: ; CHECK-NEXT: .long 0x00000000 @ float 0 %x = call <8 x i1> @llvm.fptoui.sat.v8f16.v8i1(<8 x half> %f) ret <8 x i1> %x } define arm_aapcs_vfpcc <8 x i8> @test_unsigned_v8f16_v8i8(<8 x half> %f) { ; CHECK-MVE-LABEL: test_unsigned_v8f16_v8i8: ; CHECK-MVE: @ %bb.0: ; CHECK-MVE-NEXT: vldr s6, .LCPI43_1 ; CHECK-MVE-NEXT: vcvtt.f32.f16 s10, s2 ; CHECK-MVE-NEXT: vcvtb.f32.f16 s2, s2 ; CHECK-MVE-NEXT: vldr s4, .LCPI43_0 ; CHECK-MVE-NEXT: vmaxnm.f32 s2, s2, s6 ; CHECK-MVE-NEXT: vcvtt.f32.f16 s8, s3 ; CHECK-MVE-NEXT: vminnm.f32 s2, s2, s4 ; CHECK-MVE-NEXT: vcvtb.f32.f16 s12, s3 ; CHECK-MVE-NEXT: vcvt.u32.f32 s5, s2 ; CHECK-MVE-NEXT: vcvtt.f32.f16 s2, s0 ; CHECK-MVE-NEXT: vmaxnm.f32 s2, s2, s6 ; CHECK-MVE-NEXT: 
vcvtb.f32.f16 s0, s0 ; CHECK-MVE-NEXT: vmaxnm.f32 s0, s0, s6 ; CHECK-MVE-NEXT: vminnm.f32 s2, s2, s4 ; CHECK-MVE-NEXT: vminnm.f32 s0, s0, s4 ; CHECK-MVE-NEXT: vcvt.u32.f32 s7, s2 ; CHECK-MVE-NEXT: vcvtb.f32.f16 s2, s1 ; CHECK-MVE-NEXT: vcvtt.f32.f16 s14, s1 ; CHECK-MVE-NEXT: vmaxnm.f32 s2, s2, s6 ; CHECK-MVE-NEXT: vcvt.u32.f32 s0, s0 ; CHECK-MVE-NEXT: vmaxnm.f32 s8, s8, s6 ; CHECK-MVE-NEXT: vmaxnm.f32 s10, s10, s6 ; CHECK-MVE-NEXT: vmaxnm.f32 s12, s12, s6 ; CHECK-MVE-NEXT: vmaxnm.f32 s14, s14, s6 ; CHECK-MVE-NEXT: vminnm.f32 s2, s2, s4 ; CHECK-MVE-NEXT: vminnm.f32 s8, s8, s4 ; CHECK-MVE-NEXT: vminnm.f32 s10, s10, s4 ; CHECK-MVE-NEXT: vminnm.f32 s12, s12, s4 ; CHECK-MVE-NEXT: vminnm.f32 s14, s14, s4 ; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s2 ; CHECK-MVE-NEXT: vcvt.u32.f32 s14, s14 ; CHECK-MVE-NEXT: vcvt.u32.f32 s10, s10 ; CHECK-MVE-NEXT: vcvt.u32.f32 s12, s12 ; CHECK-MVE-NEXT: vmov r0, s0 ; CHECK-MVE-NEXT: vcvt.u32.f32 s8, s8 ; CHECK-MVE-NEXT: vmov.16 q0[0], r0 ; CHECK-MVE-NEXT: vmov r0, s7 ; CHECK-MVE-NEXT: vmov.16 q0[1], r0 ; CHECK-MVE-NEXT: vmov r0, s4 ; CHECK-MVE-NEXT: vmov.16 q0[2], r0 ; CHECK-MVE-NEXT: vmov r0, s14 ; CHECK-MVE-NEXT: vmov.16 q0[3], r0 ; CHECK-MVE-NEXT: vmov r0, s5 ; CHECK-MVE-NEXT: vmov.16 q0[4], r0 ; CHECK-MVE-NEXT: vmov r0, s10 ; CHECK-MVE-NEXT: vmov.16 q0[5], r0 ; CHECK-MVE-NEXT: vmov r0, s12 ; CHECK-MVE-NEXT: vmov.16 q0[6], r0 ; CHECK-MVE-NEXT: vmov r0, s8 ; CHECK-MVE-NEXT: vmov.16 q0[7], r0 ; CHECK-MVE-NEXT: bx lr ; CHECK-MVE-NEXT: .p2align 2 ; CHECK-MVE-NEXT: @ %bb.1: ; CHECK-MVE-NEXT: .LCPI43_0: ; CHECK-MVE-NEXT: .long 0x437f0000 @ float 255 ; CHECK-MVE-NEXT: .LCPI43_1: ; CHECK-MVE-NEXT: .long 0x00000000 @ float 0 ; ; CHECK-MVEFP-LABEL: test_unsigned_v8f16_v8i8: ; CHECK-MVEFP: @ %bb.0: ; CHECK-MVEFP-NEXT: vcvt.u16.f16 q0, q0 ; CHECK-MVEFP-NEXT: vqmovnb.u16 q0, q0 ; CHECK-MVEFP-NEXT: vmovlb.u8 q0, q0 ; CHECK-MVEFP-NEXT: bx lr %x = call <8 x i8> @llvm.fptoui.sat.v8f16.v8i8(<8 x half> %f) ret <8 x i8> %x } define arm_aapcs_vfpcc <8 x i13> 
@test_unsigned_v8f16_v8i13(<8 x half> %f) { ; CHECK-MVE-LABEL: test_unsigned_v8f16_v8i13: ; CHECK-MVE: @ %bb.0: ; CHECK-MVE-NEXT: vldr s6, .LCPI44_1 ; CHECK-MVE-NEXT: vcvtt.f32.f16 s10, s2 ; CHECK-MVE-NEXT: vcvtb.f32.f16 s2, s2 ; CHECK-MVE-NEXT: vldr s4, .LCPI44_0 ; CHECK-MVE-NEXT: vmaxnm.f32 s2, s2, s6 ; CHECK-MVE-NEXT: vcvtt.f32.f16 s8, s3 ; CHECK-MVE-NEXT: vminnm.f32 s2, s2, s4 ; CHECK-MVE-NEXT: vcvtb.f32.f16 s12, s3 ; CHECK-MVE-NEXT: vcvt.u32.f32 s5, s2 ; CHECK-MVE-NEXT: vcvtt.f32.f16 s2, s0 ; CHECK-MVE-NEXT: vmaxnm.f32 s2, s2, s6 ; CHECK-MVE-NEXT: vcvtb.f32.f16 s0, s0 ; CHECK-MVE-NEXT: vmaxnm.f32 s0, s0, s6 ; CHECK-MVE-NEXT: vminnm.f32 s2, s2, s4 ; CHECK-MVE-NEXT: vminnm.f32 s0, s0, s4 ; CHECK-MVE-NEXT: vcvt.u32.f32 s7, s2 ; CHECK-MVE-NEXT: vcvtb.f32.f16 s2, s1 ; CHECK-MVE-NEXT: vcvtt.f32.f16 s14, s1 ; CHECK-MVE-NEXT: vmaxnm.f32 s2, s2, s6 ; CHECK-MVE-NEXT: vcvt.u32.f32 s0, s0 ; CHECK-MVE-NEXT: vmaxnm.f32 s8, s8, s6 ; CHECK-MVE-NEXT: vmaxnm.f32 s10, s10, s6 ; CHECK-MVE-NEXT: vmaxnm.f32 s12, s12, s6 ; CHECK-MVE-NEXT: vmaxnm.f32 s14, s14, s6 ; CHECK-MVE-NEXT: vminnm.f32 s2, s2, s4 ; CHECK-MVE-NEXT: vminnm.f32 s8, s8, s4 ; CHECK-MVE-NEXT: vminnm.f32 s10, s10, s4 ; CHECK-MVE-NEXT: vminnm.f32 s12, s12, s4 ; CHECK-MVE-NEXT: vminnm.f32 s14, s14, s4 ; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s2 ; CHECK-MVE-NEXT: vcvt.u32.f32 s14, s14 ; CHECK-MVE-NEXT: vcvt.u32.f32 s10, s10 ; CHECK-MVE-NEXT: vcvt.u32.f32 s12, s12 ; CHECK-MVE-NEXT: vmov r0, s0 ; CHECK-MVE-NEXT: vcvt.u32.f32 s8, s8 ; CHECK-MVE-NEXT: vmov.16 q0[0], r0 ; CHECK-MVE-NEXT: vmov r0, s7 ; CHECK-MVE-NEXT: vmov.16 q0[1], r0 ; CHECK-MVE-NEXT: vmov r0, s4 ; CHECK-MVE-NEXT: vmov.16 q0[2], r0 ; CHECK-MVE-NEXT: vmov r0, s14 ; CHECK-MVE-NEXT: vmov.16 q0[3], r0 ; CHECK-MVE-NEXT: vmov r0, s5 ; CHECK-MVE-NEXT: vmov.16 q0[4], r0 ; CHECK-MVE-NEXT: vmov r0, s10 ; CHECK-MVE-NEXT: vmov.16 q0[5], r0 ; CHECK-MVE-NEXT: vmov r0, s12 ; CHECK-MVE-NEXT: vmov.16 q0[6], r0 ; CHECK-MVE-NEXT: vmov r0, s8 ; CHECK-MVE-NEXT: vmov.16 q0[7], r0 ; 
CHECK-MVE-NEXT: bx lr ; CHECK-MVE-NEXT: .p2align 2 ; CHECK-MVE-NEXT: @ %bb.1: ; CHECK-MVE-NEXT: .LCPI44_0: ; CHECK-MVE-NEXT: .long 0x45fff800 @ float 8191 ; CHECK-MVE-NEXT: .LCPI44_1: ; CHECK-MVE-NEXT: .long 0x00000000 @ float 0 ; ; CHECK-MVEFP-LABEL: test_unsigned_v8f16_v8i13: ; CHECK-MVEFP: @ %bb.0: ; CHECK-MVEFP-NEXT: vmvn.i16 q1, #0xe000 ; CHECK-MVEFP-NEXT: vcvt.u16.f16 q0, q0 ; CHECK-MVEFP-NEXT: vmin.u16 q0, q0, q1 ; CHECK-MVEFP-NEXT: bx lr %x = call <8 x i13> @llvm.fptoui.sat.v8f16.v8i13(<8 x half> %f) ret <8 x i13> %x } define arm_aapcs_vfpcc <8 x i16> @test_unsigned_v8f16_v8i16(<8 x half> %f) { ; CHECK-MVE-LABEL: test_unsigned_v8f16_v8i16: ; CHECK-MVE: @ %bb.0: ; CHECK-MVE-NEXT: vldr s6, .LCPI45_1 ; CHECK-MVE-NEXT: vcvtt.f32.f16 s10, s2 ; CHECK-MVE-NEXT: vcvtb.f32.f16 s2, s2 ; CHECK-MVE-NEXT: vldr s4, .LCPI45_0 ; CHECK-MVE-NEXT: vmaxnm.f32 s2, s2, s6 ; CHECK-MVE-NEXT: vcvtt.f32.f16 s8, s3 ; CHECK-MVE-NEXT: vminnm.f32 s2, s2, s4 ; CHECK-MVE-NEXT: vcvtb.f32.f16 s12, s3 ; CHECK-MVE-NEXT: vcvt.u32.f32 s5, s2 ; CHECK-MVE-NEXT: vcvtt.f32.f16 s2, s0 ; CHECK-MVE-NEXT: vmaxnm.f32 s2, s2, s6 ; CHECK-MVE-NEXT: vcvtb.f32.f16 s0, s0 ; CHECK-MVE-NEXT: vmaxnm.f32 s0, s0, s6 ; CHECK-MVE-NEXT: vminnm.f32 s2, s2, s4 ; CHECK-MVE-NEXT: vminnm.f32 s0, s0, s4 ; CHECK-MVE-NEXT: vcvt.u32.f32 s7, s2 ; CHECK-MVE-NEXT: vcvtb.f32.f16 s2, s1 ; CHECK-MVE-NEXT: vcvtt.f32.f16 s14, s1 ; CHECK-MVE-NEXT: vmaxnm.f32 s2, s2, s6 ; CHECK-MVE-NEXT: vcvt.u32.f32 s0, s0 ; CHECK-MVE-NEXT: vmaxnm.f32 s8, s8, s6 ; CHECK-MVE-NEXT: vmaxnm.f32 s10, s10, s6 ; CHECK-MVE-NEXT: vmaxnm.f32 s12, s12, s6 ; CHECK-MVE-NEXT: vmaxnm.f32 s14, s14, s6 ; CHECK-MVE-NEXT: vminnm.f32 s2, s2, s4 ; CHECK-MVE-NEXT: vminnm.f32 s8, s8, s4 ; CHECK-MVE-NEXT: vminnm.f32 s10, s10, s4 ; CHECK-MVE-NEXT: vminnm.f32 s12, s12, s4 ; CHECK-MVE-NEXT: vminnm.f32 s14, s14, s4 ; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s2 ; CHECK-MVE-NEXT: vcvt.u32.f32 s14, s14 ; CHECK-MVE-NEXT: vcvt.u32.f32 s10, s10 ; CHECK-MVE-NEXT: vcvt.u32.f32 s12, s12 ; 
CHECK-MVE-NEXT: vmov r0, s0 ; CHECK-MVE-NEXT: vcvt.u32.f32 s8, s8 ; CHECK-MVE-NEXT: vmov.16 q0[0], r0 ; CHECK-MVE-NEXT: vmov r0, s7 ; CHECK-MVE-NEXT: vmov.16 q0[1], r0 ; CHECK-MVE-NEXT: vmov r0, s4 ; CHECK-MVE-NEXT: vmov.16 q0[2], r0 ; CHECK-MVE-NEXT: vmov r0, s14 ; CHECK-MVE-NEXT: vmov.16 q0[3], r0 ; CHECK-MVE-NEXT: vmov r0, s5 ; CHECK-MVE-NEXT: vmov.16 q0[4], r0 ; CHECK-MVE-NEXT: vmov r0, s10 ; CHECK-MVE-NEXT: vmov.16 q0[5], r0 ; CHECK-MVE-NEXT: vmov r0, s12 ; CHECK-MVE-NEXT: vmov.16 q0[6], r0 ; CHECK-MVE-NEXT: vmov r0, s8 ; CHECK-MVE-NEXT: vmov.16 q0[7], r0 ; CHECK-MVE-NEXT: bx lr ; CHECK-MVE-NEXT: .p2align 2 ; CHECK-MVE-NEXT: @ %bb.1: ; CHECK-MVE-NEXT: .LCPI45_0: ; CHECK-MVE-NEXT: .long 0x477fff00 @ float 65535 ; CHECK-MVE-NEXT: .LCPI45_1: ; CHECK-MVE-NEXT: .long 0x00000000 @ float 0 ; ; CHECK-MVEFP-LABEL: test_unsigned_v8f16_v8i16: ; CHECK-MVEFP: @ %bb.0: ; CHECK-MVEFP-NEXT: vcvt.u16.f16 q0, q0 ; CHECK-MVEFP-NEXT: bx lr %x = call <8 x i16> @llvm.fptoui.sat.v8f16.v8i16(<8 x half> %f) ret <8 x i16> %x } define arm_aapcs_vfpcc <8 x i19> @test_unsigned_v8f16_v8i19(<8 x half> %f) { ; CHECK-LABEL: test_unsigned_v8f16_v8i19: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r7, lr} ; CHECK-NEXT: push {r4, r5, r7, lr} ; CHECK-NEXT: vldr s6, .LCPI46_1 ; CHECK-NEXT: vcvtb.f32.f16 s8, s0 ; CHECK-NEXT: vcvtb.f32.f16 s12, s2 ; CHECK-NEXT: vcvtb.f32.f16 s10, s1 ; CHECK-NEXT: vcvtt.f32.f16 s14, s1 ; CHECK-NEXT: vcvtb.f32.f16 s1, s3 ; CHECK-NEXT: vcvtt.f32.f16 s0, s0 ; CHECK-NEXT: vcvtt.f32.f16 s2, s2 ; CHECK-NEXT: vldr s4, .LCPI46_0 ; CHECK-NEXT: vcvtt.f32.f16 s3, s3 ; CHECK-NEXT: vmaxnm.f32 s8, s8, s6 ; CHECK-NEXT: vmaxnm.f32 s10, s10, s6 ; CHECK-NEXT: vmaxnm.f32 s0, s0, s6 ; CHECK-NEXT: vmaxnm.f32 s12, s12, s6 ; CHECK-NEXT: vmaxnm.f32 s14, s14, s6 ; CHECK-NEXT: vmaxnm.f32 s2, s2, s6 ; CHECK-NEXT: vmaxnm.f32 s1, s1, s6 ; CHECK-NEXT: vmaxnm.f32 s6, s3, s6 ; CHECK-NEXT: vminnm.f32 s8, s8, s4 ; CHECK-NEXT: vminnm.f32 s10, s10, s4 ; CHECK-NEXT: vminnm.f32 s0, s0, s4 ; CHECK-NEXT: 
vminnm.f32 s12, s12, s4 ; CHECK-NEXT: vminnm.f32 s14, s14, s4 ; CHECK-NEXT: vminnm.f32 s2, s2, s4 ; CHECK-NEXT: vminnm.f32 s1, s1, s4 ; CHECK-NEXT: vminnm.f32 s4, s6, s4 ; CHECK-NEXT: vcvt.u32.f32 s1, s1 ; CHECK-NEXT: vcvt.u32.f32 s4, s4 ; CHECK-NEXT: vcvt.u32.f32 s2, s2 ; CHECK-NEXT: vcvt.u32.f32 s14, s14 ; CHECK-NEXT: vcvt.u32.f32 s12, s12 ; CHECK-NEXT: vcvt.u32.f32 s0, s0 ; CHECK-NEXT: vcvt.u32.f32 s10, s10 ; CHECK-NEXT: vmov r1, s1 ; CHECK-NEXT: vmov r3, s4 ; CHECK-NEXT: vcvt.u32.f32 s8, s8 ; CHECK-NEXT: vmov r4, s12 ; CHECK-NEXT: vmov r5, s10 ; CHECK-NEXT: lsrs r2, r1, #14 ; CHECK-NEXT: orr.w r12, r2, r3, lsl #5 ; CHECK-NEXT: vmov r3, s2 ; CHECK-NEXT: strh.w r12, [r0, #16] ; CHECK-NEXT: lsrs r2, r3, #1 ; CHECK-NEXT: orr.w lr, r2, r1, lsl #18 ; CHECK-NEXT: vmov r2, s14 ; CHECK-NEXT: lsrs r1, r2, #7 ; CHECK-NEXT: orr.w r1, r1, r4, lsl #12 ; CHECK-NEXT: orr.w r1, r1, r3, lsl #31 ; CHECK-NEXT: vmov r3, s0 ; CHECK-NEXT: lsrs r4, r3, #13 ; CHECK-NEXT: orr.w r4, r4, r5, lsl #6 ; CHECK-NEXT: orr.w r2, r4, r2, lsl #25 ; CHECK-NEXT: vmov r4, s8 ; CHECK-NEXT: orr.w r3, r4, r3, lsl #19 ; CHECK-NEXT: strd r3, r2, [r0] ; CHECK-NEXT: strd r1, lr, [r0, #8] ; CHECK-NEXT: lsr.w r1, r12, #16 ; CHECK-NEXT: strb r1, [r0, #18] ; CHECK-NEXT: pop {r4, r5, r7, pc} ; CHECK-NEXT: .p2align 2 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI46_0: ; CHECK-NEXT: .long 0x48ffffe0 @ float 524287 ; CHECK-NEXT: .LCPI46_1: ; CHECK-NEXT: .long 0x00000000 @ float 0 %x = call <8 x i19> @llvm.fptoui.sat.v8f16.v8i19(<8 x half> %f) ret <8 x i19> %x } define arm_aapcs_vfpcc <8 x i32> @test_unsigned_v8f16_v8i32_duplicate(<8 x half> %f) { ; CHECK-LABEL: test_unsigned_v8f16_v8i32_duplicate: ; CHECK: @ %bb.0: ; CHECK-NEXT: vmovx.f16 s4, s3 ; CHECK-NEXT: vmovx.f16 s6, s0 ; CHECK-NEXT: vcvt.u32.f16 s8, s4 ; CHECK-NEXT: vmovx.f16 s4, s2 ; CHECK-NEXT: vcvt.u32.f16 s10, s4 ; CHECK-NEXT: vmovx.f16 s4, s1 ; CHECK-NEXT: vcvt.u32.f16 s14, s2 ; CHECK-NEXT: vcvt.u32.f16 s2, s1 ; CHECK-NEXT: vcvt.u32.f16 s0, s0 ; CHECK-NEXT: 
vcvt.u32.f16 s4, s4 ; CHECK-NEXT: vcvt.u32.f16 s6, s6 ; CHECK-NEXT: vmov r0, s2 ; CHECK-NEXT: vmov r1, s0 ; CHECK-NEXT: vcvt.u32.f16 s12, s3 ; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-NEXT: vmov r0, s4 ; CHECK-NEXT: vmov r1, s6 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-NEXT: vmov r0, s12 ; CHECK-NEXT: vmov r1, s14 ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vmov r0, s8 ; CHECK-NEXT: vmov r1, s10 ; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 ; CHECK-NEXT: bx lr %x = call <8 x i32> @llvm.fptoui.sat.v8f16.v8i32(<8 x half> %f) ret <8 x i32> %x } define arm_aapcs_vfpcc <8 x i50> @test_unsigned_v8f16_v8i50(<8 x half> %f) { ; CHECK-LABEL: test_unsigned_v8f16_v8i50: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13} ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13} ; CHECK-NEXT: .pad #16 ; CHECK-NEXT: sub sp, #16 ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: vcvtb.f32.f16 s24, s18 ; CHECK-NEXT: vmov r0, s24 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcvtt.f32.f16 s26, s19 ; CHECK-NEXT: mov r7, r0 ; CHECK-NEXT: vmov r0, s26 ; CHECK-NEXT: vcvtb.f32.f16 s22, s16 ; CHECK-NEXT: vcvtt.f32.f16 s18, s18 ; CHECK-NEXT: vcmp.f32 s24, #0 ; CHECK-NEXT: str r1, [sp, #8] @ 4-byte Spill ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vmov r5, s22 ; CHECK-NEXT: vldr s20, .LCPI48_0 ; CHECK-NEXT: vmov r8, s18 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r7, #0 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcmp.f32 s26, #0 ; CHECK-NEXT: mov r10, r1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, s20 ; CHECK-NEXT: mov r6, r0 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt.w r10, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: vcmp.f32 s24, s20 ; CHECK-NEXT: itt gt ; CHECK-NEXT: movwgt r10, #65535 ; CHECK-NEXT: movtgt r10, #3 ; CHECK-NEXT: vmrs 
APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r7, #-1 ; CHECK-NEXT: str.w r7, [r4, #25] ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcmp.f32 s22, #0 ; CHECK-NEXT: str r1, [sp, #4] @ 4-byte Spill ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s22, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str r0, [r4] ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r6, #0 ; CHECK-NEXT: vcmp.f32 s26, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r6, #-1 ; CHECK-NEXT: lsl.w r0, r10, #22 ; CHECK-NEXT: str r6, [sp, #12] @ 4-byte Spill ; CHECK-NEXT: orr.w r6, r0, r6, lsr #10 ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: mov r5, r1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r5, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: mov r7, r0 ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: itt gt ; CHECK-NEXT: movwgt r5, #65535 ; CHECK-NEXT: movtgt r5, #3 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str.w r6, [r4, #45] ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r7, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r7, #-1 ; CHECK-NEXT: lsrs r0, r7, #14 ; CHECK-NEXT: orr.w r0, r0, r5, lsl #18 ; CHECK-NEXT: vcvtt.f32.f16 s18, s17 ; CHECK-NEXT: str.w r0, [r4, #33] ; CHECK-NEXT: vmov r0, s18 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: mov r9, r1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str r0, [sp] @ 4-byte Spill ; 
CHECK-NEXT: it lt ; CHECK-NEXT: movlt.w r9, #0 ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: mov r1, r0 ; CHECK-NEXT: itt gt ; CHECK-NEXT: movwgt r9, #65535 ; CHECK-NEXT: movtgt r9, #3 ; CHECK-NEXT: lsl.w r0, r9, #22 ; CHECK-NEXT: orr.w r0, r0, r1, lsr #10 ; CHECK-NEXT: vcvtt.f32.f16 s16, s16 ; CHECK-NEXT: str r0, [r4, #20] ; CHECK-NEXT: vmov r0, s16 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: mov r11, r1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt.w r11, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: itt gt ; CHECK-NEXT: movwgt r11, #65535 ; CHECK-NEXT: movtgt r11, #3 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt.w r8, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r8, #-1 ; CHECK-NEXT: lsr.w r0, r8, #14 ; CHECK-NEXT: vcvtb.f32.f16 s16, s19 ; CHECK-NEXT: orr.w r0, r0, r11, lsl #18 ; CHECK-NEXT: str r0, [r4, #8] ; CHECK-NEXT: lsr.w r0, r10, #10 ; CHECK-NEXT: strb.w r0, [r4, #49] ; CHECK-NEXT: vmov r0, s16 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: mov r6, r0 ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r6, #0 ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: ubfx r0, r5, #14, #4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r6, #-1 ; CHECK-NEXT: orr.w r0, r0, r6, lsl #4 ; CHECK-NEXT: str.w r0, [r4, #37] ; CHECK-NEXT: vcmp.f32 s24, #0 ; CHECK-NEXT: ldr r0, [sp, #8] @ 4-byte Reload ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s24, s20 ; CHECK-NEXT: vcvtb.f32.f16 s18, s17 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: itt gt ; CHECK-NEXT: movwgt r0, #65535 ; CHECK-NEXT: movtgt r0, #3 ; CHECK-NEXT: bfc r0, #18, #14 ; CHECK-NEXT: 
mov r10, r1 ; CHECK-NEXT: orr.w r0, r0, r7, lsl #18 ; CHECK-NEXT: str.w r0, [r4, #29] ; CHECK-NEXT: lsr.w r0, r9, #10 ; CHECK-NEXT: strb r0, [r4, #24] ; CHECK-NEXT: vmov r0, s18 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: ubfx r2, r11, #14, #4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: orr.w r2, r2, r0, lsl #4 ; CHECK-NEXT: str r2, [r4, #12] ; CHECK-NEXT: vcmp.f32 s22, #0 ; CHECK-NEXT: ldr r2, [sp, #4] @ 4-byte Reload ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s22, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: itt gt ; CHECK-NEXT: movwgt r2, #65535 ; CHECK-NEXT: movtgt r2, #3 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: itt gt ; CHECK-NEXT: movwgt r1, #65535 ; CHECK-NEXT: movtgt r1, #3 ; CHECK-NEXT: bfc r2, #18, #14 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt.w r10, #0 ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: itt gt ; CHECK-NEXT: movwgt r10, #65535 ; CHECK-NEXT: movtgt r10, #3 ; CHECK-NEXT: orr.w r2, r2, r8, lsl #18 ; CHECK-NEXT: str r2, [r4, #4] ; CHECK-NEXT: bfc r10, #18, #14 ; CHECK-NEXT: ldr r3, [sp, #12] @ 4-byte Reload ; CHECK-NEXT: lsrs r2, r6, #28 ; CHECK-NEXT: bfc r1, #18, #14 ; CHECK-NEXT: orr.w r2, r2, r10, lsl #4 ; CHECK-NEXT: lsrs r0, r0, #28 ; CHECK-NEXT: orr.w r2, r2, r3, lsl #22 ; CHECK-NEXT: str.w r2, [r4, #41] ; CHECK-NEXT: orr.w r0, r0, r1, lsl #4 ; CHECK-NEXT: ldr r1, [sp] @ 4-byte Reload ; CHECK-NEXT: orr.w r0, r0, r1, lsl #22 ; CHECK-NEXT: str r0, [r4, #16] ; CHECK-NEXT: add sp, #16 ; CHECK-NEXT: vpop {d8, d9, d10, d11, 
d12, d13} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} ; CHECK-NEXT: .p2align 2 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI48_0: ; CHECK-NEXT: .long 0x587fffff @ float 1.12589984E+15 %x = call <8 x i50> @llvm.fptoui.sat.v8f16.v8i50(<8 x half> %f) ret <8 x i50> %x } define arm_aapcs_vfpcc <8 x i64> @test_unsigned_v8f16_v8i64(<8 x half> %f) { ; CHECK-LABEL: test_unsigned_v8f16_v8i64: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: vcvtt.f32.f16 s20, s19 ; CHECK-NEXT: vmov r0, s20 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcvtb.f32.f16 s22, s19 ; CHECK-NEXT: mov r9, r0 ; CHECK-NEXT: vmov r0, s22 ; CHECK-NEXT: vldr s28, .LCPI49_0 ; CHECK-NEXT: vcmp.f32 s20, #0 ; CHECK-NEXT: vcvtt.f32.f16 s24, s16 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcvtb.f32.f16 s16, s16 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt.w r9, #0 ; CHECK-NEXT: vcmp.f32 s20, s28 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: mov r8, r1 ; CHECK-NEXT: vmov r5, s24 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r9, #-1 ; CHECK-NEXT: vmov r4, s16 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcmp.f32 s22, #0 ; CHECK-NEXT: mov r11, r0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s22, s28 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt.w r11, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s20, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r11, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s20, s28 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt.w r8, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: mov r10, r1 ; CHECK-NEXT: vcmp.f32 s22, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r8, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: mov r0, 
r5 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt.w r10, #0 ; CHECK-NEXT: vcmp.f32 s22, s28 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r10, #-1 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: mov r6, r0 ; CHECK-NEXT: vcmp.f32 s24, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: mov r0, r4 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r6, #0 ; CHECK-NEXT: vcmp.f32 s24, s28 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: mov r5, r1 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r6, #-1 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcvtt.f32.f16 s30, s17 ; CHECK-NEXT: mov r7, r1 ; CHECK-NEXT: vmov r1, s30 ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s16, s28 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: vmov q5[2], q5[0], r0, r6 ; CHECK-NEXT: mov r0, r1 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcmp.f32 s30, #0 ; CHECK-NEXT: mov r6, r0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s30, s28 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r6, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s24, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r6, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s24, s28 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r5, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r5, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s28 ; CHECK-NEXT: vcvtb.f32.f16 s16, s17 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r7, #0 ; CHECK-NEXT: vmov r0, s16 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r7, #-1 ; CHECK-NEXT: mov r4, r1 ; CHECK-NEXT: vmov q5[3], q5[1], r7, r5 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcvtt.f32.f16 s17, s18 ; CHECK-NEXT: mov r7, r1 ; CHECK-NEXT: vmov r1, s17 ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; 
CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s16, s28 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: vmov q6[2], q6[0], r0, r6 ; CHECK-NEXT: mov r0, r1 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcmp.f32 s17, #0 ; CHECK-NEXT: mov r6, r0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, s28 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r6, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s30, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r6, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s30, s28 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r4, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r4, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s28 ; CHECK-NEXT: vcvtb.f32.f16 s16, s18 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r7, #0 ; CHECK-NEXT: vmov r0, s16 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r7, #-1 ; CHECK-NEXT: mov r5, r1 ; CHECK-NEXT: vmov q6[3], q6[1], r7, r4 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: vmov q3[2], q3[0], r11, r9 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s28 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, s28 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r5, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r5, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vcmp.f32 s16, s28 ; CHECK-NEXT: vmov q2[2], q2[0], r0, r6 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r5 ; CHECK-NEXT: vmov q3[3], q3[1], r10, r8 ; 
CHECK-NEXT: vmov q0, q5 ; CHECK-NEXT: vmov q1, q6 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} ; CHECK-NEXT: .p2align 2 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI49_0: ; CHECK-NEXT: .long 0x5f7fffff @ float 1.8446743E+19 %x = call <8 x i64> @llvm.fptoui.sat.v8f16.v8i64(<8 x half> %f) ret <8 x i64> %x } define arm_aapcs_vfpcc <8 x i100> @test_unsigned_v8f16_v8i100(<8 x half> %f) { ; CHECK-LABEL: test_unsigned_v8f16_v8i100: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, lr} ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: vcvtb.f32.f16 s28, s19 ; CHECK-NEXT: vmov r0, s28 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcvtb.f32.f16 s26, s18 ; CHECK-NEXT: mov r5, r3 ; CHECK-NEXT: vmov r3, s26 ; CHECK-NEXT: vldr s20, .LCPI50_1 ; CHECK-NEXT: vcmp.f32 s28, #0 ; CHECK-NEXT: vcvtt.f32.f16 s30, s19 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s28, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s28, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s28, s20 ; CHECK-NEXT: str.w r2, [r4, #83] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s28, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str.w r1, [r4, #79] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s28, s20 ; CHECK-NEXT: vcvtb.f32.f16 s22, s16 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: vcvtb.f32.f16 s24, s17 ; CHECK-NEXT: str.w r0, [r4, #75] ; CHECK-NEXT: vmov r9, s30 ; CHECK-NEXT: vmov r8, s22 
; CHECK-NEXT: vmov r6, s24 ; CHECK-NEXT: mov r0, r3 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s26, #0 ; CHECK-NEXT: mov r7, r3 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, s20 ; CHECK-NEXT: str.w r2, [r4, #58] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str.w r1, [r4, #54] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s26, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str.w r0, [r4, #50] ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s24, #0 ; CHECK-NEXT: mov r10, r3 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s24, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s24, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s24, s20 ; CHECK-NEXT: str.w r2, [r4, #33] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s24, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str.w r1, [r4, #29] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s24, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str.w r0, [r4, #25] ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s22, #0 ; CHECK-NEXT: mov r8, r3 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s22, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: 
movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s22, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s22, s20 ; CHECK-NEXT: str r2, [r4, #8] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s22, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str r1, [r4, #4] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s22, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str r0, [r4] ; CHECK-NEXT: mov r0, r9 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s30, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s30, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s30, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s30, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: lsr.w r6, r1, #28 ; CHECK-NEXT: vcmp.f32 s30, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: orr.w r6, r6, r2, lsl #4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str.w r6, [r4, #95] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s30, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: lsrs r6, r0, #28 ; CHECK-NEXT: orr.w r1, r6, r1, lsl #4 ; CHECK-NEXT: vcmp.f32 s30, #0 ; CHECK-NEXT: str.w r1, [r4, #91] ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s30, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r3, #0 ; CHECK-NEXT: lsrs r1, r2, #28 ; CHECK-NEXT: vcvtt.f32.f16 s30, s18 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt r3, #15 ; CHECK-NEXT: orr.w r2, r1, r3, lsl #4 ; CHECK-NEXT: vmov r1, s30 ; CHECK-NEXT: strb.w 
r2, [r4, #99] ; CHECK-NEXT: vcmp.f32 s28, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r5, #0 ; CHECK-NEXT: vcmp.f32 s28, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt r5, #15 ; CHECK-NEXT: and r2, r5, #15 ; CHECK-NEXT: orr.w r0, r2, r0, lsl #4 ; CHECK-NEXT: str.w r0, [r4, #87] ; CHECK-NEXT: mov r0, r1 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s30, #0 ; CHECK-NEXT: vcvtt.f32.f16 s18, s17 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s30, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s30, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s30, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: lsr.w r6, r1, #28 ; CHECK-NEXT: vcmp.f32 s30, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: orr.w r6, r6, r2, lsl #4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str.w r6, [r4, #70] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s30, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: lsrs r6, r0, #28 ; CHECK-NEXT: orr.w r1, r6, r1, lsl #4 ; CHECK-NEXT: str.w r1, [r4, #66] ; CHECK-NEXT: vmov r1, s18 ; CHECK-NEXT: vcmp.f32 s30, #0 ; CHECK-NEXT: lsrs r2, r2, #28 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s30, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r3, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt r3, #15 ; CHECK-NEXT: orr.w r2, r2, r3, lsl #4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: strb.w r2, [r4, #74] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r7, #0 ; CHECK-NEXT: vcmp.f32 s26, s20 ; CHECK-NEXT: vcvtt.f32.f16 s16, s16 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt r7, #15 ; CHECK-NEXT: and r2, r7, 
#15 ; CHECK-NEXT: orr.w r0, r2, r0, lsl #4 ; CHECK-NEXT: str.w r0, [r4, #62] ; CHECK-NEXT: mov r0, r1 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: lsr.w r7, r1, #28 ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: orr.w r7, r7, r2, lsl #4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str.w r7, [r4, #45] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: lsrs r7, r0, #28 ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: orr.w r7, r7, r1, lsl #4 ; CHECK-NEXT: vmov r1, s16 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: str.w r7, [r4, #41] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r3, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: lsr.w r2, r2, #28 ; CHECK-NEXT: vcmp.f32 s24, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt r3, #15 ; CHECK-NEXT: orr.w r2, r2, r3, lsl #4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: strb.w r2, [r4, #49] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt.w r10, #0 ; CHECK-NEXT: vcmp.f32 s24, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r10, #15 ; CHECK-NEXT: and r2, r10, #15 ; CHECK-NEXT: orr.w r0, r2, r0, lsl #4 ; CHECK-NEXT: str.w r0, [r4, #37] ; CHECK-NEXT: mov r0, r1 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 
s16, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: b.w .LBB50_2 ; CHECK-NEXT: .p2align 2 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI50_1: ; CHECK-NEXT: .long 0x717fffff @ float 1.26765052E+30 ; CHECK-NEXT: .p2align 1 ; CHECK-NEXT: .LBB50_2: ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: lsrs r7, r1, #28 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: orr.w r7, r7, r2, lsl #4 ; CHECK-NEXT: str r7, [r4, #20] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: lsrs r7, r0, #28 ; CHECK-NEXT: orr.w r1, r7, r1, lsl #4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: str r1, [r4, #16] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r3, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: lsr.w r1, r2, #28 ; CHECK-NEXT: vcmp.f32 s22, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt r3, #15 ; CHECK-NEXT: orr.w r1, r1, r3, lsl #4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: strb r1, [r4, #24] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt.w r8, #0 ; CHECK-NEXT: vcmp.f32 s22, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r8, #15 ; CHECK-NEXT: and r1, r8, #15 ; CHECK-NEXT: orr.w r0, r1, r0, lsl #4 ; CHECK-NEXT: str r0, [r4, #12] ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, pc} ; CHECK-NEXT: @ %bb.3: %x = call <8 x i100> @llvm.fptoui.sat.v8f16.v8i100(<8 x half> %f) ret <8 x i100> %x } define arm_aapcs_vfpcc <8 x i128> @test_unsigned_v8f16_v8i128(<8 x half> %f) { ; CHECK-LABEL: test_unsigned_v8f16_v8i128: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} ; CHECK-NEXT: push.w 
{r4, r5, r6, r7, r8, r9, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: vcvtt.f32.f16 s26, s19 ; CHECK-NEXT: vcvtb.f32.f16 s22, s16 ; CHECK-NEXT: vmov r0, s26 ; CHECK-NEXT: vcvtt.f32.f16 s16, s16 ; CHECK-NEXT: vcvtb.f32.f16 s24, s17 ; CHECK-NEXT: vcvtb.f32.f16 s30, s19 ; CHECK-NEXT: vldr s20, .LCPI51_0 ; CHECK-NEXT: vmov r8, s22 ; CHECK-NEXT: vmov r9, s16 ; CHECK-NEXT: vcvtt.f32.f16 s28, s18 ; CHECK-NEXT: vmov r7, s24 ; CHECK-NEXT: vmov r6, s30 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s26, #0 ; CHECK-NEXT: vcvtb.f32.f16 s18, s18 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r3, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r3, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, s20 ; CHECK-NEXT: str r3, [r4, #124] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, s20 ; CHECK-NEXT: str r2, [r4, #120] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str r1, [r4, #116] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s26, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str r0, [r4, #112] ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: vmov r5, s28 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s30, #0 ; CHECK-NEXT: vcvtt.f32.f16 s26, s17 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s30, s20 ; CHECK-NEXT: it 
lt ; CHECK-NEXT: movlt r3, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s30, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r3, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s30, s20 ; CHECK-NEXT: str r3, [r4, #108] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s30, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s30, s20 ; CHECK-NEXT: str r2, [r4, #104] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s30, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str r1, [r4, #100] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s30, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str r0, [r4, #96] ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: vmov r6, s18 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s28, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s28, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r3, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s28, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r3, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s28, s20 ; CHECK-NEXT: str r3, [r4, #92] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s28, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s28, s20 ; CHECK-NEXT: str r2, [r4, #88] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s28, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str r1, [r4, #84] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s28, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr 
; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str r0, [r4, #80] ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: vmov r5, s26 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r3, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r3, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: str r3, [r4, #76] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: str r2, [r4, #72] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str r1, [r4, #68] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str r0, [r4, #64] ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s26, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r3, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r3, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, s20 ; CHECK-NEXT: str r3, [r4, #60] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, s20 ; CHECK-NEXT: str r2, [r4, #56] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: 
vcmp.f32 s26, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str r1, [r4, #52] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s26, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str r0, [r4, #48] ; CHECK-NEXT: mov r0, r7 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s24, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s24, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r3, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s24, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r3, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s24, s20 ; CHECK-NEXT: str r3, [r4, #44] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s24, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s24, s20 ; CHECK-NEXT: str r2, [r4, #40] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s24, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str r1, [r4, #36] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s24, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str r0, [r4, #32] ; CHECK-NEXT: mov r0, r9 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r3, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r3, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: str r3, [r4, #28] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: it gt ; 
CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: str r2, [r4, #24] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str r1, [r4, #20] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str r0, [r4, #16] ; CHECK-NEXT: mov r0, r8 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: vcmp.f32 s22, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s22, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r3, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s22, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r3, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s22, s20 ; CHECK-NEXT: str r3, [r4, #12] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s22, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r2, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s22, s20 ; CHECK-NEXT: str r2, [r4, #8] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s22, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r1, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: str r1, [r4, #4] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vcmp.f32 s22, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: str r0, [r4] ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} ; CHECK-NEXT: .p2align 2 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI51_0: ; CHECK-NEXT: .long 0x7f7fffff @ float 3.40282347E+38 %x = call <8 x i128> @llvm.fptoui.sat.v8f16.v8i128(<8 x half> %f) ret <8 x 
i128> %x }