; Compiler projects using llvm
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-MVE
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-MVEFP

;
; Float to signed 32-bit -- Vector size variation
;

declare <1 x i32> @llvm.fptosi.sat.v1f32.v1i32 (<1 x float>)
declare <2 x i32> @llvm.fptosi.sat.v2f32.v2i32 (<2 x float>)
declare <3 x i32> @llvm.fptosi.sat.v3f32.v3i32 (<3 x float>)
declare <4 x i32> @llvm.fptosi.sat.v4f32.v4i32 (<4 x float>)
declare <5 x i32> @llvm.fptosi.sat.v5f32.v5i32 (<5 x float>)
declare <6 x i32> @llvm.fptosi.sat.v6f32.v6i32 (<6 x float>)
declare <7 x i32> @llvm.fptosi.sat.v7f32.v7i32 (<7 x float>)
declare <8 x i32> @llvm.fptosi.sat.v8f32.v8i32 (<8 x float>)

; <1 x float> -> <1 x i32>: lowers to a single vcvt.s32.f32 with no explicit
; clamp or NaN handling, relying on the Arm VCVT instruction's own saturating
; conversion semantics. Output is identical for both RUN configs (shared
; CHECK prefix).
define arm_aapcs_vfpcc <1 x i32> @test_signed_v1f32_v1i32(<1 x float> %f) {
; CHECK-LABEL: test_signed_v1f32_v1i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcvt.s32.f32 s0, s0
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bx lr
    %x = call <1 x i32> @llvm.fptosi.sat.v1f32.v1i32(<1 x float> %f)
    ret <1 x i32> %x
}

; <2 x float> -> <2 x i32>: the v2i32 result is held in 64-bit lanes, so each
; lane is converted via the __aeabi_f2lz (float -> i64) libcall and then
; clamped against the INT32_MIN / INT32_MAX float bounds loaded from the
; constant pool (.LCPI1_0 = -2^31, .LCPI1_1 = nextafter(2^31, 0)); the
; self-compare (vcmp s,s) + "it vs / movvs #0" sequence zeroes NaN lanes.
define arm_aapcs_vfpcc <2 x i32> @test_signed_v2f32_v2i32(<2 x float> %f) {
; CHECK-LABEL: test_signed_v2f32_v2i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10}
; CHECK-NEXT:    vpush {d8, d9, d10}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, s17
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    vmov r0, s16
; CHECK-NEXT:    vldr s18, .LCPI1_0
; CHECK-NEXT:    mov r4, r1
; CHECK-NEXT:    vldr s20, .LCPI1_1
; CHECK-NEXT:    vcmp.f32 s17, s18
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r5, #-2147483648
; CHECK-NEXT:    vcmp.f32 s17, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r5, #-2147483648
; CHECK-NEXT:    vcmp.f32 s17, s17
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r5, #0
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcmp.f32 s16, s18
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r0, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r0, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s18
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r4, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s17
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r4, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s18
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r4, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r1, #-1
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    vmov q0[2], q0[0], r0, r5
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r1, #0
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmov q0[3], q0[1], r1, r4
; CHECK-NEXT:    vpop {d8, d9, d10}
; CHECK-NEXT:    pop {r4, r5, r7, pc}
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI1_0:
; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
; CHECK-NEXT:  .LCPI1_1:
; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
    %x = call <2 x i32> @llvm.fptosi.sat.v2f32.v2i32(<2 x float> %f)
    ret <2 x i32> %x
}

; <3 x float> -> <3 x i32>: handled as a full 4-lane q-register. Plain MVE
; (+fullfp16) converts each lane with scalar vcvt.s32.f32 and reassembles q0;
; MVE.fp emits a single vector vcvt.s32.f32 on q0.
define arm_aapcs_vfpcc <3 x i32> @test_signed_v3f32_v3i32(<3 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v3f32_v3i32:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    vcvt.s32.f32 s2, s2
; CHECK-MVE-NEXT:    vcvt.s32.f32 s0, s0
; CHECK-MVE-NEXT:    vcvt.s32.f32 s4, s3
; CHECK-MVE-NEXT:    vcvt.s32.f32 s6, s1
; CHECK-MVE-NEXT:    vmov r0, s2
; CHECK-MVE-NEXT:    vmov r1, s0
; CHECK-MVE-NEXT:    vmov q0[2], q0[0], r1, r0
; CHECK-MVE-NEXT:    vmov r0, s4
; CHECK-MVE-NEXT:    vmov r1, s6
; CHECK-MVE-NEXT:    vmov q0[3], q0[1], r1, r0
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: test_signed_v3f32_v3i32:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vcvt.s32.f32 q0, q0
; CHECK-MVEFP-NEXT:    bx lr
    %x = call <3 x i32> @llvm.fptosi.sat.v3f32.v3i32(<3 x float> %f)
    ret <3 x i32> %x
}

; <4 x float> -> <4 x i32>: same codegen as the v3 case above — four scalar
; vcvt.s32.f32 conversions plus lane inserts for plain MVE, a single vector
; vcvt.s32.f32 for MVE.fp.
define arm_aapcs_vfpcc <4 x i32> @test_signed_v4f32_v4i32(<4 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v4f32_v4i32:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    vcvt.s32.f32 s2, s2
; CHECK-MVE-NEXT:    vcvt.s32.f32 s0, s0
; CHECK-MVE-NEXT:    vcvt.s32.f32 s4, s3
; CHECK-MVE-NEXT:    vcvt.s32.f32 s6, s1
; CHECK-MVE-NEXT:    vmov r0, s2
; CHECK-MVE-NEXT:    vmov r1, s0
; CHECK-MVE-NEXT:    vmov q0[2], q0[0], r1, r0
; CHECK-MVE-NEXT:    vmov r0, s4
; CHECK-MVE-NEXT:    vmov r1, s6
; CHECK-MVE-NEXT:    vmov q0[3], q0[1], r1, r0
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: test_signed_v4f32_v4i32:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vcvt.s32.f32 q0, q0
; CHECK-MVEFP-NEXT:    bx lr
    %x = call <4 x i32> @llvm.fptosi.sat.v4f32.v4i32(<4 x float> %f)
    ret <4 x i32> %x
}

; <5 x float> -> <5 x i32>: result is too wide for registers, so it is
; returned indirectly through the pointer in r0 — lanes 0-3 via vstrw.32 and
; the fifth lane via a scalar store at offset #16.
define arm_aapcs_vfpcc <5 x i32> @test_signed_v5f32_v5i32(<5 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v5f32_v5i32:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    vcvt.s32.f32 s2, s2
; CHECK-MVE-NEXT:    vcvt.s32.f32 s0, s0
; CHECK-MVE-NEXT:    vcvt.s32.f32 s6, s3
; CHECK-MVE-NEXT:    vcvt.s32.f32 s8, s1
; CHECK-MVE-NEXT:    vcvt.s32.f32 s4, s4
; CHECK-MVE-NEXT:    vmov r1, s2
; CHECK-MVE-NEXT:    vmov r2, s0
; CHECK-MVE-NEXT:    vmov q0[2], q0[0], r2, r1
; CHECK-MVE-NEXT:    vmov r1, s6
; CHECK-MVE-NEXT:    vmov r2, s8
; CHECK-MVE-NEXT:    vmov q0[3], q0[1], r2, r1
; CHECK-MVE-NEXT:    vstrw.32 q0, [r0]
; CHECK-MVE-NEXT:    vstr s4, [r0, #16]
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: test_signed_v5f32_v5i32:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vcvt.s32.f32 q1, q1
; CHECK-MVEFP-NEXT:    vcvt.s32.f32 q0, q0
; CHECK-MVEFP-NEXT:    vmov r1, s4
; CHECK-MVEFP-NEXT:    str r1, [r0, #16]
; CHECK-MVEFP-NEXT:    vstrw.32 q0, [r0]
; CHECK-MVEFP-NEXT:    bx lr
    %x = call <5 x i32> @llvm.fptosi.sat.v5f32.v5i32(<5 x float> %f)
    ret <5 x i32> %x
}

; <6 x float> -> <6 x i32>: indirect return via r0, like the v5 case; lanes
; 4-5 are stored individually at offsets #16/#20 (MVE.fp pairs them with an
; strd after moving them to core registers).
define arm_aapcs_vfpcc <6 x i32> @test_signed_v6f32_v6i32(<6 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v6f32_v6i32:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    vcvt.s32.f32 s2, s2
; CHECK-MVE-NEXT:    vcvt.s32.f32 s0, s0
; CHECK-MVE-NEXT:    vcvt.s32.f32 s8, s3
; CHECK-MVE-NEXT:    vcvt.s32.f32 s10, s1
; CHECK-MVE-NEXT:    vcvt.s32.f32 s6, s5
; CHECK-MVE-NEXT:    vcvt.s32.f32 s4, s4
; CHECK-MVE-NEXT:    vmov r1, s2
; CHECK-MVE-NEXT:    vmov r2, s0
; CHECK-MVE-NEXT:    vmov q0[2], q0[0], r2, r1
; CHECK-MVE-NEXT:    vmov r1, s8
; CHECK-MVE-NEXT:    vmov r2, s10
; CHECK-MVE-NEXT:    vmov q0[3], q0[1], r2, r1
; CHECK-MVE-NEXT:    vstr s6, [r0, #20]
; CHECK-MVE-NEXT:    vstrw.32 q0, [r0]
; CHECK-MVE-NEXT:    vstr s4, [r0, #16]
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: test_signed_v6f32_v6i32:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vcvt.s32.f32 q1, q1
; CHECK-MVEFP-NEXT:    vcvt.s32.f32 q0, q0
; CHECK-MVEFP-NEXT:    vmov.f32 s6, s5
; CHECK-MVEFP-NEXT:    vmov r2, s4
; CHECK-MVEFP-NEXT:    vmov r1, s6
; CHECK-MVEFP-NEXT:    strd r2, r1, [r0, #16]
; CHECK-MVEFP-NEXT:    vstrw.32 q0, [r0]
; CHECK-MVEFP-NEXT:    bx lr
    %x = call <6 x i32> @llvm.fptosi.sat.v6f32.v6i32(<6 x float> %f)
    ret <6 x i32> %x
}

; <7 x float> -> <7 x i32>: indirect return via r0; lanes 4-6 are stored
; individually at offsets #16/#20/#24 after per-lane conversion.
define arm_aapcs_vfpcc <7 x i32> @test_signed_v7f32_v7i32(<7 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v7f32_v7i32:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    vcvt.s32.f32 s2, s2
; CHECK-MVE-NEXT:    vcvt.s32.f32 s0, s0
; CHECK-MVE-NEXT:    vcvt.s32.f32 s10, s3
; CHECK-MVE-NEXT:    vcvt.s32.f32 s12, s1
; CHECK-MVE-NEXT:    vcvt.s32.f32 s8, s5
; CHECK-MVE-NEXT:    vcvt.s32.f32 s4, s4
; CHECK-MVE-NEXT:    vcvt.s32.f32 s6, s6
; CHECK-MVE-NEXT:    vmov r1, s2
; CHECK-MVE-NEXT:    vmov r2, s0
; CHECK-MVE-NEXT:    vmov q0[2], q0[0], r2, r1
; CHECK-MVE-NEXT:    vmov r1, s10
; CHECK-MVE-NEXT:    vmov r2, s12
; CHECK-MVE-NEXT:    vmov q0[3], q0[1], r2, r1
; CHECK-MVE-NEXT:    vstr s8, [r0, #20]
; CHECK-MVE-NEXT:    vstr s4, [r0, #16]
; CHECK-MVE-NEXT:    vstrw.32 q0, [r0]
; CHECK-MVE-NEXT:    vstr s6, [r0, #24]
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: test_signed_v7f32_v7i32:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vcvt.s32.f32 q1, q1
; CHECK-MVEFP-NEXT:    vcvt.s32.f32 q0, q0
; CHECK-MVEFP-NEXT:    vmov.f32 s10, s5
; CHECK-MVEFP-NEXT:    vmov r2, s4
; CHECK-MVEFP-NEXT:    vmov r3, s6
; CHECK-MVEFP-NEXT:    vmov r1, s10
; CHECK-MVEFP-NEXT:    strd r2, r1, [r0, #16]
; CHECK-MVEFP-NEXT:    str r3, [r0, #24]
; CHECK-MVEFP-NEXT:    vstrw.32 q0, [r0]
; CHECK-MVEFP-NEXT:    bx lr
    %x = call <7 x i32> @llvm.fptosi.sat.v7f32.v7i32(<7 x float> %f)
    ret <7 x i32> %x
}

; <8 x float> -> <8 x i32>: fits in two q-registers, so the result is returned
; in q0/q1 directly — eight scalar conversions for plain MVE, two vector
; vcvt.s32.f32 instructions for MVE.fp.
define arm_aapcs_vfpcc <8 x i32> @test_signed_v8f32_v8i32(<8 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v8f32_v8i32:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    vcvt.s32.f32 s2, s2
; CHECK-MVE-NEXT:    vcvt.s32.f32 s0, s0
; CHECK-MVE-NEXT:    vcvt.s32.f32 s8, s3
; CHECK-MVE-NEXT:    vcvt.s32.f32 s10, s1
; CHECK-MVE-NEXT:    vcvt.s32.f32 s6, s6
; CHECK-MVE-NEXT:    vcvt.s32.f32 s4, s4
; CHECK-MVE-NEXT:    vcvt.s32.f32 s12, s7
; CHECK-MVE-NEXT:    vcvt.s32.f32 s14, s5
; CHECK-MVE-NEXT:    vmov r0, s2
; CHECK-MVE-NEXT:    vmov r1, s0
; CHECK-MVE-NEXT:    vmov q0[2], q0[0], r1, r0
; CHECK-MVE-NEXT:    vmov r0, s8
; CHECK-MVE-NEXT:    vmov r1, s10
; CHECK-MVE-NEXT:    vmov q0[3], q0[1], r1, r0
; CHECK-MVE-NEXT:    vmov r0, s6
; CHECK-MVE-NEXT:    vmov r1, s4
; CHECK-MVE-NEXT:    vmov q1[2], q1[0], r1, r0
; CHECK-MVE-NEXT:    vmov r0, s12
; CHECK-MVE-NEXT:    vmov r1, s14
; CHECK-MVE-NEXT:    vmov q1[3], q1[1], r1, r0
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: test_signed_v8f32_v8i32:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vcvt.s32.f32 q0, q0
; CHECK-MVEFP-NEXT:    vcvt.s32.f32 q1, q1
; CHECK-MVEFP-NEXT:    bx lr
    %x = call <8 x i32> @llvm.fptosi.sat.v8f32.v8i32(<8 x float> %f)
    ret <8 x i32> %x
}

;
; Double to signed 32-bit -- Vector size variation
;

declare <1 x i32> @llvm.fptosi.sat.v1f64.v1i32 (<1 x double>)
declare <2 x i32> @llvm.fptosi.sat.v2f64.v2i32 (<2 x double>)
declare <3 x i32> @llvm.fptosi.sat.v3f64.v3i32 (<3 x double>)
declare <4 x i32> @llvm.fptosi.sat.v4f64.v4i32 (<4 x double>)
declare <5 x i32> @llvm.fptosi.sat.v5f64.v5i32 (<5 x double>)
declare <6 x i32> @llvm.fptosi.sat.v6f64.v6i32 (<6 x double>)

; <1 x double> -> <1 x i32>: no double-precision hardware in these configs, so
; everything goes through AEABI libcalls — __aeabi_dcmpgt/__aeabi_dcmpge
; against the INT32_MAX/INT32_MIN doubles in the constant pool select the
; saturated bounds, __aeabi_d2iz does the truncating conversion, and
; __aeabi_dcmpun(x, x) detects NaN (result forced to 0).
define arm_aapcs_vfpcc <1 x i32> @test_signed_v1f64_v1i32(<1 x double> %f) {
; CHECK-LABEL: test_signed_v1f64_v1i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT:    vldr d1, .LCPI8_0
; CHECK-NEXT:    vmov r5, r4, d0
; CHECK-NEXT:    vmov r2, r3, d1
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI8_1
; CHECK-NEXT:    mov r8, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    vmov r2, r3, d0
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    bl __aeabi_d2iz
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    cmp r7, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r6, #-2147483648
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r2, r5
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    cmp.w r8, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r6, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r6, #0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI8_0:
; CHECK-NEXT:    .long 4290772992 @ double 2147483647
; CHECK-NEXT:    .long 1105199103
; CHECK-NEXT:  .LCPI8_1:
; CHECK-NEXT:    .long 0 @ double -2147483648
; CHECK-NEXT:    .long 3252682752
    %x = call <1 x i32> @llvm.fptosi.sat.v1f64.v1i32(<1 x double> %f)
    ret <1 x i32> %x
}

; <2 x double> -> <2 x i32>: libcall-based per-lane saturation as in the v1f64
; case, but using __aeabi_d2lz (double -> i64) since the v2i32 result lives in
; 64-bit lanes; each lane needs the full dcmpgt/dcmpge/d2lz/dcmpun sequence
; for both halves of the i64, with compare results and constants spilled to
; the stack across the calls.
define arm_aapcs_vfpcc <2 x i32> @test_signed_v2f64_v2i32(<2 x double> %f) {
; CHECK-LABEL: test_signed_v2f64_v2i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    .pad #32
; CHECK-NEXT:    sub sp, #32
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vldr d0, .LCPI9_0
; CHECK-NEXT:    vmov r9, r8, d9
; CHECK-NEXT:    vmov r11, r10, d0
; CHECK-NEXT:    str.w r11, [sp, #20] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    str.w r10, [sp, #24] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI9_1
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    vmov r5, r3, d0
; CHECK-NEXT:    str r3, [sp, #16] @ 4-byte Spill
; CHECK-NEXT:    str r5, [sp, #28] @ 4-byte Spill
; CHECK-NEXT:    mov r2, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    str r1, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r0, #-2147483648
; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    cmp r1, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r0, #-2147483648
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    vmov r7, r6, d8
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    str r4, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r2, r5
; CHECK-NEXT:    ldr r5, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r11, #-2147483648
; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    mov r10, r1
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r11, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r11, #0
; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    ldr r5, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r5, #-1
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r5, #0
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r5, #0
; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    ldr r3, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r10, #-1
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r10, #0
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r10, #0
; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    vmov q0[2], q0[0], r11, r0
; CHECK-NEXT:    vmov q0[3], q0[1], r10, r5
; CHECK-NEXT:    add sp, #32
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI9_0:
; CHECK-NEXT:    .long 4290772992 @ double 2147483647
; CHECK-NEXT:    .long 1105199103
; CHECK-NEXT:  .LCPI9_1:
; CHECK-NEXT:    .long 0 @ double -2147483648
; CHECK-NEXT:    .long 3252682752
    %x = call <2 x i32> @llvm.fptosi.sat.v2f64.v2i32(<2 x double> %f)
    ret <2 x i32> %x
}

; <3 x double> -> <3 x i32>: three independent libcall-based saturation
; sequences (__aeabi_dcmpgt/__aeabi_dcmpge against the pooled bounds,
; __aeabi_d2lz conversion, __aeabi_dcmpun NaN check) with the incoming
; d-registers shuffled into d8/d9 to survive the calls; results are inserted
; into q0 lanes at the end.
define arm_aapcs_vfpcc <3 x i32> @test_signed_v3f64_v3i32(<3 x double> %f) {
; CHECK-LABEL: test_signed_v3f64_v3i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    .pad #24
; CHECK-NEXT:    sub sp, #24
; CHECK-NEXT:    vmov.f32 s16, s0
; CHECK-NEXT:    vmov.f32 s17, s1
; CHECK-NEXT:    vldr d0, .LCPI10_0
; CHECK-NEXT:    vmov r4, r6, d1
; CHECK-NEXT:    vmov r2, r11, d0
; CHECK-NEXT:    vmov.f32 s18, s4
; CHECK-NEXT:    vmov.f32 s19, s5
; CHECK-NEXT:    str r2, [sp, #20] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    str.w r11, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI10_1
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    vmov r2, r8, d0
; CHECK-NEXT:    str r2, [sp, #16] @ 4-byte Spill
; CHECK-NEXT:    str.w r8, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r10, r0
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r10, #-2147483648
; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r4
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    vmov r5, r7, d9
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r10, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r10, #0
; CHECK-NEXT:    ldr r2, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r2, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    cmp.w r11, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r6, #-2147483648
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    mov r2, r5
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    vmov r9, r8, d8
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r6, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r6, #0
; CHECK-NEXT:    ldr r2, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    ldr r3, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r2, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    ldr r3, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r7, #-2147483648
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r7, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    vmov.32 q0[1], r10
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r7, #0
; CHECK-NEXT:    vmov q0[2], q0[0], r7, r6
; CHECK-NEXT:    add sp, #24
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI10_0:
; CHECK-NEXT:    .long 4290772992 @ double 2147483647
; CHECK-NEXT:    .long 1105199103
; CHECK-NEXT:  .LCPI10_1:
; CHECK-NEXT:    .long 0 @ double -2147483648
; CHECK-NEXT:    .long 3252682752
    %x = call <3 x i32> @llvm.fptosi.sat.v3f64.v3i32(<3 x double> %f)
    ret <3 x i32> %x
}

; <4 x double> -> <4 x i32>: four libcall-based saturation sequences (compare
; against pooled INT32 bounds, __aeabi_d2lz, __aeabi_dcmpun NaN check) with
; the input q-registers copied to callee-saved q4/q5 and intermediate compare
; results spilled to the stack; lanes are recombined into q0 at the end.
define arm_aapcs_vfpcc <4 x i32> @test_signed_v4f64_v4i32(<4 x double> %f) {
; CHECK-LABEL: test_signed_v4f64_v4i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    .pad #32
; CHECK-NEXT:    sub sp, #32
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vldr d0, .LCPI11_0
; CHECK-NEXT:    vmov q5, q1
; CHECK-NEXT:    vmov r5, r6, d10
; CHECK-NEXT:    vmov r9, r3, d0
; CHECK-NEXT:    str r3, [sp, #24] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI11_1
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    vmov r2, r3, d0
; CHECK-NEXT:    str r3, [sp, #20] @ 4-byte Spill
; CHECK-NEXT:    str r2, [sp, #28] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r8, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    vmov r11, r1, d11
; CHECK-NEXT:    cmp.w r8, #0
; CHECK-NEXT:    mov r2, r5
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    vmov r7, r10, d8
; CHECK-NEXT:    str r1, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r0, #-2147483648
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r0, #-2147483648
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    ldr.w r8, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r10
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    str r4, [sp, #16] @ 4-byte Spill
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    str.w r9, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r4, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r10
; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r10
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r6, #-2147483648
; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r10
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r6, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r6, #0
; CHECK-NEXT:    ldr r5, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r11
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr.w r9, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r10, r0
; CHECK-NEXT:    mov r0, r11
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r11
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r8, r0
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r8, #-2147483648
; CHECK-NEXT:    mov r0, r11
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    cmp.w r10, #0
; CHECK-NEXT:    vmov r7, r4, d9
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r8, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r8, #0
; CHECK-NEXT:    ldr r2, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    ldr r3, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r3, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    mov r10, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    cmp.w r11, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r5, #-2147483648
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    cmp.w r10, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r5, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r5, #0
; CHECK-NEXT:    ldr r0, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    vmov q0[2], q0[0], r6, r0
; CHECK-NEXT:    vmov q0[3], q0[1], r5, r8
; CHECK-NEXT:    add sp, #32
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI11_0:
; CHECK-NEXT:    .long 4290772992 @ double 2147483647
; CHECK-NEXT:    .long 1105199103
; CHECK-NEXT:  .LCPI11_1:
; CHECK-NEXT:    .long 0 @ double -2147483648
; CHECK-NEXT:    .long 3252682752
    %x = call <4 x i32> @llvm.fptosi.sat.v4f64.v4i32(<4 x double> %f)
    ret <4 x i32> %x
}

define arm_aapcs_vfpcc <5 x i32> @test_signed_v5f64_v5i32(<5 x double> %f) {
; CHECK-LABEL: test_signed_v5f64_v5i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    .pad #32
; CHECK-NEXT:    sub sp, #32
; CHECK-NEXT:    vmov.f32 s16, s0
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    vmov.f32 s17, s1
; CHECK-NEXT:    vldr d0, .LCPI12_0
; CHECK-NEXT:    vmov r5, r4, d4
; CHECK-NEXT:    str r0, [sp, #16] @ 4-byte Spill
; CHECK-NEXT:    vmov r2, r3, d0
; CHECK-NEXT:    vmov.f32 s20, s6
; CHECK-NEXT:    vmov.f32 s18, s4
; CHECK-NEXT:    vmov.f32 s22, s2
; CHECK-NEXT:    vmov.f32 s21, s7
; CHECK-NEXT:    vmov.f32 s19, s5
; CHECK-NEXT:    vmov.f32 s23, s3
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    strd r2, r3, [sp, #20] @ 8-byte Folded Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI12_1
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    vmov r2, r3, d0
; CHECK-NEXT:    str r3, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    str r2, [sp, #28] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r10, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    vmov r8, r0, d11
; CHECK-NEXT:    cmp.w r10, #0
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r2, r5
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    vmov r9, r6, d10
; CHECK-NEXT:    str r0, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r11, #-2147483648
; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r11, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r11, #0
; CHECK-NEXT:    str.w r11, [r7, #16]
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    ldr.w r10, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    ldr r7, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r4, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    ldr.w r11, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r2, r4
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r0, #-2147483648
; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    cmp r1, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r0, #-2147483648
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r5, #0
; CHECK-NEXT:    str r5, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    ldr r5, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r4
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    mov r6, r11
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r10, r0
; CHECK-NEXT:    cmp r7, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r10, #-2147483648
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    vmov r11, r4, d9
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r10, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r10, #0
; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r0, r11
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r8, r0
; CHECK-NEXT:    mov r0, r11
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r11
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r7, #-2147483648
; CHECK-NEXT:    mov r0, r11
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    cmp.w r8, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r7, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    vmov r5, r4, d8
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r7, #0
; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r8, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r6, #-2147483648
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r2, r5
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    cmp.w r8, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r6, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r6, #0
; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    vmov q0[2], q0[0], r6, r7
; CHECK-NEXT:    vmov q0[3], q0[1], r10, r0
; CHECK-NEXT:    ldr r0, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    vstrw.32 q0, [r0]
; CHECK-NEXT:    add sp, #32
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI12_0:
; CHECK-NEXT:    .long 4290772992 @ double 2147483647
; CHECK-NEXT:    .long 1105199103
; CHECK-NEXT:  .LCPI12_1:
; CHECK-NEXT:    .long 0 @ double -2147483648
; CHECK-NEXT:    .long 3252682752
    %x = call <5 x i32> @llvm.fptosi.sat.v5f64.v5i32(<5 x double> %f)
    ret <5 x i32> %x
}

define arm_aapcs_vfpcc <6 x i32> @test_signed_v6f64_v6i32(<6 x double> %f) {
; CHECK-LABEL: test_signed_v6f64_v6i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12}
; CHECK-NEXT:    .pad #40
; CHECK-NEXT:    sub sp, #40
; CHECK-NEXT:    vmov.f32 s16, s0
; CHECK-NEXT:    str r0, [sp, #20] @ 4-byte Spill
; CHECK-NEXT:    vmov.f32 s17, s1
; CHECK-NEXT:    vldr d0, .LCPI13_0
; CHECK-NEXT:    vmov r9, r4, d5
; CHECK-NEXT:    vmov r2, r6, d0
; CHECK-NEXT:    vmov.f32 s22, s8
; CHECK-NEXT:    vmov.f32 s20, s6
; CHECK-NEXT:    vmov.f32 s18, s4
; CHECK-NEXT:    vmov.f32 s24, s2
; CHECK-NEXT:    vmov.f32 s23, s9
; CHECK-NEXT:    vmov.f32 s21, s7
; CHECK-NEXT:    vmov.f32 s19, s5
; CHECK-NEXT:    vmov.f32 s25, s3
; CHECK-NEXT:    str r2, [sp, #24] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    str r6, [sp, #28] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI13_1
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    vmov r2, r3, d0
; CHECK-NEXT:    strd r2, r3, [sp, #32] @ 8-byte Folded Spill
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r10, r0
; CHECK-NEXT:    vmov r8, r0, d10
; CHECK-NEXT:    cmp.w r11, #0
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    vmov r7, r5, d11
; CHECK-NEXT:    str r0, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    vmov r1, r0, d12
; CHECK-NEXT:    strd r1, r0, [sp, #12] @ 8-byte Folded Spill
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r10, #-2147483648
; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r10, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r10, #0
; CHECK-NEXT:    ldr.w r11, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    str.w r10, [r11, #20]
; CHECK-NEXT:    ldr.w r10, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldrd r2, r3, [sp, #32] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r6, #-2147483648
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r6, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r6, #0
; CHECK-NEXT:    str.w r6, [r11, #16]
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    ldr r4, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    ldr.w r11, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r7, [sp, #32] @ 4-byte Reload
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    ldr r5, [sp, #36] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r10, r0
; CHECK-NEXT:    cmp r6, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r10, #-2147483648
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r10, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r10, #0
; CHECK-NEXT:    ldr r4, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    ldr r6, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    ldr r2, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r5, r6
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r8, r0
; CHECK-NEXT:    cmp.w r11, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r8, #-2147483648
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r4
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    vmov r7, r6, d9
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r8, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r8, #0
; CHECK-NEXT:    ldr.w r11, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    ldr r3, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldrd r2, r3, [sp, #32] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r4, #-2147483648
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r4, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    vmov r7, r6, d8
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    ldr r3, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldrd r2, r3, [sp, #32] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    cmp.w r11, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r5, #-2147483648
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r5, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r5, #0
; CHECK-NEXT:    vmov q0[2], q0[0], r5, r4
; CHECK-NEXT:    ldr r0, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    vmov q0[3], q0[1], r8, r10
; CHECK-NEXT:    vstrw.32 q0, [r0]
; CHECK-NEXT:    add sp, #40
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI13_0:
; CHECK-NEXT:    .long 4290772992 @ double 2147483647
; CHECK-NEXT:    .long 1105199103
; CHECK-NEXT:  .LCPI13_1:
; CHECK-NEXT:    .long 0 @ double -2147483648
; CHECK-NEXT:    .long 3252682752
    ; No f64 hardware with MVE: each of the 6 lanes goes through an
    ; __aeabi_d2lz libcall, with __aeabi_dcmpge/__aeabi_dcmpgt selecting
    ; INT32_MIN/INT32_MAX for out-of-range inputs and __aeabi_dcmpun
    ; zeroing NaN lanes (fptosi.sat semantics).
    %x = call <6 x i32> @llvm.fptosi.sat.v6f64.v6i32(<6 x double> %f)
    ret <6 x i32> %x
}

;
; FP16 to signed 32-bit -- Vector size variation
;

declare <1 x i32> @llvm.fptosi.sat.v1f16.v1i32 (<1 x half>)
declare <2 x i32> @llvm.fptosi.sat.v2f16.v2i32 (<2 x half>)
declare <3 x i32> @llvm.fptosi.sat.v3f16.v3i32 (<3 x half>)
declare <4 x i32> @llvm.fptosi.sat.v4f16.v4i32 (<4 x half>)
declare <5 x i32> @llvm.fptosi.sat.v5f16.v5i32 (<5 x half>)
declare <6 x i32> @llvm.fptosi.sat.v6f16.v6i32 (<6 x half>)
declare <7 x i32> @llvm.fptosi.sat.v7f16.v7i32 (<7 x half>)
declare <8 x i32> @llvm.fptosi.sat.v8f16.v8i32 (<8 x half>)

define arm_aapcs_vfpcc <1 x i32> @test_signed_v1f16_v1i32(<1 x half> %f) {
; CHECK-LABEL: test_signed_v1f16_v1i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcvt.s32.f16 s0, s0
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bx lr
    ; Single lane: a lone vcvt.s32.f16 implements the saturating conversion.
    %x = call <1 x i32> @llvm.fptosi.sat.v1f16.v1i32(<1 x half> %f)
    ret <1 x i32> %x
}

define arm_aapcs_vfpcc <2 x i32> @test_signed_v2f16_v2i32(<2 x half> %f) {
; CHECK-LABEL: test_signed_v2f16_v2i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vcvtt.f32.f16 s18, s16
; CHECK-NEXT:    vmov r0, s18
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcvtb.f32.f16 s16, s16
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    vmov r0, s16
; CHECK-NEXT:    vldr s20, .LCPI15_0
; CHECK-NEXT:    vldr s22, .LCPI15_1
; CHECK-NEXT:    mov r4, r1
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r5, #-2147483648
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r5, #-2147483648
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r5, #0
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r0, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r0, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r4, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r4, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r4, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r1, #-1
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    vmov q0[2], q0[0], r0, r5
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r1, #0
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmov q0[3], q0[1], r1, r4
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    pop {r4, r5, r7, pc}
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI15_0:
; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
; CHECK-NEXT:  .LCPI15_1:
; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
    ; The <2 x i32> result is promoted to a 64-bit-per-lane container, so each
    ; half is widened to f32 (vcvtb/vcvtt) and converted via __aeabi_f2lz;
    ; vcmp against the i32 bounds clamps the result and the self-compare
    ; (unordered, "vs") forces NaN lanes to zero.
    %x = call <2 x i32> @llvm.fptosi.sat.v2f16.v2i32(<2 x half> %f)
    ret <2 x i32> %x
}

define arm_aapcs_vfpcc <3 x i32> @test_signed_v3f16_v3i32(<3 x half> %f) {
; CHECK-LABEL: test_signed_v3f16_v3i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcvt.s32.f16 s6, s0
; CHECK-NEXT:    vcvt.s32.f16 s0, s1
; CHECK-NEXT:    vcvt.s32.f16 s4, s2
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    vmov.32 q0[1], r0
; CHECK-NEXT:    vmov r0, s4
; CHECK-NEXT:    vmov r1, s6
; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
; CHECK-NEXT:    bx lr
    ; Three scalar vcvt.s32.f16 conversions packed back into a q register.
    %x = call <3 x i32> @llvm.fptosi.sat.v3f16.v3i32(<3 x half> %f)
    ret <3 x i32> %x
}

define arm_aapcs_vfpcc <4 x i32> @test_signed_v4f16_v4i32(<4 x half> %f) {
; CHECK-LABEL: test_signed_v4f16_v4i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmovx.f16 s2, s1
; CHECK-NEXT:    vcvt.s32.f16 s4, s2
; CHECK-NEXT:    vmovx.f16 s2, s0
; CHECK-NEXT:    vcvt.s32.f16 s6, s2
; CHECK-NEXT:    vcvt.s32.f16 s2, s1
; CHECK-NEXT:    vcvt.s32.f16 s0, s0
; CHECK-NEXT:    vmov r0, s2
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
; CHECK-NEXT:    vmov r0, s4
; CHECK-NEXT:    vmov r1, s6
; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
; CHECK-NEXT:    bx lr
    ; Odd half-lanes are extracted with vmovx, then all four lanes use the
    ; saturating scalar vcvt.s32.f16 before being repacked.
    %x = call <4 x i32> @llvm.fptosi.sat.v4f16.v4i32(<4 x half> %f)
    ret <4 x i32> %x
}

define arm_aapcs_vfpcc <5 x i32> @test_signed_v5f16_v5i32(<5 x half> %f) {
; CHECK-LABEL: test_signed_v5f16_v5i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmovx.f16 s6, s0
; CHECK-NEXT:    vmovx.f16 s4, s1
; CHECK-NEXT:    vcvt.s32.f16 s8, s1
; CHECK-NEXT:    vcvt.s32.f16 s0, s0
; CHECK-NEXT:    vcvt.s32.f16 s4, s4
; CHECK-NEXT:    vcvt.s32.f16 s6, s6
; CHECK-NEXT:    vmov r1, s8
; CHECK-NEXT:    vcvt.s32.f16 s2, s2
; CHECK-NEXT:    vmov r2, s0
; CHECK-NEXT:    vmov q2[2], q2[0], r2, r1
; CHECK-NEXT:    vmov r1, s4
; CHECK-NEXT:    vmov r2, s6
; CHECK-NEXT:    vmov q2[3], q2[1], r2, r1
; CHECK-NEXT:    vmov r1, s2
; CHECK-NEXT:    str r1, [r0, #16]
; CHECK-NEXT:    vstrw.32 q2, [r0]
; CHECK-NEXT:    bx lr
    ; 5-element result is returned indirectly via r0: four lanes go out as a
    ; q-register store, the fifth as a scalar str at offset #16.
    %x = call <5 x i32> @llvm.fptosi.sat.v5f16.v5i32(<5 x half> %f)
    ret <5 x i32> %x
}

define arm_aapcs_vfpcc <6 x i32> @test_signed_v6f16_v6i32(<6 x half> %f) {
; CHECK-LABEL: test_signed_v6f16_v6i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmovx.f16 s8, s0
; CHECK-NEXT:    vmovx.f16 s6, s1
; CHECK-NEXT:    vcvt.s32.f16 s10, s1
; CHECK-NEXT:    vcvt.s32.f16 s0, s0
; CHECK-NEXT:    vcvt.s32.f16 s4, s2
; CHECK-NEXT:    vmovx.f16 s2, s2
; CHECK-NEXT:    vcvt.s32.f16 s6, s6
; CHECK-NEXT:    vcvt.s32.f16 s8, s8
; CHECK-NEXT:    vmov r1, s10
; CHECK-NEXT:    vcvt.s32.f16 s2, s2
; CHECK-NEXT:    vmov r2, s0
; CHECK-NEXT:    vmov q3[2], q3[0], r2, r1
; CHECK-NEXT:    vmov r1, s6
; CHECK-NEXT:    vmov r2, s8
; CHECK-NEXT:    vmov q3[3], q3[1], r2, r1
; CHECK-NEXT:    vmov r1, s2
; CHECK-NEXT:    vmov r2, s4
; CHECK-NEXT:    strd r2, r1, [r0, #16]
; CHECK-NEXT:    vstrw.32 q3, [r0]
; CHECK-NEXT:    bx lr
    ; Indirect return via r0: four lanes stored as a q register, the last two
    ; with a single strd at offset #16.
    %x = call <6 x i32> @llvm.fptosi.sat.v6f16.v6i32(<6 x half> %f)
    ret <6 x i32> %x
}

define arm_aapcs_vfpcc <7 x i32> @test_signed_v7f16_v7i32(<7 x half> %f) {
; CHECK-LABEL: test_signed_v7f16_v7i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmovx.f16 s10, s0
; CHECK-NEXT:    vmovx.f16 s8, s1
; CHECK-NEXT:    vcvt.s32.f16 s12, s1
; CHECK-NEXT:    vcvt.s32.f16 s0, s0
; CHECK-NEXT:    vcvt.s32.f16 s4, s2
; CHECK-NEXT:    vmovx.f16 s2, s2
; CHECK-NEXT:    vcvt.s32.f16 s8, s8
; CHECK-NEXT:    vcvt.s32.f16 s10, s10
; CHECK-NEXT:    vmov r1, s12
; CHECK-NEXT:    vcvt.s32.f16 s2, s2
; CHECK-NEXT:    vmov r2, s0
; CHECK-NEXT:    vcvt.s32.f16 s6, s3
; CHECK-NEXT:    vmov q3[2], q3[0], r2, r1
; CHECK-NEXT:    vmov r1, s8
; CHECK-NEXT:    vmov r2, s10
; CHECK-NEXT:    vmov q3[3], q3[1], r2, r1
; CHECK-NEXT:    vmov r1, s2
; CHECK-NEXT:    vmov r2, s4
; CHECK-NEXT:    vmov r3, s6
; CHECK-NEXT:    strd r2, r1, [r0, #16]
; CHECK-NEXT:    str r3, [r0, #24]
; CHECK-NEXT:    vstrw.32 q3, [r0]
; CHECK-NEXT:    bx lr
    ; Indirect return via r0: a q-register store for lanes 0-3, strd for
    ; lanes 4-5, and a final scalar str for lane 6 at offset #24.
    %x = call <7 x i32> @llvm.fptosi.sat.v7f16.v7i32(<7 x half> %f)
    ret <7 x i32> %x
}

define arm_aapcs_vfpcc <8 x i32> @test_signed_v8f16_v8i32(<8 x half> %f) {
; CHECK-LABEL: test_signed_v8f16_v8i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmovx.f16 s4, s3
; CHECK-NEXT:    vmovx.f16 s6, s0
; CHECK-NEXT:    vcvt.s32.f16 s8, s4
; CHECK-NEXT:    vmovx.f16 s4, s2
; CHECK-NEXT:    vcvt.s32.f16 s10, s4
; CHECK-NEXT:    vmovx.f16 s4, s1
; CHECK-NEXT:    vcvt.s32.f16 s14, s2
; CHECK-NEXT:    vcvt.s32.f16 s2, s1
; CHECK-NEXT:    vcvt.s32.f16 s0, s0
; CHECK-NEXT:    vcvt.s32.f16 s4, s4
; CHECK-NEXT:    vcvt.s32.f16 s6, s6
; CHECK-NEXT:    vmov r0, s2
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    vcvt.s32.f16 s12, s3
; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
; CHECK-NEXT:    vmov r0, s4
; CHECK-NEXT:    vmov r1, s6
; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
; CHECK-NEXT:    vmov r0, s12
; CHECK-NEXT:    vmov r1, s14
; CHECK-NEXT:    vmov q1[2], q1[0], r1, r0
; CHECK-NEXT:    vmov r0, s8
; CHECK-NEXT:    vmov r1, s10
; CHECK-NEXT:    vmov q1[3], q1[1], r1, r0
; CHECK-NEXT:    bx lr
    ; Eight scalar vcvt.s32.f16 conversions, repacked into the q0/q1
    ; register pair for the <8 x i32> return.
    %x = call <8 x i32> @llvm.fptosi.sat.v8f16.v8i32(<8 x half> %f)
    ret <8 x i32> %x
}

;
; 2-Vector float to signed integer -- result size variation
;

declare <4 x   i1> @llvm.fptosi.sat.v4f32.v4i1  (<4 x float>)
declare <4 x   i8> @llvm.fptosi.sat.v4f32.v4i8  (<4 x float>)
declare <4 x  i13> @llvm.fptosi.sat.v4f32.v4i13 (<4 x float>)
declare <4 x  i16> @llvm.fptosi.sat.v4f32.v4i16 (<4 x float>)
declare <4 x  i19> @llvm.fptosi.sat.v4f32.v4i19 (<4 x float>)
declare <4 x  i50> @llvm.fptosi.sat.v4f32.v4i50 (<4 x float>)
declare <4 x  i64> @llvm.fptosi.sat.v4f32.v4i64 (<4 x float>)
declare <4 x i100> @llvm.fptosi.sat.v4f32.v4i100(<4 x float>)
declare <4 x i128> @llvm.fptosi.sat.v4f32.v4i128(<4 x float>)

define arm_aapcs_vfpcc <4 x i1> @test_signed_v4f32_v4i1(<4 x float> %f) {
; CHECK-LABEL: test_signed_v4f32_v4i1:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov.f32 s4, #-1.000000e+00
; CHECK-NEXT:    vldr s6, .LCPI22_0
; CHECK-NEXT:    vmaxnm.f32 s12, s0, s4
; CHECK-NEXT:    vmaxnm.f32 s8, s3, s4
; CHECK-NEXT:    vminnm.f32 s12, s12, s6
; CHECK-NEXT:    vmaxnm.f32 s10, s2, s4
; CHECK-NEXT:    vcvt.s32.f32 s12, s12
; CHECK-NEXT:    vmaxnm.f32 s4, s1, s4
; CHECK-NEXT:    vminnm.f32 s4, s4, s6
; CHECK-NEXT:    vminnm.f32 s10, s10, s6
; CHECK-NEXT:    vcvt.s32.f32 s4, s4
; CHECK-NEXT:    movs r1, #0
; CHECK-NEXT:    vcmp.f32 s0, s0
; CHECK-NEXT:    vminnm.f32 s8, s8, s6
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcvt.s32.f32 s10, s10
; CHECK-NEXT:    vcmp.f32 s1, s1
; CHECK-NEXT:    vcvt.s32.f32 s8, s8
; CHECK-NEXT:    vmov r2, s12
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    and r2, r2, #1
; CHECK-NEXT:    vcmp.f32 s2, s2
; CHECK-NEXT:    rsb.w r2, r2, #0
; CHECK-NEXT:    bfi r1, r2, #0, #1
; CHECK-NEXT:    vmov r2, s4
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    and r2, r2, #1
; CHECK-NEXT:    vcmp.f32 s3, s3
; CHECK-NEXT:    rsb.w r2, r2, #0
; CHECK-NEXT:    bfi r1, r2, #1, #1
; CHECK-NEXT:    vmov r2, s10
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    and r2, r2, #1
; CHECK-NEXT:    rsb.w r2, r2, #0
; CHECK-NEXT:    bfi r1, r2, #2, #1
; CHECK-NEXT:    vmov r2, s8
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    and r2, r2, #1
; CHECK-NEXT:    rsbs r2, r2, #0
; CHECK-NEXT:    bfi r1, r2, #3, #1
; CHECK-NEXT:    strb r1, [r0]
; CHECK-NEXT:    bx lr
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI22_0:
; CHECK-NEXT:    .long 0x00000000 @ float 0
    ; i1 range is [-1, 0]: clamp with vmaxnm(-1)/vminnm(0), convert, zero any
    ; NaN lane via the unordered self-compare, then pack the four bits into a
    ; byte with bfi and store through r0.
    %x = call <4 x i1> @llvm.fptosi.sat.v4f32.v4i1(<4 x float> %f)
    ret <4 x i1> %x
}

define arm_aapcs_vfpcc <4 x i8> @test_signed_v4f32_v4i8(<4 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v4f32_v4i8:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    vldr s4, .LCPI23_0
; CHECK-MVE-NEXT:    vcmp.f32 s2, s2
; CHECK-MVE-NEXT:    vldr s6, .LCPI23_1
; CHECK-MVE-NEXT:    vmaxnm.f32 s12, s2, s4
; CHECK-MVE-NEXT:    vmaxnm.f32 s10, s0, s4
; CHECK-MVE-NEXT:    vminnm.f32 s12, s12, s6
; CHECK-MVE-NEXT:    vmaxnm.f32 s8, s1, s4
; CHECK-MVE-NEXT:    vminnm.f32 s10, s10, s6
; CHECK-MVE-NEXT:    vmaxnm.f32 s4, s3, s4
; CHECK-MVE-NEXT:    vcvt.s32.f32 s12, s12
; CHECK-MVE-NEXT:    vminnm.f32 s8, s8, s6
; CHECK-MVE-NEXT:    vminnm.f32 s4, s4, s6
; CHECK-MVE-NEXT:    vcvt.s32.f32 s10, s10
; CHECK-MVE-NEXT:    vcvt.s32.f32 s8, s8
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcvt.s32.f32 s4, s4
; CHECK-MVE-NEXT:    vcmp.f32 s0, s0
; CHECK-MVE-NEXT:    vmov r0, s12
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r1, s10
; CHECK-MVE-NEXT:    vcmp.f32 s3, s3
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r2, s4
; CHECK-MVE-NEXT:    vcmp.f32 s1, s1
; CHECK-MVE-NEXT:    vmov q0[2], q0[0], r1, r0
; CHECK-MVE-NEXT:    vmov r3, s8
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r2, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r3, #0
; CHECK-MVE-NEXT:    vmov q0[3], q0[1], r3, r2
; CHECK-MVE-NEXT:    bx lr
; CHECK-MVE-NEXT:    .p2align 2
; CHECK-MVE-NEXT:  @ %bb.1:
; CHECK-MVE-NEXT:  .LCPI23_0:
; CHECK-MVE-NEXT:    .long 0xc3000000 @ float -128
; CHECK-MVE-NEXT:  .LCPI23_1:
; CHECK-MVE-NEXT:    .long 0x42fe0000 @ float 127
;
; CHECK-MVEFP-LABEL: test_signed_v4f32_v4i8:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vmov.i32 q1, #0x7f
; CHECK-MVEFP-NEXT:    vcvt.s32.f32 q0, q0
; CHECK-MVEFP-NEXT:    vmvn.i32 q2, #0x7f
; CHECK-MVEFP-NEXT:    vmin.s32 q0, q0, q1
; CHECK-MVEFP-NEXT:    vmax.s32 q0, q0, q2
; CHECK-MVEFP-NEXT:    bx lr
    ; MVE (int-only): per-lane clamp to [-128, 127] via vmaxnm/vminnm with
    ; NaN lanes zeroed. MVE.fp: vector vcvt then integer vmin/vmax against
    ; #0x7f / ~0x7f.
    %x = call <4 x i8> @llvm.fptosi.sat.v4f32.v4i8(<4 x float> %f)
    ret <4 x i8> %x
}

define arm_aapcs_vfpcc <4 x i13> @test_signed_v4f32_v4i13(<4 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v4f32_v4i13:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    vldr s4, .LCPI24_0
; CHECK-MVE-NEXT:    vcmp.f32 s2, s2
; CHECK-MVE-NEXT:    vldr s6, .LCPI24_1
; CHECK-MVE-NEXT:    vmaxnm.f32 s12, s2, s4
; CHECK-MVE-NEXT:    vmaxnm.f32 s10, s0, s4
; CHECK-MVE-NEXT:    vminnm.f32 s12, s12, s6
; CHECK-MVE-NEXT:    vmaxnm.f32 s8, s1, s4
; CHECK-MVE-NEXT:    vminnm.f32 s10, s10, s6
; CHECK-MVE-NEXT:    vmaxnm.f32 s4, s3, s4
; CHECK-MVE-NEXT:    vcvt.s32.f32 s12, s12
; CHECK-MVE-NEXT:    vminnm.f32 s8, s8, s6
; CHECK-MVE-NEXT:    vminnm.f32 s4, s4, s6
; CHECK-MVE-NEXT:    vcvt.s32.f32 s10, s10
; CHECK-MVE-NEXT:    vcvt.s32.f32 s8, s8
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcvt.s32.f32 s4, s4
; CHECK-MVE-NEXT:    vcmp.f32 s0, s0
; CHECK-MVE-NEXT:    vmov r0, s12
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r1, s10
; CHECK-MVE-NEXT:    vcmp.f32 s3, s3
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r2, s4
; CHECK-MVE-NEXT:    vcmp.f32 s1, s1
; CHECK-MVE-NEXT:    vmov q0[2], q0[0], r1, r0
; CHECK-MVE-NEXT:    vmov r3, s8
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r2, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r3, #0
; CHECK-MVE-NEXT:    vmov q0[3], q0[1], r3, r2
; CHECK-MVE-NEXT:    bx lr
; CHECK-MVE-NEXT:    .p2align 2
; CHECK-MVE-NEXT:  @ %bb.1:
; CHECK-MVE-NEXT:  .LCPI24_0:
; CHECK-MVE-NEXT:    .long 0xc5800000 @ float -4096
; CHECK-MVE-NEXT:  .LCPI24_1:
; CHECK-MVE-NEXT:    .long 0x457ff000 @ float 4095
;
; CHECK-MVEFP-LABEL: test_signed_v4f32_v4i13:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vmov.i32 q1, #0xfff
; CHECK-MVEFP-NEXT:    vcvt.s32.f32 q0, q0
; CHECK-MVEFP-NEXT:    vmvn.i32 q2, #0xfff
; CHECK-MVEFP-NEXT:    vmin.s32 q0, q0, q1
; CHECK-MVEFP-NEXT:    vmax.s32 q0, q0, q2
; CHECK-MVEFP-NEXT:    bx lr
    ; Non-power-of-two width i13 saturates to [-4096, 4095]: scalar
    ; vmaxnm/vminnm clamp on plain MVE, vector vmin/vmax on MVE.fp.
    %x = call <4 x i13> @llvm.fptosi.sat.v4f32.v4i13(<4 x float> %f)
    ret <4 x i13> %x
}

define arm_aapcs_vfpcc <4 x i16> @test_signed_v4f32_v4i16(<4 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v4f32_v4i16:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    vldr s4, .LCPI25_0
; CHECK-MVE-NEXT:    vcmp.f32 s2, s2
; CHECK-MVE-NEXT:    vldr s6, .LCPI25_1
; CHECK-MVE-NEXT:    vmaxnm.f32 s12, s2, s4
; CHECK-MVE-NEXT:    vmaxnm.f32 s10, s0, s4
; CHECK-MVE-NEXT:    vminnm.f32 s12, s12, s6
; CHECK-MVE-NEXT:    vmaxnm.f32 s8, s1, s4
; CHECK-MVE-NEXT:    vminnm.f32 s10, s10, s6
; CHECK-MVE-NEXT:    vmaxnm.f32 s4, s3, s4
; CHECK-MVE-NEXT:    vcvt.s32.f32 s12, s12
; CHECK-MVE-NEXT:    vminnm.f32 s8, s8, s6
; CHECK-MVE-NEXT:    vminnm.f32 s4, s4, s6
; CHECK-MVE-NEXT:    vcvt.s32.f32 s10, s10
; CHECK-MVE-NEXT:    vcvt.s32.f32 s8, s8
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcvt.s32.f32 s4, s4
; CHECK-MVE-NEXT:    vcmp.f32 s0, s0
; CHECK-MVE-NEXT:    vmov r0, s12
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r1, s10
; CHECK-MVE-NEXT:    vcmp.f32 s3, s3
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r2, s4
; CHECK-MVE-NEXT:    vcmp.f32 s1, s1
; CHECK-MVE-NEXT:    vmov q0[2], q0[0], r1, r0
; CHECK-MVE-NEXT:    vmov r3, s8
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r2, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r3, #0
; CHECK-MVE-NEXT:    vmov q0[3], q0[1], r3, r2
; CHECK-MVE-NEXT:    bx lr
; CHECK-MVE-NEXT:    .p2align 2
; CHECK-MVE-NEXT:  @ %bb.1:
; CHECK-MVE-NEXT:  .LCPI25_0:
; CHECK-MVE-NEXT:    .long 0xc7000000 @ float -32768
; CHECK-MVE-NEXT:  .LCPI25_1:
; CHECK-MVE-NEXT:    .long 0x46fffe00 @ float 32767
;
; CHECK-MVEFP-LABEL: test_signed_v4f32_v4i16:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vcvt.s32.f32 q0, q0
; CHECK-MVEFP-NEXT:    vqmovnb.s32 q0, q0
; CHECK-MVEFP-NEXT:    vmovlb.s16 q0, q0
; CHECK-MVEFP-NEXT:    bx lr
    ; i16 clamps to [-32768, 32767]; MVE.fp uses the saturating narrow
    ; vqmovnb.s32 followed by sign-extending vmovlb.s16 instead of
    ; explicit min/max.
    %x = call <4 x i16> @llvm.fptosi.sat.v4f32.v4i16(<4 x float> %f)
    ret <4 x i16> %x
}

define arm_aapcs_vfpcc <4 x i19> @test_signed_v4f32_v4i19(<4 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v4f32_v4i19:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    vldr s4, .LCPI26_0
; CHECK-MVE-NEXT:    vcmp.f32 s2, s2
; CHECK-MVE-NEXT:    vldr s6, .LCPI26_1
; CHECK-MVE-NEXT:    vmaxnm.f32 s12, s2, s4
; CHECK-MVE-NEXT:    vmaxnm.f32 s10, s0, s4
; CHECK-MVE-NEXT:    vminnm.f32 s12, s12, s6
; CHECK-MVE-NEXT:    vmaxnm.f32 s8, s1, s4
; CHECK-MVE-NEXT:    vminnm.f32 s10, s10, s6
; CHECK-MVE-NEXT:    vmaxnm.f32 s4, s3, s4
; CHECK-MVE-NEXT:    vcvt.s32.f32 s12, s12
; CHECK-MVE-NEXT:    vminnm.f32 s8, s8, s6
; CHECK-MVE-NEXT:    vminnm.f32 s4, s4, s6
; CHECK-MVE-NEXT:    vcvt.s32.f32 s10, s10
; CHECK-MVE-NEXT:    vcvt.s32.f32 s8, s8
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcvt.s32.f32 s4, s4
; CHECK-MVE-NEXT:    vcmp.f32 s0, s0
; CHECK-MVE-NEXT:    vmov r0, s12
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r1, s10
; CHECK-MVE-NEXT:    vcmp.f32 s3, s3
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r2, s4
; CHECK-MVE-NEXT:    vcmp.f32 s1, s1
; CHECK-MVE-NEXT:    vmov q0[2], q0[0], r1, r0
; CHECK-MVE-NEXT:    vmov r3, s8
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r2, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r3, #0
; CHECK-MVE-NEXT:    vmov q0[3], q0[1], r3, r2
; CHECK-MVE-NEXT:    bx lr
; CHECK-MVE-NEXT:    .p2align 2
; CHECK-MVE-NEXT:  @ %bb.1:
; CHECK-MVE-NEXT:  .LCPI26_0:
; CHECK-MVE-NEXT:    .long 0xc8800000 @ float -262144
; CHECK-MVE-NEXT:  .LCPI26_1:
; CHECK-MVE-NEXT:    .long 0x487fffc0 @ float 262143
;
; CHECK-MVEFP-LABEL: test_signed_v4f32_v4i19:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    movs r0, #0
; CHECK-MVEFP-NEXT:    vmov.i32 q1, #0x3ffff
; CHECK-MVEFP-NEXT:    vcvt.s32.f32 q0, q0
; CHECK-MVEFP-NEXT:    movt r0, #65532
; CHECK-MVEFP-NEXT:    vmin.s32 q0, q0, q1
; CHECK-MVEFP-NEXT:    vdup.32 q1, r0
; CHECK-MVEFP-NEXT:    vmax.s32 q0, q0, q1
; CHECK-MVEFP-NEXT:    bx lr
    ; i19 bounds [-262144, 262143]: the lower bound is not encodable as a
    ; vector immediate, so MVE.fp materializes it in r0 (movs/movt) and
    ; splats with vdup before the vmax clamp.
    %x = call <4 x i19> @llvm.fptosi.sat.v4f32.v4i19(<4 x float> %f)
    ret <4 x i19> %x
}

define arm_aapcs_vfpcc <4 x i32> @test_signed_v4f32_v4i32_duplicate(<4 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v4f32_v4i32_duplicate:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    vcvt.s32.f32 s2, s2
; CHECK-MVE-NEXT:    vcvt.s32.f32 s0, s0
; CHECK-MVE-NEXT:    vcvt.s32.f32 s4, s3
; CHECK-MVE-NEXT:    vcvt.s32.f32 s6, s1
; CHECK-MVE-NEXT:    vmov r0, s2
; CHECK-MVE-NEXT:    vmov r1, s0
; CHECK-MVE-NEXT:    vmov q0[2], q0[0], r1, r0
; CHECK-MVE-NEXT:    vmov r0, s4
; CHECK-MVE-NEXT:    vmov r1, s6
; CHECK-MVE-NEXT:    vmov q0[3], q0[1], r1, r0
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: test_signed_v4f32_v4i32_duplicate:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vcvt.s32.f32 q0, q0
; CHECK-MVEFP-NEXT:    bx lr
    ; Exact-width case: MVE lowers to four scalar vcvt.s32.f32, MVE.fp to a
    ; single vector vcvt — no extra clamping needed since the hardware
    ; conversion already saturates.
    %x = call <4 x i32> @llvm.fptosi.sat.v4f32.v4i32(<4 x float> %f)
    ret <4 x i32> %x
}

define arm_aapcs_vfpcc <4 x i50> @test_signed_v4f32_v4i50(<4 x float> %f) {
; CHECK-LABEL: test_signed_v4f32_v4i50:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    mov r8, r0
; CHECK-NEXT:    vmov r0, s17
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    vmov r0, s19
; CHECK-NEXT:    vldr s20, .LCPI28_0
; CHECK-NEXT:    mov r7, r1
; CHECK-NEXT:    vmov r4, s16
; CHECK-NEXT:    vcmp.f32 s17, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    itt lt
; CHECK-NEXT:    movlt r7, #0
; CHECK-NEXT:    movtlt r7, #65534
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vldr s22, .LCPI28_1
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    mov r5, r1
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    vcmp.f32 s17, s22
; CHECK-NEXT:    itt lt
; CHECK-NEXT:    movlt r5, #0
; CHECK-NEXT:    movtlt r5, #65534
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    itt gt
; CHECK-NEXT:    movwgt r7, #65535
; CHECK-NEXT:    movtgt r7, #1
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    mov r4, r1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s22
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    itt gt
; CHECK-NEXT:    movwgt r5, #65535
; CHECK-NEXT:    movtgt r5, #1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s22
; CHECK-NEXT:    str.w r0, [r8]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r6, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r6, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s17
; CHECK-NEXT:    itt vs
; CHECK-NEXT:    movvs r6, #0
; CHECK-NEXT:    movvs r5, #0
; CHECK-NEXT:    lsls r0, r5, #22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s20
; CHECK-NEXT:    orr.w r0, r0, r6, lsr #10
; CHECK-NEXT:    str.w r0, [r8, #20]
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r7, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r9, #0
; CHECK-NEXT:    vcmp.f32 s17, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r9, #-1
; CHECK-NEXT:    vcmp.f32 s17, s17
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs.w r9, #0
; CHECK-NEXT:    lsr.w r0, r9, #14
; CHECK-NEXT:    orr.w r1, r0, r7, lsl #18
; CHECK-NEXT:    vmov r0, s18
; CHECK-NEXT:    str.w r1, [r8, #8]
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    lsrs r2, r5, #10
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    itt lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    movtlt r1, #65534
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    itt gt
; CHECK-NEXT:    movwgt r1, #65535
; CHECK-NEXT:    movtgt r1, #1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    itt lt
; CHECK-NEXT:    movlt r4, #0
; CHECK-NEXT:    movtlt r4, #65534
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    itt gt
; CHECK-NEXT:    movwgt r4, #65535
; CHECK-NEXT:    movtgt r4, #1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    strb.w r2, [r8, #24]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    ubfx r2, r7, #14, #4
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    orr.w r2, r2, r0, lsl #4
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str.w r2, [r8, #12]
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r4, #0
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    bfc r4, #18, #14
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    orr.w r2, r4, r9, lsl #18
; CHECK-NEXT:    str.w r2, [r8, #4]
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    lsrs r0, r0, #28
; CHECK-NEXT:    bfc r1, #18, #14
; CHECK-NEXT:    orr.w r0, r0, r1, lsl #4
; CHECK-NEXT:    orr.w r0, r0, r6, lsl #22
; CHECK-NEXT:    str.w r0, [r8, #16]
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, pc}
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI28_0:
; CHECK-NEXT:    .long 0xd8000000 @ float -5.62949953E+14
; CHECK-NEXT:  .LCPI28_1:
; CHECK-NEXT:    .long 0x57ffffff @ float 5.6294992E+14
    ; i50 saturation bounds (+/-2^49 as float) are .LCPI28_0/1; each lane is
    ; converted with the __aeabi_f2lz libcall, the high half clamped via the
    ; movt #65534 / movw #65535+movt #1 pairs, NaN lanes (vs after a
    ; self-compare) zeroed, and the four 50-bit results bit-packed into the
    ; indirect return buffer held in r8.
    %x = call <4 x i50> @llvm.fptosi.sat.v4f32.v4i50(<4 x float> %f)
    ret <4 x i50> %x
}

define arm_aapcs_vfpcc <4 x i64> @test_signed_v4f32_v4i64(<4 x float> %f) {
; CHECK-LABEL: test_signed_v4f32_v4i64:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov r0, s19
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    mov r10, r0
; CHECK-NEXT:    vmov r0, s18
; CHECK-NEXT:    vldr s22, .LCPI29_0
; CHECK-NEXT:    mov r9, r1
; CHECK-NEXT:    vldr s20, .LCPI29_1
; CHECK-NEXT:    vmov r8, s16
; CHECK-NEXT:    vcmp.f32 s19, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r10, #0
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r10, #-1
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vmov r4, s17
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs.w r10, #0
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r7, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r7, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r7, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r9, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r9, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    mov r6, r1
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs.w r9, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r6, #-2147483648
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r6, #-2147483648
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r6, #0
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    vcmp.f32 s17, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r5, #0
; CHECK-NEXT:    vcmp.f32 s17, s20
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r5, #-1
; CHECK-NEXT:    vcmp.f32 s17, s17
; CHECK-NEXT:    mov r4, r1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r5, #0
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    vmov q1[2], q1[0], r7, r10
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r4, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s17
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r4, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r4, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r1, #-2147483648
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    vmov q0[2], q0[0], r0, r5
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r1, #-2147483648
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    vmov q1[3], q1[1], r6, r9
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmov q0[3], q0[1], r1, r4
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI29_0:
; CHECK-NEXT:    .long 0xdf000000 @ float -9.22337203E+18
; CHECK-NEXT:  .LCPI29_1:
; CHECK-NEXT:    .long 0x5effffff @ float 9.22337149E+18
    ; i64 bounds (+/-2^63 as float) are .LCPI29_0/1; each lane goes through
    ; the __aeabi_f2lz libcall, the low word is clamped to 0 / -1 and the
    ; high word to 0x80000000 / 0x7fffffff (mvn of INT_MIN), NaN lanes (vs)
    ; are zeroed, and the pairs are reassembled into q0/q1 for return.
    %x = call <4 x i64> @llvm.fptosi.sat.v4f32.v4i64(<4 x float> %f)
    ret <4 x i64> %x
}

define arm_aapcs_vfpcc <4 x i100> @test_signed_v4f32_v4i100(<4 x float> %f) {
; CHECK-LABEL: test_signed_v4f32_v4i100:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    vmov r0, s18
; CHECK-NEXT:    vldr s20, .LCPI30_0
; CHECK-NEXT:    vmov r7, s19
; CHECK-NEXT:    vmov r5, s16
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vldr s22, .LCPI30_1
; CHECK-NEXT:    mov r6, r3
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    str.w r2, [r4, #33]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str.w r1, [r4, #29]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str.w r0, [r4, #25]
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    mov r5, r3
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    str r2, [r4, #8]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str r1, [r4, #4]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str r0, [r4]
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s19, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    lsr.w r7, r1, #28
; CHECK-NEXT:    vcmp.f32 s19, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    orr.w r7, r7, r2, lsl #4
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str.w r7, [r4, #45]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    lsrs r2, r2, #28
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    lsrs r7, r0, #28
; CHECK-NEXT:    vcmp.f32 s19, s22
; CHECK-NEXT:    orr.w r7, r7, r1, lsl #4
; CHECK-NEXT:    vmov r1, s17
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    str.w r7, [r4, #41]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    mvnlt r3, #7
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r3, #7
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    orr.w r2, r2, r3, lsl #4
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    strb.w r2, [r4, #49]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    mvnlt r6, #7
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r6, #7
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r6, #0
; CHECK-NEXT:    and r2, r6, #15
; CHECK-NEXT:    orr.w r0, r2, r0, lsl #4
; CHECK-NEXT:    str.w r0, [r4, #37]
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s17, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s17
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s17
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    lsrs r7, r1, #28
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s20
; CHECK-NEXT:    orr.w r7, r7, r2, lsl #4
; CHECK-NEXT:    str r7, [r4, #20]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s17
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s20
; CHECK-NEXT:    lsr.w r7, r0, #28
; CHECK-NEXT:    orr.w r1, r7, r1, lsl #4
; CHECK-NEXT:    str r1, [r4, #16]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    mvnlt r3, #7
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s17
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r3, #7
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    lsr.w r1, r2, #28
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    orr.w r1, r1, r3, lsl #4
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    strb r1, [r4, #24]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    mvnlt r5, #7
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r5, #7
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r5, #0
; CHECK-NEXT:    and r1, r5, #15
; CHECK-NEXT:    orr.w r0, r1, r0, lsl #4
; CHECK-NEXT:    str r0, [r4, #12]
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI30_0:
; CHECK-NEXT:    .long 0x70ffffff @ float 6.33825262E+29
; CHECK-NEXT:  .LCPI30_1:
; CHECK-NEXT:    .long 0xf1000000 @ float -6.338253E+29
    ; i100 bounds (~ +/-2^99 as float) are .LCPI30_0/1; each lane is
    ; converted with __fixsfti (128-bit result in r0-r3), the top word is
    ; clamped to 7 / ~7 (the 4 significant bits of an i100 high word), NaN
    ; lanes (vs) are zeroed, and the 100-bit lanes are packed at unaligned
    ; byte offsets through the indirect return pointer in r4.
    %x = call <4 x i100> @llvm.fptosi.sat.v4f32.v4i100(<4 x float> %f)
    ret <4 x i100> %x
}

define arm_aapcs_vfpcc <4 x i128> @test_signed_v4f32_v4i128(<4 x float> %f) {
; CHECK-LABEL: test_signed_v4f32_v4i128:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    vmov r0, s19
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vmov r5, s18
; CHECK-NEXT:    vldr s22, .LCPI31_0
; CHECK-NEXT:    vldr s20, .LCPI31_1
; CHECK-NEXT:    vmov r7, s16
; CHECK-NEXT:    vcmp.f32 s19, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    str r3, [r4, #60]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    str r2, [r4, #56]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str r1, [r4, #52]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str r0, [r4, #48]
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    vmov r6, s17
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    str r3, [r4, #44]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    str r2, [r4, #40]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str r1, [r4, #36]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str r0, [r4, #32]
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s17, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s17
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s20
; CHECK-NEXT:    str r3, [r4, #28]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s17
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s20
; CHECK-NEXT:    str r2, [r4, #24]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s17
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str r1, [r4, #20]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s17, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s17, s17
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str r0, [r4, #16]
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    str r3, [r4, #12]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    str r2, [r4, #8]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str r1, [r4, #4]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str r0, [r4]
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI31_0:
; CHECK-NEXT:    .long 0xff000000 @ float -1.70141183E+38
; CHECK-NEXT:  .LCPI31_1:
; CHECK-NEXT:    .long 0x7effffff @ float 1.70141173E+38
    ; i128 bounds (+/-2^127 as float) are .LCPI31_0/1; each lane uses
    ; __fixsfti, the top word is clamped to INT_MIN / ~INT_MIN and the lower
    ; words to 0 / -1, NaN lanes (vs after self-compare) become 0, and the
    ; four 16-byte lanes are stored word-aligned via the return pointer in r4.
    %x = call <4 x i128> @llvm.fptosi.sat.v4f32.v4i128(<4 x float> %f)
    ret <4 x i128> %x
}

;
; 2-Vector double to signed integer -- result size variation
;

declare <2 x   i1> @llvm.fptosi.sat.v2f64.v2i1  (<2 x double>)
declare <2 x   i8> @llvm.fptosi.sat.v2f64.v2i8  (<2 x double>)
declare <2 x  i13> @llvm.fptosi.sat.v2f64.v2i13 (<2 x double>)
declare <2 x  i16> @llvm.fptosi.sat.v2f64.v2i16 (<2 x double>)
declare <2 x  i19> @llvm.fptosi.sat.v2f64.v2i19 (<2 x double>)
declare <2 x  i50> @llvm.fptosi.sat.v2f64.v2i50 (<2 x double>)
declare <2 x  i64> @llvm.fptosi.sat.v2f64.v2i64 (<2 x double>)
declare <2 x i100> @llvm.fptosi.sat.v2f64.v2i100(<2 x double>)
declare <2 x i128> @llvm.fptosi.sat.v2f64.v2i128(<2 x double>)

define arm_aapcs_vfpcc <2 x i1> @test_signed_v2f64_v2i1(<2 x double> %f) {
; CHECK-LABEL: test_signed_v2f64_v2i1:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    .pad #24
; CHECK-NEXT:    sub sp, #24
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vldr d0, .LCPI32_0
; CHECK-NEXT:    vmov r8, r7, d8
; CHECK-NEXT:    str r0, [sp, #20] @ 4-byte Spill
; CHECK-NEXT:    vmov r2, r3, d0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    strd r2, r3, [sp, #12] @ 8-byte Folded Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI32_1
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    vmov r2, r3, d0
; CHECK-NEXT:    strd r2, r3, [sp, #4] @ 8-byte Folded Spill
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r10, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    bl __aeabi_d2iz
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    cmp.w r10, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r11, #-1
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    vmov r6, r5, d9
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r11, #0
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r11, #0
; CHECK-NEXT:    and r0, r11, #1
; CHECK-NEXT:    ldrd r2, r3, [sp, #12] @ 8-byte Folded Reload
; CHECK-NEXT:    rsbs r0, r0, #0
; CHECK-NEXT:    movs r4, #0
; CHECK-NEXT:    bfi r4, r0, #0, #1
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldrd r2, r3, [sp, #4] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r8, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_d2iz
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r7, #-1
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r6
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    cmp.w r8, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r7, #0
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r7, #0
; CHECK-NEXT:    and r0, r7, #1
; CHECK-NEXT:    rsbs r0, r0, #0
; CHECK-NEXT:    bfi r4, r0, #1, #1
; CHECK-NEXT:    ldr r0, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    strb r4, [r0]
; CHECK-NEXT:    add sp, #24
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI32_0:
; CHECK-NEXT:    .long 0 @ double 0
; CHECK-NEXT:    .long 0
; CHECK-NEXT:  .LCPI32_1:
; CHECK-NEXT:    .long 0 @ double -1
; CHECK-NEXT:    .long 3220176896
    ; Signed i1 saturates to [-1, 0]: the clamp constants .LCPI32_0/1 are the
    ; doubles 0 and -1. Soft-float compares (__aeabi_dcmpgt / dcmpge /
    ; dcmpun) select the clamp, NaN (unordered) lanes become 0, and the two
    ; 1-bit lanes are packed with bfi and stored as a single byte.
    %x = call <2 x i1> @llvm.fptosi.sat.v2f64.v2i1(<2 x double> %f)
    ret <2 x i1> %x
}

; Saturating fptosi of <2 x double> to <2 x i8>. With no native f64 support the
; backend emits per-lane libcalls: __aeabi_dcmpgt/__aeabi_dcmpge clamp against
; the bounds 127 / -128 (constant pool .LCPI33_0 / .LCPI33_1), __aeabi_d2lz
; performs the conversion, and __aeabi_dcmpun forces the lane to 0 for NaN.
define arm_aapcs_vfpcc <2 x i8> @test_signed_v2f64_v2i8(<2 x double> %f) {
; CHECK-LABEL: test_signed_v2f64_v2i8:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    .pad #32
; CHECK-NEXT:    sub sp, #32
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vldr d0, .LCPI33_0
; CHECK-NEXT:    vmov r9, r8, d9
; CHECK-NEXT:    vmov r11, r10, d0
; CHECK-NEXT:    str.w r11, [sp, #20] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    str.w r10, [sp, #24] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI33_1
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    vmov r5, r3, d0
; CHECK-NEXT:    str r3, [sp, #16] @ 4-byte Spill
; CHECK-NEXT:    str r5, [sp, #28] @ 4-byte Spill
; CHECK-NEXT:    mov r2, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    str r1, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    mvneq r0, #127
; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    cmp r1, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r0, #127
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    vmov r7, r6, d8
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    str r4, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r2, r5
; CHECK-NEXT:    ldr r5, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    mvneq r11, #127
; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    mov r10, r1
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r11, #127
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r11, #0
; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    ldr r5, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r5, #-1
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r5, #0
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r5, #0
; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    ldr r3, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r10, #-1
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r10, #0
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r10, #0
; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    vmov q0[2], q0[0], r11, r0
; CHECK-NEXT:    vmov q0[3], q0[1], r10, r5
; CHECK-NEXT:    add sp, #32
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI33_0:
; CHECK-NEXT:    .long 0 @ double 127
; CHECK-NEXT:    .long 1080016896
; CHECK-NEXT:  .LCPI33_1:
; CHECK-NEXT:    .long 0 @ double -128
; CHECK-NEXT:    .long 3227516928
    %x = call <2 x i8> @llvm.fptosi.sat.v2f64.v2i8(<2 x double> %f)
    ret <2 x i8> %x
}

; Saturating fptosi of <2 x double> to the non-power-of-two type <2 x i13>.
; Same libcall lowering as the other f64 cases: clamp against 4095 / -4096
; (.LCPI34_0 / .LCPI34_1) via __aeabi_dcmpgt/__aeabi_dcmpge, convert with
; __aeabi_d2lz, and zero NaN lanes via __aeabi_dcmpun.
define arm_aapcs_vfpcc <2 x i13> @test_signed_v2f64_v2i13(<2 x double> %f) {
; CHECK-LABEL: test_signed_v2f64_v2i13:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    .pad #32
; CHECK-NEXT:    sub sp, #32
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vldr d0, .LCPI34_0
; CHECK-NEXT:    vmov r10, r11, d9
; CHECK-NEXT:    vmov r8, r5, d0
; CHECK-NEXT:    str.w r8, [sp, #24] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r11
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    str r5, [sp, #28] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI34_1
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r11
; CHECK-NEXT:    vmov r2, r3, d0
; CHECK-NEXT:    strd r3, r2, [sp, #16] @ 8-byte Folded Spill
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r11
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    str r0, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r1, #-1
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r1, #0
; CHECK-NEXT:    mov r4, r1
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r11
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    vmov r7, r6, d8
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    str r4, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldrd r5, r2, [sp, #16] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r8, r1
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r8, #-1
; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r8, #0
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r8, #0
; CHECK-NEXT:    ldr r4, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    mov r2, r4
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    itt eq
; CHECK-NEXT:    movweq r9, #61440
; CHECK-NEXT:    movteq r9, #65535
; CHECK-NEXT:    ldrd r2, r3, [sp, #24] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r11
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r3, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r11
; CHECK-NEXT:    mov r2, r4
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    itt eq
; CHECK-NEXT:    movweq r0, #61440
; CHECK-NEXT:    movteq r0, #65535
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movwne r0, #4095
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r11
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r5, #0
; CHECK-NEXT:    ldrd r2, r3, [sp, #24] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movwne r9, #4095
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r9, #0
; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    vmov q0[2], q0[0], r9, r5
; CHECK-NEXT:    vmov q0[3], q0[1], r8, r0
; CHECK-NEXT:    add sp, #32
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI34_0:
; CHECK-NEXT:    .long 0 @ double 4095
; CHECK-NEXT:    .long 1085275648
; CHECK-NEXT:  .LCPI34_1:
; CHECK-NEXT:    .long 0 @ double -4096
; CHECK-NEXT:    .long 3232759808
    %x = call <2 x i13> @llvm.fptosi.sat.v2f64.v2i13(<2 x double> %f)
    ret <2 x i13> %x
}

; Saturating fptosi of <2 x double> to <2 x i16>. Libcall lowering clamps
; against 32767 / -32768 (.LCPI35_0 / .LCPI35_1) with __aeabi_dcmpgt /
; __aeabi_dcmpge, converts via __aeabi_d2lz, and zeroes NaN lanes with
; __aeabi_dcmpun. Mirrors the v2i13 test with i16 bounds.
define arm_aapcs_vfpcc <2 x i16> @test_signed_v2f64_v2i16(<2 x double> %f) {
; CHECK-LABEL: test_signed_v2f64_v2i16:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    .pad #32
; CHECK-NEXT:    sub sp, #32
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vldr d0, .LCPI35_0
; CHECK-NEXT:    vmov r10, r11, d9
; CHECK-NEXT:    vmov r8, r5, d0
; CHECK-NEXT:    str.w r8, [sp, #24] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r11
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    str r5, [sp, #28] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI35_1
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r11
; CHECK-NEXT:    vmov r2, r3, d0
; CHECK-NEXT:    strd r3, r2, [sp, #16] @ 8-byte Folded Spill
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r11
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    str r0, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r1, #-1
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r1, #0
; CHECK-NEXT:    mov r4, r1
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r11
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    vmov r7, r6, d8
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    str r4, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldrd r5, r2, [sp, #16] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r8, r1
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r8, #-1
; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r8, #0
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r8, #0
; CHECK-NEXT:    ldr r4, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    mov r2, r4
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    itt eq
; CHECK-NEXT:    movweq r9, #32768
; CHECK-NEXT:    movteq r9, #65535
; CHECK-NEXT:    ldrd r2, r3, [sp, #24] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r11
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r3, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r11
; CHECK-NEXT:    mov r2, r4
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    itt eq
; CHECK-NEXT:    movweq r0, #32768
; CHECK-NEXT:    movteq r0, #65535
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movwne r0, #32767
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r10
; CHECK-NEXT:    mov r1, r11
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r5, #0
; CHECK-NEXT:    ldrd r2, r3, [sp, #24] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movwne r9, #32767
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r9, #0
; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    vmov q0[2], q0[0], r9, r5
; CHECK-NEXT:    vmov q0[3], q0[1], r8, r0
; CHECK-NEXT:    add sp, #32
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI35_0:
; CHECK-NEXT:    .long 0 @ double 32767
; CHECK-NEXT:    .long 1088421824
; CHECK-NEXT:  .LCPI35_1:
; CHECK-NEXT:    .long 0 @ double -32768
; CHECK-NEXT:    .long 3235905536
    %x = call <2 x i16> @llvm.fptosi.sat.v2f64.v2i16(<2 x double> %f)
    ret <2 x i16> %x
}

; Saturating fptosi of <2 x double> to the odd-width type <2 x i19>. Clamp
; bounds are 262143 / -262144 (.LCPI36_0 / .LCPI36_1); the clamped values are
; materialized with movw/movt pairs since they don't fit a single immediate.
; Uses a larger (48-byte) spill area than the narrower-type tests.
define arm_aapcs_vfpcc <2 x i19> @test_signed_v2f64_v2i19(<2 x double> %f) {
; CHECK-LABEL: test_signed_v2f64_v2i19:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    .pad #48
; CHECK-NEXT:    sub sp, #48
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vldr d0, .LCPI36_0
; CHECK-NEXT:    vmov r6, r5, d9
; CHECK-NEXT:    vmov r8, r3, d0
; CHECK-NEXT:    str r3, [sp, #36] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    str.w r8, [sp, #44] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI36_1
; CHECK-NEXT:    mov r10, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    vmov r2, r3, d0
; CHECK-NEXT:    strd r2, r3, [sp, #16] @ 8-byte Folded Spill
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    str r0, [sp, #28] @ 4-byte Spill
; CHECK-NEXT:    cmp.w r11, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r1, #-1
; CHECK-NEXT:    cmp.w r10, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r1, #0
; CHECK-NEXT:    mov r4, r1
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r2, r6
; CHECK-NEXT:    mov r3, r1
; CHECK-NEXT:    str r5, [sp, #40] @ 4-byte Spill
; CHECK-NEXT:    vmov r7, r9, d8
; CHECK-NEXT:    mov r5, r6
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    str r4, [sp, #32] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    ldr r4, [sp, #36] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r9
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    mov r6, r9
; CHECK-NEXT:    str.w r9, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r1, r9
; CHECK-NEXT:    ldr.w r9, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    ldr.w r8, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    str r0, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    mov r6, r5
; CHECK-NEXT:    mov r10, r1
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    itt eq
; CHECK-NEXT:    movweq r11, #0
; CHECK-NEXT:    movteq r11, #65532
; CHECK-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    itt ne
; CHECK-NEXT:    movwne r11, #65535
; CHECK-NEXT:    movtne r11, #3
; CHECK-NEXT:    str r5, [sp, #24] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    ldr r5, [sp, #40] @ 4-byte Reload
; CHECK-NEXT:    ldr r2, [sp, #44] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    str r0, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    mov r6, r8
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    ldr r4, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    itt eq
; CHECK-NEXT:    moveq r4, #0
; CHECK-NEXT:    movteq r4, #65532
; CHECK-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    itt ne
; CHECK-NEXT:    movwne r4, #65535
; CHECK-NEXT:    movtne r4, #3
; CHECK-NEXT:    ldr r5, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    ldr r2, [sp, #44] @ 4-byte Reload
; CHECK-NEXT:    ldr r3, [sp, #36] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r8, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r10, #-1
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    cmp.w r8, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r10, #0
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r10, #0
; CHECK-NEXT:    ldr r0, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    ldr r1, [sp, #40] @ 4-byte Reload
; CHECK-NEXT:    mov r2, r0
; CHECK-NEXT:    mov r3, r1
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r11, #0
; CHECK-NEXT:    ldr r0, [sp, #32] @ 4-byte Reload
; CHECK-NEXT:    vmov q0[2], q0[0], r11, r4
; CHECK-NEXT:    vmov q0[3], q0[1], r10, r0
; CHECK-NEXT:    add sp, #48
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI36_0:
; CHECK-NEXT:    .long 0 @ double 262143
; CHECK-NEXT:    .long 1091567608
; CHECK-NEXT:  .LCPI36_1:
; CHECK-NEXT:    .long 0 @ double -262144
; CHECK-NEXT:    .long 3239051264
    %x = call <2 x i19> @llvm.fptosi.sat.v2f64.v2i19(<2 x double> %f)
    ret <2 x i19> %x
}

; Saturating fptosi of <2 x double> to <2 x i32> (duplicate coverage of the
; v2i32 case in the vector-size section). Bounds are INT32_MAX / INT32_MIN
; (.LCPI37_0 / .LCPI37_1); clamping uses moveq.w #-2147483648 and
; mvnne #-2147483648 (= 0x7fffffff), with NaN lanes zeroed via __aeabi_dcmpun.
define arm_aapcs_vfpcc <2 x i32> @test_signed_v2f64_v2i32_duplicate(<2 x double> %f) {
; CHECK-LABEL: test_signed_v2f64_v2i32_duplicate:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    .pad #32
; CHECK-NEXT:    sub sp, #32
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vldr d0, .LCPI37_0
; CHECK-NEXT:    vmov r9, r8, d9
; CHECK-NEXT:    vmov r11, r10, d0
; CHECK-NEXT:    str.w r11, [sp, #20] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    str.w r10, [sp, #24] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI37_1
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    vmov r5, r3, d0
; CHECK-NEXT:    str r3, [sp, #16] @ 4-byte Spill
; CHECK-NEXT:    str r5, [sp, #28] @ 4-byte Spill
; CHECK-NEXT:    mov r2, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    str r1, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r0, #-2147483648
; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    cmp r1, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r0, #-2147483648
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    vmov r7, r6, d8
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    str r4, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r2, r5
; CHECK-NEXT:    ldr r5, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r11, #-2147483648
; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    mov r10, r1
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r11, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r11, #0
; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    ldr r5, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r5, #-1
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r5, #0
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r5, #0
; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    ldr r3, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r10, #-1
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r10, #0
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r10, #0
; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    vmov q0[2], q0[0], r11, r0
; CHECK-NEXT:    vmov q0[3], q0[1], r10, r5
; CHECK-NEXT:    add sp, #32
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI37_0:
; CHECK-NEXT:    .long 4290772992 @ double 2147483647
; CHECK-NEXT:    .long 1105199103
; CHECK-NEXT:  .LCPI37_1:
; CHECK-NEXT:    .long 0 @ double -2147483648
; CHECK-NEXT:    .long 3252682752
    %x = call <2 x i32> @llvm.fptosi.sat.v2f64.v2i32(<2 x double> %f)
    ret <2 x i32> %x
}

; Saturating fptosi of <2 x double> to <2 x i50> — wider than 32 bits, so both
; halves of each 64-bit __aeabi_d2lz result are clamped. Bounds are 2^49-1 /
; -2^49 (.LCPI38_0 / .LCPI38_1); high words of the clamped values are built
; with movw/movt, and NaN lanes are zeroed via __aeabi_dcmpun. This case also
; uses csel, unlike the narrower f64 tests.
define arm_aapcs_vfpcc <2 x i50> @test_signed_v2f64_v2i50(<2 x double> %f) {
; CHECK-LABEL: test_signed_v2f64_v2i50:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    .pad #32
; CHECK-NEXT:    sub sp, #32
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vldr d0, .LCPI38_0
; CHECK-NEXT:    vmov r8, r5, d9
; CHECK-NEXT:    vmov r2, r3, d0
; CHECK-NEXT:    str r2, [sp, #24] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    str r3, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI38_1
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    vmov r10, r9, d0
; CHECK-NEXT:    str.w r10, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    str.w r9, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    mov r3, r9
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    cmp.w r11, #0
; CHECK-NEXT:    str r1, [sp, #28] @ 4-byte Spill
; CHECK-NEXT:    csel r0, r0, r11, ne
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r0, #-1
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    vmov r6, r7, d8
; CHECK-NEXT:    mov r11, r5
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    str r4, [sp, #20] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    ldr r5, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    ldr r4, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    mov r2, r5
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    mov r3, r9
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    mov r10, r1
; CHECK-NEXT:    str r0, [sp, #16] @ 4-byte Spill
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    itt eq
; CHECK-NEXT:    movweq r10, #0
; CHECK-NEXT:    movteq r10, #65534
; CHECK-NEXT:    ldr r0, [sp] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r11
; CHECK-NEXT:    mov r2, r5
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    itt ne
; CHECK-NEXT:    movwne r10, #65535
; CHECK-NEXT:    movtne r10, #1
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr.w r9, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r11
; CHECK-NEXT:    mov r5, r11
; CHECK-NEXT:    ldr.w r11, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r3, r9
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    ldr r0, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    itt eq
; CHECK-NEXT:    moveq r0, #0
; CHECK-NEXT:    movteq r0, #65534
; CHECK-NEXT:    ldr r1, [sp] @ 4-byte Reload
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    cmp r1, #0
; CHECK-NEXT:    itt ne
; CHECK-NEXT:    movwne r0, #65535
; CHECK-NEXT:    movtne r0, #1
; CHECK-NEXT:    ldr r2, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    str r0, [sp, #28] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    mov r3, r9
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    ldr r1, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r2, r6
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    csel r4, r1, r0, ne
; CHECK-NEXT:    ldr r0, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r4, #-1
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    ldr r5, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    mov r2, r6
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r5, #0
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r10, #0
; CHECK-NEXT:    ldr r0, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    vmov q0[2], q0[0], r4, r0
; CHECK-NEXT:    vmov q0[3], q0[1], r10, r5
; CHECK-NEXT:    add sp, #32
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI38_0:
; CHECK-NEXT:    .long 4294967280 @ double 562949953421311
; CHECK-NEXT:    .long 1124073471
; CHECK-NEXT:  .LCPI38_1:
; CHECK-NEXT:    .long 0 @ double -562949953421312
; CHECK-NEXT:    .long 3271557120
    %x = call <2 x i50> @llvm.fptosi.sat.v2f64.v2i50(<2 x double> %f)
    ret <2 x i50> %x
}

define arm_aapcs_vfpcc <2 x i64> @test_signed_v2f64_v2i64(<2 x double> %f) {
; CHECK-LABEL: test_signed_v2f64_v2i64:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    .pad #32
; CHECK-NEXT:    sub sp, #32
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vldr d0, .LCPI39_0
; CHECK-NEXT:    vmov r8, r5, d9
; CHECK-NEXT:    vmov r11, r3, d0
; CHECK-NEXT:    str r3, [sp, #28] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    str.w r11, [sp, #24] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI39_1
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    vmov r2, r3, d0
; CHECK-NEXT:    str r2, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    str r3, [sp, #20] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r10, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    cmp.w r10, #0
; CHECK-NEXT:    str r1, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    csel r0, r0, r10, ne
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r0, #-1
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    vmov r7, r6, d8
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    ldr r3, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    str r4, [sp, #16] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r4, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    ldr.w r10, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r2, r4
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_d2lz
; CHECK-NEXT:    cmp.w r11, #0
; CHECK-NEXT:    mov r9, r1
; CHECK-NEXT:    csel r11, r0, r11, ne
; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r11, #-1
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r11, #0
; CHECK-NEXT:    ldrd r2, r3, [sp, #24] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r4
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r0, #-2147483648
; CHECK-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    cmp r1, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r0, #-2147483648
; CHECK-NEXT:    mov r10, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r10, #0
; CHECK-NEXT:    ldrd r2, r3, [sp, #24] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r3, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r4
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r9, #-2147483648
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r9, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r9, #0
; CHECK-NEXT:    ldr r0, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    vmov q0[2], q0[0], r11, r0
; CHECK-NEXT:    vmov q0[3], q0[1], r9, r10
; CHECK-NEXT:    add sp, #32
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI39_0:
; CHECK-NEXT:    .long 4294967295 @ double 9.2233720368547748E+18
; CHECK-NEXT:    .long 1138753535
; CHECK-NEXT:  .LCPI39_1:
; CHECK-NEXT:    .long 0 @ double -9.2233720368547758E+18
; CHECK-NEXT:    .long 3286237184
    %x = call <2 x i64> @llvm.fptosi.sat.v2f64.v2i64(<2 x double> %f)
    ret <2 x i64> %x
}

define arm_aapcs_vfpcc <2 x i100> @test_signed_v2f64_v2i100(<2 x double> %f) {
; Saturating fptosi of <2 x double> to <2 x i100>. There is no native i100
; conversion, so the expected lowering expands per lane via soft-float
; libcalls: __aeabi_dcmpgt/__aeabi_dcmpge bound-check against the i100
; min/max constants (.LCPI40_0/.LCPI40_1), __fixdfti performs the actual
; conversion, and __aeabi_dcmpun detects NaN (which saturates to 0).
; The i100 lanes are shifted/packed and stored through the pointer passed
; in r0 (indirect return). CHECK lines below are autogenerated by
; update_llc_test_checks.py — do not hand-edit them.
; CHECK-LABEL: test_signed_v2f64_v2i100:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    .pad #48
; CHECK-NEXT:    sub sp, #48
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vldr d0, .LCPI40_0
; CHECK-NEXT:    vmov r6, r5, d8
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    vmov r9, r8, d0
; CHECK-NEXT:    str.w r8, [sp, #28] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI40_1
; CHECK-NEXT:    mov r10, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    vmov r7, r3, d0
; CHECK-NEXT:    str r3, [sp, #32] @ 4-byte Spill
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __fixdfti
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    strd r1, r0, [sp, #8] @ 8-byte Folded Spill
; CHECK-NEXT:    csel r4, r2, r4, ne
; CHECK-NEXT:    str r3, [sp, #24] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r6
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    cmp.w r10, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r4, #-1
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    str.w r11, [sp, #44] @ 4-byte Spill
; CHECK-NEXT:    str.w r4, [r11, #8]
; CHECK-NEXT:    str.w r9, [sp, #40] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r4, [sp, #32] @ 4-byte Reload
; CHECK-NEXT:    mov r8, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r10, r7
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    ldr r1, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r2, r6
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    csel r7, r1, r0, ne
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    cmp.w r8, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r7, #-1
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r7, #0
; CHECK-NEXT:    str.w r7, [r11, #4]
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    ldr.w r11, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    str.w r10, [sp, #36] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r2, r6
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    csel r7, r1, r0, ne
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r7, #-1
; CHECK-NEXT:    str r6, [sp, #16] @ 4-byte Spill
; CHECK-NEXT:    str r5, [sp, #20] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    vmov r9, r8, d9
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r7, #0
; CHECK-NEXT:    ldr r0, [sp, #44] @ 4-byte Reload
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    mov r5, r11
; CHECK-NEXT:    str r7, [r0]
; CHECK-NEXT:    ldr r7, [sp, #40] @ 4-byte Reload
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r4, [sp, #32] @ 4-byte Reload
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r2, r10
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    bl __fixdfti
; CHECK-NEXT:    cmp.w r11, #0
; CHECK-NEXT:    strd r2, r0, [sp, #4] @ 8-byte Folded Spill
; CHECK-NEXT:    csel r10, r1, r11, ne
; CHECK-NEXT:    str r3, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    cmp r6, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r10, #-1
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r10, #0
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r6, [sp, #36] @ 4-byte Reload
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r3, r4
; CHECK-NEXT:    mov r2, r6
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    csel r4, r1, r0, ne
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    cmp.w r11, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r4, #-1
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    ldr r1, [sp, #44] @ 4-byte Reload
; CHECK-NEXT:    lsr.w r0, r10, #28
; CHECK-NEXT:    orr.w r0, r0, r4, lsl #4
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    mov r7, r5
; CHECK-NEXT:    str r0, [r1, #20]
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r2, r6
; CHECK-NEXT:    ldr r6, [sp, #32] @ 4-byte Reload
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    ldr r1, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    csel r11, r1, r0, ne
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r11, #-1
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r11, #0
; CHECK-NEXT:    ldr r5, [sp, #44] @ 4-byte Reload
; CHECK-NEXT:    lsr.w r0, r11, #28
; CHECK-NEXT:    orr.w r0, r0, r10, lsl #4
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    str r0, [r5, #16]
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    ldr r2, [sp, #40] @ 4-byte Reload
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r2, [sp, #36] @ 4-byte Reload
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    mov r10, r6
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    it eq
; CHECK-NEXT:    mvneq r0, #7
; CHECK-NEXT:    cmp r7, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r0, #7
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    lsr.w r0, r4, #28
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r6, #0
; CHECK-NEXT:    orr.w r0, r0, r6, lsl #4
; CHECK-NEXT:    strb r0, [r5, #24]
; CHECK-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    ldr r4, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    ldr r2, [sp, #40] @ 4-byte Reload
; CHECK-NEXT:    ldr r3, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr r2, [sp, #36] @ 4-byte Reload
; CHECK-NEXT:    mov r8, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    mov r6, r4
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    ldr r0, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    it eq
; CHECK-NEXT:    mvneq r0, #7
; CHECK-NEXT:    cmp.w r8, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r0, #7
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    mov r1, r6
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    mov r3, r6
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    and r0, r4, #15
; CHECK-NEXT:    orr.w r0, r0, r11, lsl #4
; CHECK-NEXT:    str r0, [r5, #12]
; CHECK-NEXT:    add sp, #48
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI40_0:
; CHECK-NEXT:    .long 4294967295 @ double 6.3382530011411463E+29
; CHECK-NEXT:    .long 1176502271
; CHECK-NEXT:  .LCPI40_1:
; CHECK-NEXT:    .long 0 @ double -6.338253001141147E+29
; CHECK-NEXT:    .long 3323985920
    %x = call <2 x i100> @llvm.fptosi.sat.v2f64.v2i100(<2 x double> %f)
    ret <2 x i100> %x
}

define arm_aapcs_vfpcc <2 x i128> @test_signed_v2f64_v2i128(<2 x double> %f) {
; Saturating fptosi of <2 x double> to <2 x i128>. As with the i100 case,
; the expected lowering uses soft-float libcalls per lane: __aeabi_dcmpgt /
; __aeabi_dcmpge range-check against the i128 bounds (.LCPI41_0/.LCPI41_1),
; __fixdfti converts, and __aeabi_dcmpun maps NaN to 0. The high word of
; each lane saturates to INT32_MIN / ~INT32_MIN (i.e. i128 min/max), and
; the eight 32-bit words are stored through the pointer passed in r0
; (indirect return). CHECK lines are autogenerated by
; update_llc_test_checks.py — do not hand-edit them.
; CHECK-LABEL: test_signed_v2f64_v2i128:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    .pad #32
; CHECK-NEXT:    sub sp, #32
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vldr d0, .LCPI41_0
; CHECK-NEXT:    vmov r8, r7, d9
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    vmov r2, r3, d0
; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    str r2, [sp, #28] @ 4-byte Spill
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    mov r11, r3
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    vldr d0, .LCPI41_1
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    vmov r4, r3, d0
; CHECK-NEXT:    str r3, [sp, #24] @ 4-byte Spill
; CHECK-NEXT:    mov r2, r4
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    bl __fixdfti
; CHECK-NEXT:    mov r10, r3
; CHECK-NEXT:    strd r2, r1, [sp] @ 8-byte Folded Spill
; CHECK-NEXT:    str r0, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r10, #-2147483648
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r10, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r10, #0
; CHECK-NEXT:    str.w r10, [r6, #28]
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    ldr.w r9, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    mov r5, r11
; CHECK-NEXT:    str.w r11, [sp, #16] @ 4-byte Spill
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr.w r10, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    mov r2, r4
; CHECK-NEXT:    mov r11, r4
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    str r4, [sp, #20] @ 4-byte Spill
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    ldr r1, [sp] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    csel r4, r1, r0, ne
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    cmp r6, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r4, #-1
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    ldr r6, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    str r4, [r6, #24]
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    mov r2, r11
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    csel r4, r1, r0, ne
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    cmp r5, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r4, #-1
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    str r4, [r6, #20]
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    ldr.w r10, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r11, r6
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    ldr r1, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r2, r8
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    csel r4, r1, r0, ne
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    cmp.w r9, #0
; CHECK-NEXT:    vmov r6, r5, d8
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r4, #-1
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    str.w r4, [r11, #16]
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    ldr r7, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r3, r10
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    ldr.w r9, [sp, #20] @ 4-byte Reload
; CHECK-NEXT:    mov r10, r0
; CHECK-NEXT:    ldr.w r8, [sp, #24] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __fixdfti
; CHECK-NEXT:    mov r4, r3
; CHECK-NEXT:    strd r2, r1, [sp] @ 8-byte Folded Spill
; CHECK-NEXT:    str r0, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    cmp.w r11, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    moveq.w r4, #-2147483648
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r6
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    cmp.w r10, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    mvnne r4, #-2147483648
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r4, #0
; CHECK-NEXT:    ldr.w r10, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r7
; CHECK-NEXT:    str.w r4, [r10, #12]
; CHECK-NEXT:    ldr.w r11, [sp, #16] @ 4-byte Reload
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    ldr r1, [sp] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r2, r6
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    csel r7, r1, r0, ne
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r7, #-1
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r7, #0
; CHECK-NEXT:    str.w r7, [r10, #8]
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r2, r6
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    csel r7, r1, r0, ne
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r7, #-1
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r7, #0
; CHECK-NEXT:    str.w r7, [r10, #4]
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r3, r11
; CHECK-NEXT:    bl __aeabi_dcmpgt
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    mov r2, r9
; CHECK-NEXT:    mov r3, r8
; CHECK-NEXT:    bl __aeabi_dcmpge
; CHECK-NEXT:    ldr r1, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    mov r2, r6
; CHECK-NEXT:    mov r3, r5
; CHECK-NEXT:    csel r7, r1, r0, ne
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne.w r7, #-1
; CHECK-NEXT:    bl __aeabi_dcmpun
; CHECK-NEXT:    cmp r0, #0
; CHECK-NEXT:    it ne
; CHECK-NEXT:    movne r7, #0
; CHECK-NEXT:    str.w r7, [r10]
; CHECK-NEXT:    add sp, #32
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 3
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI41_0:
; CHECK-NEXT:    .long 4294967295 @ double 1.7014118346046921E+38
; CHECK-NEXT:    .long 1205862399
; CHECK-NEXT:  .LCPI41_1:
; CHECK-NEXT:    .long 0 @ double -1.7014118346046923E+38
; CHECK-NEXT:    .long 3353346048
    %x = call <2 x i128> @llvm.fptosi.sat.v2f64.v2i128(<2 x double> %f)
    ret <2 x i128> %x
}

;
; 8-Vector half to signed integer -- result size variation
;

declare <8 x   i1> @llvm.fptosi.sat.v8f16.v8i1  (<8 x half>)
declare <8 x   i8> @llvm.fptosi.sat.v8f16.v8i8  (<8 x half>)
declare <8 x  i13> @llvm.fptosi.sat.v8f16.v8i13 (<8 x half>)
declare <8 x  i16> @llvm.fptosi.sat.v8f16.v8i16 (<8 x half>)
declare <8 x  i19> @llvm.fptosi.sat.v8f16.v8i19 (<8 x half>)
declare <8 x  i50> @llvm.fptosi.sat.v8f16.v8i50 (<8 x half>)
declare <8 x  i64> @llvm.fptosi.sat.v8f16.v8i64 (<8 x half>)
declare <8 x i100> @llvm.fptosi.sat.v8f16.v8i100(<8 x half>)
declare <8 x i128> @llvm.fptosi.sat.v8f16.v8i128(<8 x half>)

define arm_aapcs_vfpcc <8 x i1> @test_signed_v8f16_v8i1(<8 x half> %f) {
; Saturating fptosi of <8 x half> to <8 x i1>. Expected lowering: each half
; lane is widened (vcvtb/vcvtt.f32.f16), clamped to [-1.0, 0.0] with
; vmaxnm/vminnm (the i1 signed range), converted with vcvt.s32.f32, and
; NaN lanes are forced to 0 via vcmp against self + movvs. The eight 1-bit
; results are packed with bfi and stored as a single byte through the
; pointer in r0. CHECK lines are autogenerated by update_llc_test_checks.py
; — do not hand-edit them.
; CHECK-LABEL: test_signed_v8f16_v8i1:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .vsave {d8}
; CHECK-NEXT:    vpush {d8}
; CHECK-NEXT:    vcvtb.f32.f16 s15, s0
; CHECK-NEXT:    vmov.f32 s5, #-1.000000e+00
; CHECK-NEXT:    vldr s7, .LCPI42_0
; CHECK-NEXT:    vmaxnm.f32 s16, s15, s5
; CHECK-NEXT:    vcvtt.f32.f16 s12, s2
; CHECK-NEXT:    vcvtt.f32.f16 s9, s1
; CHECK-NEXT:    vminnm.f32 s16, s16, s7
; CHECK-NEXT:    vcvtt.f32.f16 s4, s3
; CHECK-NEXT:    vcvt.s32.f32 s16, s16
; CHECK-NEXT:    vcvtb.f32.f16 s8, s3
; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
; CHECK-NEXT:    vcvtb.f32.f16 s1, s1
; CHECK-NEXT:    vcvtt.f32.f16 s0, s0
; CHECK-NEXT:    vmaxnm.f32 s6, s4, s5
; CHECK-NEXT:    vmaxnm.f32 s10, s8, s5
; CHECK-NEXT:    vmaxnm.f32 s14, s12, s5
; CHECK-NEXT:    vmaxnm.f32 s3, s2, s5
; CHECK-NEXT:    vmaxnm.f32 s11, s9, s5
; CHECK-NEXT:    vmaxnm.f32 s13, s1, s5
; CHECK-NEXT:    vmaxnm.f32 s5, s0, s5
; CHECK-NEXT:    vminnm.f32 s5, s5, s7
; CHECK-NEXT:    vminnm.f32 s13, s13, s7
; CHECK-NEXT:    vcvt.s32.f32 s5, s5
; CHECK-NEXT:    movs r1, #0
; CHECK-NEXT:    vcmp.f32 s15, s15
; CHECK-NEXT:    vminnm.f32 s11, s11, s7
; CHECK-NEXT:    vmov r2, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vcvt.s32.f32 s13, s13
; CHECK-NEXT:    and r2, r2, #1
; CHECK-NEXT:    vcmp.f32 s0, s0
; CHECK-NEXT:    rsbs r2, r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    bfi r1, r2, #0, #1
; CHECK-NEXT:    vcvt.s32.f32 s11, s11
; CHECK-NEXT:    vmov r2, s5
; CHECK-NEXT:    vminnm.f32 s3, s3, s7
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vcmp.f32 s1, s1
; CHECK-NEXT:    and r2, r2, #1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    rsb.w r2, r2, #0
; CHECK-NEXT:    vcvt.s32.f32 s3, s3
; CHECK-NEXT:    bfi r1, r2, #1, #1
; CHECK-NEXT:    vmov r2, s13
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vminnm.f32 s14, s14, s7
; CHECK-NEXT:    and r2, r2, #1
; CHECK-NEXT:    vcmp.f32 s9, s9
; CHECK-NEXT:    rsbs r2, r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    bfi r1, r2, #2, #1
; CHECK-NEXT:    vmov r2, s11
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vcvt.s32.f32 s14, s14
; CHECK-NEXT:    and r2, r2, #1
; CHECK-NEXT:    vminnm.f32 s10, s10, s7
; CHECK-NEXT:    rsbs r2, r2, #0
; CHECK-NEXT:    vcmp.f32 s2, s2
; CHECK-NEXT:    bfi r1, r2, #3, #1
; CHECK-NEXT:    vmov r2, s3
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vcvt.s32.f32 s10, s10
; CHECK-NEXT:    and r2, r2, #1
; CHECK-NEXT:    rsbs r2, r2, #0
; CHECK-NEXT:    vminnm.f32 s6, s6, s7
; CHECK-NEXT:    bfi r1, r2, #4, #1
; CHECK-NEXT:    vcmp.f32 s12, s12
; CHECK-NEXT:    vmov r2, s14
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vcvt.s32.f32 s6, s6
; CHECK-NEXT:    and r2, r2, #1
; CHECK-NEXT:    vcmp.f32 s8, s8
; CHECK-NEXT:    rsbs r2, r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    bfi r1, r2, #5, #1
; CHECK-NEXT:    vmov r2, s10
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vcmp.f32 s4, s4
; CHECK-NEXT:    and r2, r2, #1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    rsb.w r2, r2, #0
; CHECK-NEXT:    bfi r1, r2, #6, #1
; CHECK-NEXT:    vmov r2, s6
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    and r2, r2, #1
; CHECK-NEXT:    rsbs r2, r2, #0
; CHECK-NEXT:    bfi r1, r2, #7, #1
; CHECK-NEXT:    strb r1, [r0]
; CHECK-NEXT:    vpop {d8}
; CHECK-NEXT:    bx lr
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI42_0:
; CHECK-NEXT:    .long 0x00000000 @ float 0
    %x = call <8 x i1> @llvm.fptosi.sat.v8f16.v8i1(<8 x half> %f)
    ret <8 x i1> %x
}

define arm_aapcs_vfpcc <8 x i8> @test_signed_v8f16_v8i8(<8 x half> %f) {
; Saturating fptosi of <8 x half> to <8 x i8>. Two expected lowerings:
; - CHECK-MVE (integer MVE + fullfp16): scalar per-lane widening
;   (vcvtb/vcvtt.f32.f16), clamp to [-128, 127] with vmaxnm/vminnm
;   (.LCPI43_1/.LCPI43_0), vcvt.s32.f32, NaN -> 0 via vcmp/movvs, then
;   lanes are reassembled with vmov.16 q0[i].
; - CHECK-MVEFP (mve.fp): a direct vector sequence — vcvt.s16.f16 then
;   vqmovnb.s16 to saturate to i8 and vmovlb.s8 to sign-extend back to
;   the i16 container lanes.
; CHECK lines are autogenerated by update_llc_test_checks.py — do not
; hand-edit them.
; CHECK-MVE-LABEL: test_signed_v8f16_v8i8:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    .save {r4, r5, r7, lr}
; CHECK-MVE-NEXT:    push {r4, r5, r7, lr}
; CHECK-MVE-NEXT:    .vsave {d8}
; CHECK-MVE-NEXT:    vpush {d8}
; CHECK-MVE-NEXT:    vldr s8, .LCPI43_1
; CHECK-MVE-NEXT:    vcvtt.f32.f16 s13, s3
; CHECK-MVE-NEXT:    vcvtb.f32.f16 s3, s3
; CHECK-MVE-NEXT:    vldr s6, .LCPI43_0
; CHECK-MVE-NEXT:    vmaxnm.f32 s16, s3, s8
; CHECK-MVE-NEXT:    vcvtt.f32.f16 s4, s0
; CHECK-MVE-NEXT:    vcvtt.f32.f16 s12, s1
; CHECK-MVE-NEXT:    vcvtt.f32.f16 s7, s2
; CHECK-MVE-NEXT:    vmaxnm.f32 s15, s13, s8
; CHECK-MVE-NEXT:    vminnm.f32 s16, s16, s6
; CHECK-MVE-NEXT:    vcvtb.f32.f16 s0, s0
; CHECK-MVE-NEXT:    vcvtb.f32.f16 s1, s1
; CHECK-MVE-NEXT:    vcvtb.f32.f16 s2, s2
; CHECK-MVE-NEXT:    vmaxnm.f32 s10, s4, s8
; CHECK-MVE-NEXT:    vmaxnm.f32 s14, s12, s8
; CHECK-MVE-NEXT:    vmaxnm.f32 s5, s0, s8
; CHECK-MVE-NEXT:    vmaxnm.f32 s9, s7, s8
; CHECK-MVE-NEXT:    vmaxnm.f32 s11, s1, s8
; CHECK-MVE-NEXT:    vminnm.f32 s15, s15, s6
; CHECK-MVE-NEXT:    vcvt.s32.f32 s16, s16
; CHECK-MVE-NEXT:    vmaxnm.f32 s8, s2, s8
; CHECK-MVE-NEXT:    vminnm.f32 s10, s10, s6
; CHECK-MVE-NEXT:    vminnm.f32 s14, s14, s6
; CHECK-MVE-NEXT:    vminnm.f32 s5, s5, s6
; CHECK-MVE-NEXT:    vminnm.f32 s9, s9, s6
; CHECK-MVE-NEXT:    vminnm.f32 s11, s11, s6
; CHECK-MVE-NEXT:    vminnm.f32 s6, s8, s6
; CHECK-MVE-NEXT:    vcvt.s32.f32 s15, s15
; CHECK-MVE-NEXT:    vcvt.s32.f32 s6, s6
; CHECK-MVE-NEXT:    vcvt.s32.f32 s9, s9
; CHECK-MVE-NEXT:    vcvt.s32.f32 s11, s11
; CHECK-MVE-NEXT:    vcvt.s32.f32 s14, s14
; CHECK-MVE-NEXT:    vcvt.s32.f32 s5, s5
; CHECK-MVE-NEXT:    vcvt.s32.f32 s10, s10
; CHECK-MVE-NEXT:    vcmp.f32 s3, s3
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r12, s16
; CHECK-MVE-NEXT:    vcmp.f32 s13, s13
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs.w r12, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov lr, s15
; CHECK-MVE-NEXT:    vcmp.f32 s2, s2
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs.w lr, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r2, s6
; CHECK-MVE-NEXT:    vcmp.f32 s7, s7
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r2, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r3, s9
; CHECK-MVE-NEXT:    vcmp.f32 s1, s1
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r3, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r0, s11
; CHECK-MVE-NEXT:    vcmp.f32 s12, s12
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r1, s14
; CHECK-MVE-NEXT:    vmov r4, s5
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r1, #0
; CHECK-MVE-NEXT:    vcmp.f32 s0, s0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r4, #0
; CHECK-MVE-NEXT:    vcmp.f32 s4, s4
; CHECK-MVE-NEXT:    vmov.16 q0[0], r4
; CHECK-MVE-NEXT:    vmov r5, s10
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r5, #0
; CHECK-MVE-NEXT:    vmov.16 q0[1], r5
; CHECK-MVE-NEXT:    vmov.16 q0[2], r0
; CHECK-MVE-NEXT:    vmov.16 q0[3], r1
; CHECK-MVE-NEXT:    vmov.16 q0[4], r2
; CHECK-MVE-NEXT:    vmov.16 q0[5], r3
; CHECK-MVE-NEXT:    vmov.16 q0[6], r12
; CHECK-MVE-NEXT:    vmov.16 q0[7], lr
; CHECK-MVE-NEXT:    vpop {d8}
; CHECK-MVE-NEXT:    pop {r4, r5, r7, pc}
; CHECK-MVE-NEXT:    .p2align 2
; CHECK-MVE-NEXT:  @ %bb.1:
; CHECK-MVE-NEXT:  .LCPI43_0:
; CHECK-MVE-NEXT:    .long 0x42fe0000 @ float 127
; CHECK-MVE-NEXT:  .LCPI43_1:
; CHECK-MVE-NEXT:    .long 0xc3000000 @ float -128
;
; CHECK-MVEFP-LABEL: test_signed_v8f16_v8i8:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vcvt.s16.f16 q0, q0
; CHECK-MVEFP-NEXT:    vqmovnb.s16 q0, q0
; CHECK-MVEFP-NEXT:    vmovlb.s8 q0, q0
; CHECK-MVEFP-NEXT:    bx lr
    %x = call <8 x i8> @llvm.fptosi.sat.v8f16.v8i8(<8 x half> %f)
    ret <8 x i8> %x
}

define arm_aapcs_vfpcc <8 x i13> @test_signed_v8f16_v8i13(<8 x half> %f) {
; CHECK-MVE-LABEL: test_signed_v8f16_v8i13:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    .save {r4, r5, r7, lr}
; CHECK-MVE-NEXT:    push {r4, r5, r7, lr}
; CHECK-MVE-NEXT:    .vsave {d8}
; CHECK-MVE-NEXT:    vpush {d8}
; CHECK-MVE-NEXT:    vldr s8, .LCPI44_1
; CHECK-MVE-NEXT:    vcvtt.f32.f16 s13, s3
; CHECK-MVE-NEXT:    vcvtb.f32.f16 s3, s3
; CHECK-MVE-NEXT:    vldr s6, .LCPI44_0
; CHECK-MVE-NEXT:    vmaxnm.f32 s16, s3, s8
; CHECK-MVE-NEXT:    vcvtt.f32.f16 s4, s0
; CHECK-MVE-NEXT:    vcvtt.f32.f16 s12, s1
; CHECK-MVE-NEXT:    vcvtt.f32.f16 s7, s2
; CHECK-MVE-NEXT:    vmaxnm.f32 s15, s13, s8
; CHECK-MVE-NEXT:    vminnm.f32 s16, s16, s6
; CHECK-MVE-NEXT:    vcvtb.f32.f16 s0, s0
; CHECK-MVE-NEXT:    vcvtb.f32.f16 s1, s1
; CHECK-MVE-NEXT:    vcvtb.f32.f16 s2, s2
; CHECK-MVE-NEXT:    vmaxnm.f32 s10, s4, s8
; CHECK-MVE-NEXT:    vmaxnm.f32 s14, s12, s8
; CHECK-MVE-NEXT:    vmaxnm.f32 s5, s0, s8
; CHECK-MVE-NEXT:    vmaxnm.f32 s9, s7, s8
; CHECK-MVE-NEXT:    vmaxnm.f32 s11, s1, s8
; CHECK-MVE-NEXT:    vminnm.f32 s15, s15, s6
; CHECK-MVE-NEXT:    vcvt.s32.f32 s16, s16
; CHECK-MVE-NEXT:    vmaxnm.f32 s8, s2, s8
; CHECK-MVE-NEXT:    vminnm.f32 s10, s10, s6
; CHECK-MVE-NEXT:    vminnm.f32 s14, s14, s6
; CHECK-MVE-NEXT:    vminnm.f32 s5, s5, s6
; CHECK-MVE-NEXT:    vminnm.f32 s9, s9, s6
; CHECK-MVE-NEXT:    vminnm.f32 s11, s11, s6
; CHECK-MVE-NEXT:    vminnm.f32 s6, s8, s6
; CHECK-MVE-NEXT:    vcvt.s32.f32 s15, s15
; CHECK-MVE-NEXT:    vcvt.s32.f32 s6, s6
; CHECK-MVE-NEXT:    vcvt.s32.f32 s9, s9
; CHECK-MVE-NEXT:    vcvt.s32.f32 s11, s11
; CHECK-MVE-NEXT:    vcvt.s32.f32 s14, s14
; CHECK-MVE-NEXT:    vcvt.s32.f32 s5, s5
; CHECK-MVE-NEXT:    vcvt.s32.f32 s10, s10
; CHECK-MVE-NEXT:    vcmp.f32 s3, s3
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r12, s16
; CHECK-MVE-NEXT:    vcmp.f32 s13, s13
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs.w r12, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov lr, s15
; CHECK-MVE-NEXT:    vcmp.f32 s2, s2
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs.w lr, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r2, s6
; CHECK-MVE-NEXT:    vcmp.f32 s7, s7
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r2, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r3, s9
; CHECK-MVE-NEXT:    vcmp.f32 s1, s1
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r3, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r0, s11
; CHECK-MVE-NEXT:    vcmp.f32 s12, s12
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r1, s14
; CHECK-MVE-NEXT:    vmov r4, s5
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r1, #0
; CHECK-MVE-NEXT:    vcmp.f32 s0, s0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r4, #0
; CHECK-MVE-NEXT:    vcmp.f32 s4, s4
; CHECK-MVE-NEXT:    vmov.16 q0[0], r4
; CHECK-MVE-NEXT:    vmov r5, s10
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r5, #0
; CHECK-MVE-NEXT:    vmov.16 q0[1], r5
; CHECK-MVE-NEXT:    vmov.16 q0[2], r0
; CHECK-MVE-NEXT:    vmov.16 q0[3], r1
; CHECK-MVE-NEXT:    vmov.16 q0[4], r2
; CHECK-MVE-NEXT:    vmov.16 q0[5], r3
; CHECK-MVE-NEXT:    vmov.16 q0[6], r12
; CHECK-MVE-NEXT:    vmov.16 q0[7], lr
; CHECK-MVE-NEXT:    vpop {d8}
; CHECK-MVE-NEXT:    pop {r4, r5, r7, pc}
; CHECK-MVE-NEXT:    .p2align 2
; CHECK-MVE-NEXT:  @ %bb.1:
; CHECK-MVE-NEXT:  .LCPI44_0:
; CHECK-MVE-NEXT:    .long 0x457ff000 @ float 4095
; CHECK-MVE-NEXT:  .LCPI44_1:
; CHECK-MVE-NEXT:    .long 0xc5800000 @ float -4096
;
; CHECK-MVEFP-LABEL: test_signed_v8f16_v8i13:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vmvn.i16 q1, #0xf000
; CHECK-MVEFP-NEXT:    vcvt.s16.f16 q0, q0
; CHECK-MVEFP-NEXT:    vmov.i16 q2, #0xf000
; CHECK-MVEFP-NEXT:    vmin.s16 q0, q0, q1
; CHECK-MVEFP-NEXT:    vmax.s16 q0, q0, q2
; CHECK-MVEFP-NEXT:    bx lr
    %x = call <8 x i13> @llvm.fptosi.sat.v8f16.v8i13(<8 x half> %f)
    ret <8 x i13> %x
}

; Saturating fptosi of <8 x half> to <8 x i16>.
; CHECK-MVE (integer-only MVE + scalar fullfp16): each half lane is widened
; to f32 (vcvtt/vcvtb), clamped to [-32768, 32767] with vmaxnm/vminnm against
; the .LCPI45 pool constants, converted with vcvt.s32.f32, and NaN lanes are
; forced to 0 via the self-compare (vcmp.f32 sN, sN) + movvs sequence before
; the eight results are reassembled with vmov.16.
; CHECK-MVEFP (mve.fp): the whole operation collapses to a single
; vcvt.s16.f16, whose output as checked here covers the full i16 range with
; no extra clamping code.
define arm_aapcs_vfpcc <8 x i16> @test_signed_v8f16_v8i16(<8 x half> %f) {
; CHECK-MVE-LABEL: test_signed_v8f16_v8i16:
; CHECK-MVE:       @ %bb.0:
; CHECK-MVE-NEXT:    .save {r4, r5, r7, lr}
; CHECK-MVE-NEXT:    push {r4, r5, r7, lr}
; CHECK-MVE-NEXT:    .vsave {d8}
; CHECK-MVE-NEXT:    vpush {d8}
; CHECK-MVE-NEXT:    vldr s8, .LCPI45_1
; CHECK-MVE-NEXT:    vcvtt.f32.f16 s13, s3
; CHECK-MVE-NEXT:    vcvtb.f32.f16 s3, s3
; CHECK-MVE-NEXT:    vldr s6, .LCPI45_0
; CHECK-MVE-NEXT:    vmaxnm.f32 s16, s3, s8
; CHECK-MVE-NEXT:    vcvtt.f32.f16 s4, s0
; CHECK-MVE-NEXT:    vcvtt.f32.f16 s12, s1
; CHECK-MVE-NEXT:    vcvtt.f32.f16 s7, s2
; CHECK-MVE-NEXT:    vmaxnm.f32 s15, s13, s8
; CHECK-MVE-NEXT:    vminnm.f32 s16, s16, s6
; CHECK-MVE-NEXT:    vcvtb.f32.f16 s0, s0
; CHECK-MVE-NEXT:    vcvtb.f32.f16 s1, s1
; CHECK-MVE-NEXT:    vcvtb.f32.f16 s2, s2
; CHECK-MVE-NEXT:    vmaxnm.f32 s10, s4, s8
; CHECK-MVE-NEXT:    vmaxnm.f32 s14, s12, s8
; CHECK-MVE-NEXT:    vmaxnm.f32 s5, s0, s8
; CHECK-MVE-NEXT:    vmaxnm.f32 s9, s7, s8
; CHECK-MVE-NEXT:    vmaxnm.f32 s11, s1, s8
; CHECK-MVE-NEXT:    vminnm.f32 s15, s15, s6
; CHECK-MVE-NEXT:    vcvt.s32.f32 s16, s16
; CHECK-MVE-NEXT:    vmaxnm.f32 s8, s2, s8
; CHECK-MVE-NEXT:    vminnm.f32 s10, s10, s6
; CHECK-MVE-NEXT:    vminnm.f32 s14, s14, s6
; CHECK-MVE-NEXT:    vminnm.f32 s5, s5, s6
; CHECK-MVE-NEXT:    vminnm.f32 s9, s9, s6
; CHECK-MVE-NEXT:    vminnm.f32 s11, s11, s6
; CHECK-MVE-NEXT:    vminnm.f32 s6, s8, s6
; CHECK-MVE-NEXT:    vcvt.s32.f32 s15, s15
; CHECK-MVE-NEXT:    vcvt.s32.f32 s6, s6
; CHECK-MVE-NEXT:    vcvt.s32.f32 s9, s9
; CHECK-MVE-NEXT:    vcvt.s32.f32 s11, s11
; CHECK-MVE-NEXT:    vcvt.s32.f32 s14, s14
; CHECK-MVE-NEXT:    vcvt.s32.f32 s5, s5
; CHECK-MVE-NEXT:    vcvt.s32.f32 s10, s10
; CHECK-MVE-NEXT:    vcmp.f32 s3, s3
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r12, s16
; CHECK-MVE-NEXT:    vcmp.f32 s13, s13
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs.w r12, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov lr, s15
; CHECK-MVE-NEXT:    vcmp.f32 s2, s2
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs.w lr, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r2, s6
; CHECK-MVE-NEXT:    vcmp.f32 s7, s7
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r2, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r3, s9
; CHECK-MVE-NEXT:    vcmp.f32 s1, s1
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r3, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r0, s11
; CHECK-MVE-NEXT:    vcmp.f32 s12, s12
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmov r1, s14
; CHECK-MVE-NEXT:    vmov r4, s5
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r1, #0
; CHECK-MVE-NEXT:    vcmp.f32 s0, s0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r4, #0
; CHECK-MVE-NEXT:    vcmp.f32 s4, s4
; CHECK-MVE-NEXT:    vmov.16 q0[0], r4
; CHECK-MVE-NEXT:    vmov r5, s10
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r5, #0
; CHECK-MVE-NEXT:    vmov.16 q0[1], r5
; CHECK-MVE-NEXT:    vmov.16 q0[2], r0
; CHECK-MVE-NEXT:    vmov.16 q0[3], r1
; CHECK-MVE-NEXT:    vmov.16 q0[4], r2
; CHECK-MVE-NEXT:    vmov.16 q0[5], r3
; CHECK-MVE-NEXT:    vmov.16 q0[6], r12
; CHECK-MVE-NEXT:    vmov.16 q0[7], lr
; CHECK-MVE-NEXT:    vpop {d8}
; CHECK-MVE-NEXT:    pop {r4, r5, r7, pc}
; CHECK-MVE-NEXT:    .p2align 2
; CHECK-MVE-NEXT:  @ %bb.1:
; CHECK-MVE-NEXT:  .LCPI45_0:
; CHECK-MVE-NEXT:    .long 0x46fffe00 @ float 32767
; CHECK-MVE-NEXT:  .LCPI45_1:
; CHECK-MVE-NEXT:    .long 0xc7000000 @ float -32768
; CHECK-MVE-NEXT:  @ %bb.1 end
;
; CHECK-MVEFP-LABEL: test_signed_v8f16_v8i16:
; CHECK-MVEFP:       @ %bb.0:
; CHECK-MVEFP-NEXT:    vcvt.s16.f16 q0, q0
; CHECK-MVEFP-NEXT:    bx lr
    %x = call <8 x i16> @llvm.fptosi.sat.v8f16.v8i16(<8 x half> %f)
    ret <8 x i16> %x
}

; Saturating fptosi of <8 x half> to <8 x i19>.
; Both MVE variants share one CHECK body: the <8 x i19> result does not fit a
; vector register, so it is returned indirectly through the pointer in r0.
; Each lane is widened to f32, clamped to [-262144, 262143] via vmaxnm/vminnm
; with the .LCPI46 pool constants, converted with vcvt.s32.f32, NaN lanes are
; zeroed (vcmp sN,sN + movvs), and the eight 19-bit values are bit-packed
; into 19 bytes of memory with ubfx/bfc/orr and str/strh/strb.
define arm_aapcs_vfpcc <8 x i19> @test_signed_v8f16_v8i19(<8 x half> %f) {
; CHECK-LABEL: test_signed_v8f16_v8i19:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    .vsave {d8}
; CHECK-NEXT:    vpush {d8}
; CHECK-NEXT:    vldr s12, .LCPI46_0
; CHECK-NEXT:    vcvtt.f32.f16 s15, s3
; CHECK-NEXT:    vldr s14, .LCPI46_1
; CHECK-NEXT:    vcvtb.f32.f16 s7, s0
; CHECK-NEXT:    vmaxnm.f32 s16, s15, s12
; CHECK-NEXT:    vcvtb.f32.f16 s4, s1
; CHECK-NEXT:    vcvtt.f32.f16 s8, s1
; CHECK-NEXT:    vcvtb.f32.f16 s1, s2
; CHECK-NEXT:    vcvtt.f32.f16 s0, s0
; CHECK-NEXT:    vcvtt.f32.f16 s2, s2
; CHECK-NEXT:    vcvtb.f32.f16 s3, s3
; CHECK-NEXT:    vmaxnm.f32 s6, s4, s12
; CHECK-NEXT:    vmaxnm.f32 s10, s8, s12
; CHECK-NEXT:    vmaxnm.f32 s5, s1, s12
; CHECK-NEXT:    vmaxnm.f32 s9, s7, s12
; CHECK-NEXT:    vmaxnm.f32 s11, s0, s12
; CHECK-NEXT:    vmaxnm.f32 s13, s2, s12
; CHECK-NEXT:    vminnm.f32 s16, s16, s14
; CHECK-NEXT:    vmaxnm.f32 s12, s3, s12
; CHECK-NEXT:    vcvt.s32.f32 s16, s16
; CHECK-NEXT:    vminnm.f32 s12, s12, s14
; CHECK-NEXT:    vminnm.f32 s13, s13, s14
; CHECK-NEXT:    vcvt.s32.f32 s12, s12
; CHECK-NEXT:    vminnm.f32 s9, s9, s14
; CHECK-NEXT:    vcvt.s32.f32 s13, s13
; CHECK-NEXT:    vminnm.f32 s11, s11, s14
; CHECK-NEXT:    vcvt.s32.f32 s11, s11
; CHECK-NEXT:    vminnm.f32 s5, s5, s14
; CHECK-NEXT:    vcvt.s32.f32 s9, s9
; CHECK-NEXT:    vminnm.f32 s10, s10, s14
; CHECK-NEXT:    vcmp.f32 s15, s15
; CHECK-NEXT:    vminnm.f32 s6, s6, s14
; CHECK-NEXT:    vmov r1, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    lsrs r2, r1, #11
; CHECK-NEXT:    vcmp.f32 s3, s3
; CHECK-NEXT:    strb r2, [r0, #18]
; CHECK-NEXT:    vmov r3, s12
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    ubfx r2, r3, #14, #5
; CHECK-NEXT:    vcvt.s32.f32 s5, s5
; CHECK-NEXT:    orr.w r1, r2, r1, lsl #5
; CHECK-NEXT:    vcmp.f32 s2, s2
; CHECK-NEXT:    strh r1, [r0, #16]
; CHECK-NEXT:    vmov lr, s13
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs.w lr, #0
; CHECK-NEXT:    ubfx r1, lr, #1, #18
; CHECK-NEXT:    vcmp.f32 s0, s0
; CHECK-NEXT:    orr.w r1, r1, r3, lsl #18
; CHECK-NEXT:    vcvt.s32.f32 s10, s10
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vmov r12, s11
; CHECK-NEXT:    str r1, [r0, #12]
; CHECK-NEXT:    vmov r3, s9
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs.w r12, #0
; CHECK-NEXT:    vcmp.f32 s7, s7
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    bfc r3, #19, #13
; CHECK-NEXT:    vcvt.s32.f32 s6, s6
; CHECK-NEXT:    orr.w r3, r3, r12, lsl #19
; CHECK-NEXT:    str r3, [r0]
; CHECK-NEXT:    vcmp.f32 s1, s1
; CHECK-NEXT:    vmov r3, s5
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    vcmp.f32 s8, s8
; CHECK-NEXT:    bfc r3, #19, #13
; CHECK-NEXT:    vmov r1, s10
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    ubfx r2, r1, #7, #12
; CHECK-NEXT:    vcmp.f32 s4, s4
; CHECK-NEXT:    orr.w r2, r2, r3, lsl #12
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    orr.w r2, r2, lr, lsl #31
; CHECK-NEXT:    str r2, [r0, #8]
; CHECK-NEXT:    vmov r2, s6
; CHECK-NEXT:    ubfx r3, r12, #13, #6
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    bfc r2, #19, #13
; CHECK-NEXT:    orr.w r2, r3, r2, lsl #6
; CHECK-NEXT:    orr.w r1, r2, r1, lsl #25
; CHECK-NEXT:    str r1, [r0, #4]
; CHECK-NEXT:    vpop {d8}
; CHECK-NEXT:    pop {r7, pc}
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI46_0:
; CHECK-NEXT:    .long 0xc8800000 @ float -262144
; CHECK-NEXT:  .LCPI46_1:
; CHECK-NEXT:    .long 0x487fffc0 @ float 262143
    %x = call <8 x i19> @llvm.fptosi.sat.v8f16.v8i19(<8 x half> %f)
    ret <8 x i19> %x
}

; Saturating fptosi of <8 x half> to <8 x i32> ("duplicate" variant of the
; v8i32 test). A single CHECK body covers both MVE configurations: each half
; lane is isolated (vmovx.f16 for the odd lanes) and converted directly with
; the scalar vcvt.s32.f16, with no explicit clamp or NaN-zeroing code in the
; checked output; the eight i32 results are assembled into q0/q1 with paired
; vmov q[2],q[0] / q[3],q[1] lane inserts.
define arm_aapcs_vfpcc <8 x i32> @test_signed_v8f16_v8i32_duplicate(<8 x half> %f) {
; CHECK-LABEL: test_signed_v8f16_v8i32_duplicate:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmovx.f16 s4, s3
; CHECK-NEXT:    vmovx.f16 s6, s0
; CHECK-NEXT:    vcvt.s32.f16 s8, s4
; CHECK-NEXT:    vmovx.f16 s4, s2
; CHECK-NEXT:    vcvt.s32.f16 s10, s4
; CHECK-NEXT:    vmovx.f16 s4, s1
; CHECK-NEXT:    vcvt.s32.f16 s14, s2
; CHECK-NEXT:    vcvt.s32.f16 s2, s1
; CHECK-NEXT:    vcvt.s32.f16 s0, s0
; CHECK-NEXT:    vcvt.s32.f16 s4, s4
; CHECK-NEXT:    vcvt.s32.f16 s6, s6
; CHECK-NEXT:    vmov r0, s2
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    vcvt.s32.f16 s12, s3
; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
; CHECK-NEXT:    vmov r0, s4
; CHECK-NEXT:    vmov r1, s6
; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
; CHECK-NEXT:    vmov r0, s12
; CHECK-NEXT:    vmov r1, s14
; CHECK-NEXT:    vmov q1[2], q1[0], r1, r0
; CHECK-NEXT:    vmov r0, s8
; CHECK-NEXT:    vmov r1, s10
; CHECK-NEXT:    vmov q1[3], q1[1], r1, r0
; CHECK-NEXT:    bx lr
    %x = call <8 x i32> @llvm.fptosi.sat.v8f16.v8i32(<8 x half> %f)
    ret <8 x i32> %x
}

; Saturating fptosi of <8 x half> to <8 x i50>.
; One CHECK body for both MVE configurations. The 400-bit <8 x i50> result is
; returned indirectly through the pointer saved in r11. Each half lane is
; widened to f32 and converted with the f32->i64 libcall __aeabi_f2lz; each
; 64-bit result is then saturated to the 50-bit signed range by comparing the
; source against the .LCPI48 pool constants (movlt/movtlt for the minimum,
; movwgt/movtgt for the maximum) and zeroed on NaN (vcmp sN,sN + movvs).
; Finally the 50-bit lanes are bit-packed into 50 bytes of memory with
; lsr/lsl/ubfx/bfc/orr and str/strb sequences, spilling intermediate high
; words to the 16-byte stack area.
define arm_aapcs_vfpcc <8 x i50> @test_signed_v8f16_v8i50(<8 x half> %f) {
; CHECK-LABEL: test_signed_v8f16_v8i50:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14}
; CHECK-NEXT:    .pad #16
; CHECK-NEXT:    sub sp, #16
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    vcvtt.f32.f16 s28, s19
; CHECK-NEXT:    vmov r0, s28
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcvtb.f32.f16 s26, s18
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    vmov r0, s26
; CHECK-NEXT:    vldr s22, .LCPI48_1
; CHECK-NEXT:    vcvtb.f32.f16 s24, s16
; CHECK-NEXT:    vcvtt.f32.f16 s18, s18
; CHECK-NEXT:    vcmp.f32 s28, s22
; CHECK-NEXT:    mov r4, r1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vmov r6, s24
; CHECK-NEXT:    vldr s20, .LCPI48_0
; CHECK-NEXT:    vmov r5, s18
; CHECK-NEXT:    itt lt
; CHECK-NEXT:    movlt r4, #0
; CHECK-NEXT:    movtlt r4, #65534
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcmp.f32 s26, s22
; CHECK-NEXT:    str r1, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s26, s20
; CHECK-NEXT:    itt gt
; CHECK-NEXT:    movwgt r4, #65535
; CHECK-NEXT:    movtgt r4, #1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s26, s26
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str.w r0, [r11, #25]
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcmp.f32 s24, s22
; CHECK-NEXT:    str r1, [sp, #8] @ 4-byte Spill
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s24, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s24, s24
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str.w r0, [r11]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r7, #0
; CHECK-NEXT:    vcmp.f32 s28, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r7, #-1
; CHECK-NEXT:    vcmp.f32 s28, s28
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r7, #0
; CHECK-NEXT:    str r7, [sp, #12] @ 4-byte Spill
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r4, #0
; CHECK-NEXT:    lsls r0, r4, #22
; CHECK-NEXT:    orr.w r7, r0, r7, lsr #10
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    mov r6, r1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    itt lt
; CHECK-NEXT:    movlt r6, #0
; CHECK-NEXT:    movtlt r6, #65534
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    itt gt
; CHECK-NEXT:    movwgt r6, #65535
; CHECK-NEXT:    movtgt r6, #1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    str.w r7, [r11, #45]
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r6, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r5, #0
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r5, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r5, #0
; CHECK-NEXT:    lsrs r0, r5, #14
; CHECK-NEXT:    orr.w r0, r0, r6, lsl #18
; CHECK-NEXT:    vcvtt.f32.f16 s18, s17
; CHECK-NEXT:    str.w r0, [r11, #33]
; CHECK-NEXT:    vmov r0, s18
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    mov r9, r1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
; CHECK-NEXT:    itt lt
; CHECK-NEXT:    movwlt r9, #0
; CHECK-NEXT:    movtlt r9, #65534
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    mov r1, r0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    itt gt
; CHECK-NEXT:    movwgt r9, #65535
; CHECK-NEXT:    movtgt r9, #1
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    vcvtt.f32.f16 s16, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs.w r9, #0
; CHECK-NEXT:    lsl.w r0, r9, #22
; CHECK-NEXT:    orr.w r0, r0, r1, lsr #10
; CHECK-NEXT:    str.w r0, [r11, #20]
; CHECK-NEXT:    vmov r0, s16
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    mov r8, r0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r8, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r8, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    mov r10, r1
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs.w r8, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    itt lt
; CHECK-NEXT:    movwlt r10, #0
; CHECK-NEXT:    movtlt r10, #65534
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    lsr.w r0, r8, #14
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    itt gt
; CHECK-NEXT:    movwgt r10, #65535
; CHECK-NEXT:    movtgt r10, #1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs.w r10, #0
; CHECK-NEXT:    orr.w r0, r0, r10, lsl #18
; CHECK-NEXT:    str.w r0, [r11, #8]
; CHECK-NEXT:    lsrs r0, r4, #10
; CHECK-NEXT:    vcvtb.f32.f16 s16, s19
; CHECK-NEXT:    strb.w r0, [r11, #49]
; CHECK-NEXT:    vmov r0, s16
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r7, #0
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    ubfx r0, r6, #14, #4
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r7, #-1
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    vcvtb.f32.f16 s18, s17
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r7, #0
; CHECK-NEXT:    orr.w r0, r0, r7, lsl #4
; CHECK-NEXT:    str.w r0, [r11, #37]
; CHECK-NEXT:    vcmp.f32 s26, s22
; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    itt lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    movtlt r0, #65534
; CHECK-NEXT:    vcmp.f32 s26, s20
; CHECK-NEXT:    mov r4, r1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    itt gt
; CHECK-NEXT:    movwgt r0, #65535
; CHECK-NEXT:    movtgt r0, #1
; CHECK-NEXT:    vcmp.f32 s26, s26
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    bfc r0, #18, #14
; CHECK-NEXT:    orr.w r0, r0, r5, lsl #18
; CHECK-NEXT:    str.w r0, [r11, #29]
; CHECK-NEXT:    lsr.w r0, r9, #10
; CHECK-NEXT:    strb.w r0, [r11, #24]
; CHECK-NEXT:    vmov r0, s18
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    ubfx r2, r10, #14, #4
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    orr.w r2, r2, r0, lsl #4
; CHECK-NEXT:    str.w r2, [r11, #12]
; CHECK-NEXT:    vcmp.f32 s24, s22
; CHECK-NEXT:    ldr r2, [sp, #8] @ 4-byte Reload
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s24, s20
; CHECK-NEXT:    itt lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    movtlt r2, #65534
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s24, s24
; CHECK-NEXT:    itt gt
; CHECK-NEXT:    movwgt r2, #65535
; CHECK-NEXT:    movtgt r2, #1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    itt lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    movtlt r1, #65534
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s22
; CHECK-NEXT:    itt gt
; CHECK-NEXT:    movwgt r1, #65535
; CHECK-NEXT:    movtgt r1, #1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    itt lt
; CHECK-NEXT:    movlt r4, #0
; CHECK-NEXT:    movtlt r4, #65534
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    bfc r2, #18, #14
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    itt gt
; CHECK-NEXT:    movwgt r4, #65535
; CHECK-NEXT:    movtgt r4, #1
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    orr.w r2, r2, r8, lsl #18
; CHECK-NEXT:    str.w r2, [r11, #4]
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r4, #0
; CHECK-NEXT:    bfc r4, #18, #14
; CHECK-NEXT:    ldr r3, [sp, #12] @ 4-byte Reload
; CHECK-NEXT:    lsrs r2, r7, #28
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    orr.w r2, r2, r4, lsl #4
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    orr.w r2, r2, r3, lsl #22
; CHECK-NEXT:    str.w r2, [r11, #41]
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    lsrs r0, r0, #28
; CHECK-NEXT:    bfc r1, #18, #14
; CHECK-NEXT:    orr.w r0, r0, r1, lsl #4
; CHECK-NEXT:    ldr r1, [sp] @ 4-byte Reload
; CHECK-NEXT:    orr.w r0, r0, r1, lsl #22
; CHECK-NEXT:    str.w r0, [r11, #16]
; CHECK-NEXT:    add sp, #16
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI48_0:
; CHECK-NEXT:    .long 0x57ffffff @ float 5.6294992E+14
; CHECK-NEXT:  .LCPI48_1:
; CHECK-NEXT:    .long 0xd8000000 @ float -5.62949953E+14
    %x = call <8 x i50> @llvm.fptosi.sat.v8f16.v8i50(<8 x half> %f)
    ret <8 x i50> %x
}

; Saturating fptosi of <8 x half> to <8 x i64>.
; One CHECK body for both MVE configurations. Each half lane is widened to
; f32 (vcvtt/vcvtb) and converted with the f32->i64 libcall __aeabi_f2lz
; (result in r0:r1). Saturation is done per 32-bit half against the .LCPI49
; pool constants: the low word is clamped to 0/-1 (movlt/movgt.w) and the
; high word to INT64_MIN/INT64_MAX halves (movlt.w #-2147483648 /
; mvngt #-2147483648); NaN lanes are zeroed via vcmp sN,sN + movvs. The eight
; i64 results are assembled into q0-q3 with paired vmov q[2],q[0] /
; q[3],q[1] inserts (built in q5/q6/q2/q3, then copied to q0/q1 at the end).
define arm_aapcs_vfpcc <8 x i64> @test_signed_v8f16_v8i64(<8 x half> %f) {
; CHECK-LABEL: test_signed_v8f16_v8i64:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vcvtt.f32.f16 s20, s19
; CHECK-NEXT:    vmov r0, s20
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcvtb.f32.f16 s22, s19
; CHECK-NEXT:    mov r9, r0
; CHECK-NEXT:    vmov r0, s22
; CHECK-NEXT:    vldr s30, .LCPI49_1
; CHECK-NEXT:    vldr s28, .LCPI49_0
; CHECK-NEXT:    vcvtb.f32.f16 s24, s16
; CHECK-NEXT:    vcmp.f32 s20, s30
; CHECK-NEXT:    vcvtt.f32.f16 s16, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r9, #0
; CHECK-NEXT:    vcmp.f32 s20, s28
; CHECK-NEXT:    mov r8, r1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r9, #-1
; CHECK-NEXT:    vcmp.f32 s20, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vmov r4, s24
; CHECK-NEXT:    vmov r5, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs.w r9, #0
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcmp.f32 s22, s30
; CHECK-NEXT:    mov r11, r0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s22, s28
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r11, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s22, s22
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r11, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s20, s30
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs.w r11, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s20, s28
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r8, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s20, s20
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r8, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    mov r10, r1
; CHECK-NEXT:    vcmp.f32 s22, s30
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs.w r8, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r10, #-2147483648
; CHECK-NEXT:    vcmp.f32 s22, s28
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r10, #-2147483648
; CHECK-NEXT:    vcmp.f32 s22, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs.w r10, #0
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    vcmp.f32 s16, s30
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r6, #0
; CHECK-NEXT:    vcmp.f32 s16, s28
; CHECK-NEXT:    mov r0, r4
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r6, #-1
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    mov r5, r1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r6, #0
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcvtt.f32.f16 s19, s17
; CHECK-NEXT:    mov r7, r1
; CHECK-NEXT:    vmov r1, s19
; CHECK-NEXT:    vcmp.f32 s24, s30
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s24, s28
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s24, s24
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    vmov q5[2], q5[0], r0, r6
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcvtb.f32.f16 s17, s17
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    vmov r0, s17
; CHECK-NEXT:    mov r4, r1
; CHECK-NEXT:    vcmp.f32 s19, s30
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s28
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r6, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r6, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s30
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r6, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s28
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r5, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r5, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s24, s30
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r5, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r7, #-2147483648
; CHECK-NEXT:    vcmp.f32 s24, s28
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r7, #-2147483648
; CHECK-NEXT:    vcmp.f32 s24, s24
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r7, #0
; CHECK-NEXT:    vmov q5[3], q5[1], r7, r5
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcvtt.f32.f16 s16, s18
; CHECK-NEXT:    mov r7, r1
; CHECK-NEXT:    vmov r1, s16
; CHECK-NEXT:    vcmp.f32 s17, s30
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s17, s28
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s17, s17
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    vmov q6[2], q6[0], r0, r6
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcvtb.f32.f16 s18, s18
; CHECK-NEXT:    mov r6, r0
; CHECK-NEXT:    vmov r0, s18
; CHECK-NEXT:    mov r5, r1
; CHECK-NEXT:    vcmp.f32 s16, s30
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s28
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r6, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r6, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s30
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r6, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s28
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r4, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r4, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s17, s30
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r4, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r7, #-2147483648
; CHECK-NEXT:    vcmp.f32 s17, s28
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r7, #-2147483648
; CHECK-NEXT:    vcmp.f32 s17, s17
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r7, #0
; CHECK-NEXT:    vmov q6[3], q6[1], r7, r4
; CHECK-NEXT:    bl __aeabi_f2lz
; CHECK-NEXT:    vcmp.f32 s18, s30
; CHECK-NEXT:    vmov q3[2], q3[0], r11, r9
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s28
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s30
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s28
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r5, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r5, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s30
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r5, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r1, #-2147483648
; CHECK-NEXT:    vcmp.f32 s18, s28
; CHECK-NEXT:    vmov q2[2], q2[0], r0, r6
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r1, #-2147483648
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    vmov q3[3], q3[1], r10, r8
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmov q2[3], q2[1], r1, r5
; CHECK-NEXT:    vmov q0, q5
; CHECK-NEXT:    vmov q1, q6
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI49_0:
; CHECK-NEXT:    .long 0x5effffff @ float 9.22337149E+18
; CHECK-NEXT:  .LCPI49_1:
; CHECK-NEXT:    .long 0xdf000000 @ float -9.22337203E+18
    %x = call <8 x i64> @llvm.fptosi.sat.v8f16.v8i64(<8 x half> %f)
    ret <8 x i64> %x
}

define arm_aapcs_vfpcc <8 x i100> @test_signed_v8f16_v8i100(<8 x half> %f) {
; CHECK-LABEL: test_signed_v8f16_v8i100:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    vcvtb.f32.f16 s30, s19
; CHECK-NEXT:    vmov r0, s30
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcvtb.f32.f16 s28, s18
; CHECK-NEXT:    mov r5, r3
; CHECK-NEXT:    vmov r3, s28
; CHECK-NEXT:    vldr s24, .LCPI50_2
; CHECK-NEXT:    vldr s20, .LCPI50_3
; CHECK-NEXT:    vcvtt.f32.f16 s19, s19
; CHECK-NEXT:    vcmp.f32 s30, s24
; CHECK-NEXT:    vcvtb.f32.f16 s22, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s30, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s30, s30
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s30, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s30, s20
; CHECK-NEXT:    str.w r2, [r4, #83]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s30, s30
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s30, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str.w r1, [r4, #79]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s30, s20
; CHECK-NEXT:    vcvtb.f32.f16 s26, s17
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s30, s30
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str.w r0, [r4, #75]
; CHECK-NEXT:    vmov r9, s19
; CHECK-NEXT:    vmov r8, s22
; CHECK-NEXT:    mov r0, r3
; CHECK-NEXT:    vmov r6, s26
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s28, s24
; CHECK-NEXT:    mov r7, r3
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s28
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s20
; CHECK-NEXT:    str.w r2, [r4, #58]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s28
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str.w r1, [r4, #54]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s28, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s28, s28
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str.w r0, [r4, #50]
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s26, s24
; CHECK-NEXT:    mov r10, r3
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s26, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s26, s26
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s26, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s26, s20
; CHECK-NEXT:    str.w r2, [r4, #33]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s26, s26
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s26, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str.w r1, [r4, #29]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s26, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s26, s26
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str.w r0, [r4, #25]
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s22, s24
; CHECK-NEXT:    mov r8, r3
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s22, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s22, s22
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s22, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s22, s20
; CHECK-NEXT:    str r2, [r4, #8]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s22, s22
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s22, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str r1, [r4, #4]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s22, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s22, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str r0, [r4]
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s19, s24
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    lsrs r6, r1, #28
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    orr.w r6, r6, r2, lsl #4
; CHECK-NEXT:    str.w r6, [r4, #95]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    lsrs r6, r0, #28
; CHECK-NEXT:    orr.w r1, r6, r1, lsl #4
; CHECK-NEXT:    str.w r1, [r4, #91]
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    mvnlt r3, #7
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r3, #7
; CHECK-NEXT:    lsrs r1, r2, #28
; CHECK-NEXT:    vcvtt.f32.f16 s19, s18
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    orr.w r2, r1, r3, lsl #4
; CHECK-NEXT:    vmov r1, s19
; CHECK-NEXT:    strb.w r2, [r4, #99]
; CHECK-NEXT:    vcmp.f32 s30, s24
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it lt
; CHECK-NEXT:    mvnlt r5, #7
; CHECK-NEXT:    vcmp.f32 s30, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r5, #7
; CHECK-NEXT:    vcmp.f32 s30, s30
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r5, #0
; CHECK-NEXT:    and r2, r5, #15
; CHECK-NEXT:    orr.w r0, r2, r0, lsl #4
; CHECK-NEXT:    str.w r0, [r4, #87]
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s19, s24
; CHECK-NEXT:    vcvtt.f32.f16 s18, s17
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    lsr.w r6, r1, #28
; CHECK-NEXT:    vcmp.f32 s19, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    orr.w r6, r6, r2, lsl #4
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str.w r6, [r4, #70]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    lsrs r2, r2, #28
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    lsrs r6, r0, #28
; CHECK-NEXT:    orr.w r1, r6, r1, lsl #4
; CHECK-NEXT:    str.w r1, [r4, #66]
; CHECK-NEXT:    vmov r1, s18
; CHECK-NEXT:    vcmp.f32 s19, s24
; CHECK-NEXT:    vcvtt.f32.f16 s16, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    mvnlt r3, #7
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r3, #7
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    orr.w r2, r2, r3, lsl #4
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    b.w .LBB50_3
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI50_2:
; CHECK-NEXT:    .long 0xf1000000 @ float -6.338253E+29
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.2:
; CHECK-NEXT:  .LCPI50_3:
; CHECK-NEXT:    .long 0x70ffffff @ float 6.33825262E+29
; CHECK-NEXT:    .p2align 1
; CHECK-NEXT:  .LBB50_3:
; CHECK-NEXT:    strb.w r2, [r4, #74]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    mvnlt r7, #7
; CHECK-NEXT:    vcmp.f32 s28, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r7, #7
; CHECK-NEXT:    vcmp.f32 s28, s28
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r7, #0
; CHECK-NEXT:    and r2, r7, #15
; CHECK-NEXT:    orr.w r0, r2, r0, lsl #4
; CHECK-NEXT:    str.w r0, [r4, #62]
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s18, s24
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    lsr.w r7, r1, #28
; CHECK-NEXT:    vcmp.f32 s18, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    orr.w r7, r7, r2, lsl #4
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str.w r7, [r4, #45]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    lsrs r2, r2, #28
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    lsrs r7, r0, #28
; CHECK-NEXT:    vcmp.f32 s18, s24
; CHECK-NEXT:    orr.w r7, r7, r1, lsl #4
; CHECK-NEXT:    vmov r1, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s20
; CHECK-NEXT:    str.w r7, [r4, #41]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    mvnlt r3, #7
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r3, #7
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s26, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    orr.w r2, r2, r3, lsl #4
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    strb.w r2, [r4, #49]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    mvnlt r10, #7
; CHECK-NEXT:    vcmp.f32 s26, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r10, #7
; CHECK-NEXT:    vcmp.f32 s26, s26
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs.w r10, #0
; CHECK-NEXT:    and r2, r10, #15
; CHECK-NEXT:    orr.w r0, r2, r0, lsl #4
; CHECK-NEXT:    str.w r0, [r4, #37]
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s16, s24
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    lsrs r7, r1, #28
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    orr.w r7, r7, r2, lsl #4
; CHECK-NEXT:    str r7, [r4, #20]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s20
; CHECK-NEXT:    lsr.w r7, r0, #28
; CHECK-NEXT:    orr.w r1, r7, r1, lsl #4
; CHECK-NEXT:    str r1, [r4, #16]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    mvnlt r3, #7
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s16, s16
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt r3, #7
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    lsr.w r1, r2, #28
; CHECK-NEXT:    vcmp.f32 s22, s24
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    orr.w r1, r1, r3, lsl #4
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    strb r1, [r4, #24]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    mvnlt r8, #7
; CHECK-NEXT:    vcmp.f32 s22, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r8, #7
; CHECK-NEXT:    vcmp.f32 s22, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs.w r8, #0
; CHECK-NEXT:    and r1, r8, #15
; CHECK-NEXT:    orr.w r0, r1, r0, lsl #4
; CHECK-NEXT:    str r0, [r4, #12]
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
; CHECK-NEXT:  @ %bb.4:
    ; No native i100 conversion exists: each half lane is widened to f32
    ; (vcvtb/vcvtt) and converted via the __fixsfti libcall, then clamped
    ; against the f32 bounds in .LCPI50_2/.LCPI50_3 (approx. +/-2^99), with
    ; NaN lanes forced to 0 via the V flag (movvs after vcmp of a value with
    ; itself). Results are stored as packed 100-bit fields through r4 (sret).
    ; NOTE: the CHECK lines above are autogenerated by
    ; update_llc_test_checks.py -- regenerate them, do not hand-edit.
    %x = call <8 x i100> @llvm.fptosi.sat.v8f16.v8i100(<8 x half> %f)
    ret <8 x i100> %x
}

define arm_aapcs_vfpcc <8 x i128> @test_signed_v8f16_v8i128(<8 x half> %f) {
; CHECK-LABEL: test_signed_v8f16_v8i128:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    vcvtt.f32.f16 s28, s19
; CHECK-NEXT:    vcvtb.f32.f16 s20, s16
; CHECK-NEXT:    vmov r0, s28
; CHECK-NEXT:    vcvtt.f32.f16 s24, s16
; CHECK-NEXT:    vcvtb.f32.f16 s26, s17
; CHECK-NEXT:    vcvtb.f32.f16 s19, s19
; CHECK-NEXT:    vldr s22, .LCPI51_2
; CHECK-NEXT:    vmov r8, s20
; CHECK-NEXT:    vmov r9, s24
; CHECK-NEXT:    vcvtt.f32.f16 s30, s18
; CHECK-NEXT:    vmov r7, s26
; CHECK-NEXT:    vmov r6, s19
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vldr s16, .LCPI51_3
; CHECK-NEXT:    vmov r5, s30
; CHECK-NEXT:    vcvtb.f32.f16 s18, s18
; CHECK-NEXT:    vcmp.f32 s28, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s22
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s28
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s22
; CHECK-NEXT:    str r3, [r4, #124]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s28
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s22
; CHECK-NEXT:    str r2, [r4, #120]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s28
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str r1, [r4, #116]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s28, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s28, s28
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str r0, [r4, #112]
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s19, s16
; CHECK-NEXT:    vcvtt.f32.f16 s28, s17
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s22
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s22
; CHECK-NEXT:    str r3, [r4, #108]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s22
; CHECK-NEXT:    str r2, [r4, #104]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s19, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str r1, [r4, #100]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s19, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s19, s19
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str r0, [r4, #96]
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    vmov r6, s18
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s30, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s30, s22
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s30, s30
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s30, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s30, s22
; CHECK-NEXT:    str r3, [r4, #92]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s30, s30
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s30, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s30, s22
; CHECK-NEXT:    str r2, [r4, #88]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s30, s30
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s30, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str r1, [r4, #84]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s30, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s30, s30
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str r0, [r4, #80]
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    vmov r5, s28
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s18, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    str r3, [r4, #76]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    str r2, [r4, #72]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s18, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str r1, [r4, #68]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s18, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s18, s18
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str r0, [r4, #64]
; CHECK-NEXT:    mov r0, r5
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s28, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s22
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s28
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s22
; CHECK-NEXT:    str r3, [r4, #60]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s28
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s22
; CHECK-NEXT:    str r2, [r4, #56]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s28
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s28, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str r1, [r4, #52]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s28, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s28, s28
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str r0, [r4, #48]
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s26, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s26, s22
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s26, s26
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s26, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s26, s22
; CHECK-NEXT:    str r3, [r4, #44]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s26, s26
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s26, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s26, s22
; CHECK-NEXT:    str r2, [r4, #40]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s26, s26
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s26, s16
; CHECK-NEXT:    b.w .LBB51_3
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI51_2:
; CHECK-NEXT:    .long 0x7effffff @ float 1.70141173E+38
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.2:
; CHECK-NEXT:  .LCPI51_3:
; CHECK-NEXT:    .long 0xff000000 @ float -1.70141183E+38
; CHECK-NEXT:    .p2align 1
; CHECK-NEXT:  .LBB51_3:
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str r1, [r4, #36]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s26, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s26, s26
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str r0, [r4, #32]
; CHECK-NEXT:    mov r0, r9
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s24, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s24, s22
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s24, s24
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s24, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s24, s22
; CHECK-NEXT:    str r3, [r4, #28]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s24, s24
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s24, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s24, s22
; CHECK-NEXT:    str r2, [r4, #24]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s24, s24
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s24, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str r1, [r4, #20]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s24, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s24, s24
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str r0, [r4, #16]
; CHECK-NEXT:    mov r0, r8
; CHECK-NEXT:    bl __fixsfti
; CHECK-NEXT:    vcmp.f32 s20, s16
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s20, s22
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt.w r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s20, s20
; CHECK-NEXT:    it gt
; CHECK-NEXT:    mvngt r3, #-2147483648
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s20, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r3, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s20, s22
; CHECK-NEXT:    str r3, [r4, #12]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s20, s20
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r2, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s20, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r2, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s20, s22
; CHECK-NEXT:    str r2, [r4, #8]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s20, s20
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r1, #-1
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vcmp.f32 s20, s16
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r1, #0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    str r1, [r4, #4]
; CHECK-NEXT:    it lt
; CHECK-NEXT:    movlt r0, #0
; CHECK-NEXT:    vcmp.f32 s20, s22
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it gt
; CHECK-NEXT:    movgt.w r0, #-1
; CHECK-NEXT:    vcmp.f32 s20, s20
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    it vs
; CHECK-NEXT:    movvs r0, #0
; CHECK-NEXT:    str r0, [r4]
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, pc}
; CHECK-NEXT:  @ %bb.4:
    ; Each half lane is widened to f32 and converted by __fixsfti; the four
    ; result words are saturated to INT128_MIN/INT128_MAX by comparing against
    ; the f32 constants .LCPI51_2/.LCPI51_3 (approx. +/-2^127): low words go to
    ; 0/-1, the top word to 0x80000000/0x7fffffff, and NaN lanes are zeroed
    ; via the V flag (movvs). The i128 lanes are stored through r4 (sret) at
    ; 16-byte strides. NOTE: the CHECK lines above are autogenerated by
    ; update_llc_test_checks.py -- regenerate them, do not hand-edit.
    %x = call <8 x i128> @llvm.fptosi.sat.v8f16.v8i128(<8 x half> %f)
    ret <8 x i128> %x
}